
Searched refs:kvm (Results 1 – 25 of 350) sorted by relevance


/linux/arch/powerpc/include/asm/
kvm_book3s_uvmem.h
9 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
10 void kvmppc_uvmem_slot_free(struct kvm *kvm,
12 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
16 unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
20 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
44 kvmppc_uvmem_slot_init(struct kvm * kvm,const struct kvm_memory_slot * slot) kvmppc_uvmem_slot_init() argument
50 kvmppc_uvmem_slot_free(struct kvm * kvm,const struct kvm_memory_slot * slot) kvmppc_uvmem_slot_free() argument
53 kvmppc_h_svm_page_in(struct kvm * kvm,unsigned long gra,unsigned long flags,unsigned long page_shift) kvmppc_h_svm_page_in() argument
60 kvmppc_h_svm_page_out(struct kvm * kvm,unsigned long gra,unsigned long flags,unsigned long page_shift) kvmppc_h_svm_page_out() argument
66 kvmppc_h_svm_init_start(struct kvm * kvm) kvmppc_h_svm_init_start() argument
71 kvmppc_h_svm_init_done(struct kvm * kvm) kvmppc_h_svm_init_done() argument
76 kvmppc_h_svm_init_abort(struct kvm * kvm) kvmppc_h_svm_init_abort() argument
81 kvmppc_send_page_to_uv(struct kvm * kvm,unsigned long gfn) kvmppc_send_page_to_uv() argument
88 kvmppc_uvmem_drop_pages(const struct kvm_memory_slot * free,struct kvm * kvm,bool skip_page_out) kvmppc_uvmem_drop_pages() argument
90 kvmppc_uvmem_memslot_create(struct kvm * kvm,const struct kvm_memory_slot * new) kvmppc_uvmem_memslot_create() argument
96 kvmppc_uvmem_memslot_delete(struct kvm * kvm,const struct kvm_memory_slot * old) kvmppc_uvmem_memslot_delete() argument
[all...]
kvm_ppc.h
169 extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
170 extern int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
172 extern void kvmppc_rmap_reset(struct kvm *kvm);
176 extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
178 extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
365 is_kvmppc_hv_enabled(struct kvm * kvm) is_kvmppc_hv_enabled() argument
662 kvmppc_get_passthru_irqmap(struct kvm * kvm) kvmppc_get_passthru_irqmap() argument
696 kvmppc_get_passthru_irqmap(struct kvm * kvm) kvmppc_get_passthru_irqmap() argument
700 kvmppc_free_pimap(struct kvm * kvm) kvmppc_free_pimap() argument
759 kvmppc_xive_set_xive(struct kvm * kvm,u32 irq,u32 server,u32 priority) kvmppc_xive_set_xive() argument
761 kvmppc_xive_get_xive(struct kvm * kvm,u32 irq,u32 * server,u32 * priority) kvmppc_xive_get_xive() argument
763 kvmppc_xive_int_on(struct kvm * kvm,u32 irq) kvmppc_xive_int_on() argument
764 kvmppc_xive_int_off(struct kvm * kvm,u32 irq) kvmppc_xive_int_off() argument
769 kvmppc_xive_set_mapped(struct kvm * kvm,unsigned long guest_irq,struct irq_desc * host_desc) kvmppc_xive_set_mapped() argument
771 kvmppc_xive_clr_mapped(struct kvm * kvm,unsigned long guest_irq,struct irq_desc * host_desc) kvmppc_xive_clr_mapped() argument
776 kvmppc_xive_set_irq(struct kvm * kvm,int irq_source_id,u32 irq,int level,bool line_status) kvmppc_xive_set_irq() argument
[all...]
/linux/arch/x86/kvm/mmu/
page_track.c
23 static bool kvm_external_write_tracking_enabled(struct kvm *kvm) in kvm_external_write_tracking_enabled() argument
30 return smp_load_acquire(&kvm->arch.external_write_tracking_enabled); in kvm_external_write_tracking_enabled()
36 bool kvm_page_track_write_tracking_enabled(struct kvm *kvm) in kvm_page_track_write_tracking_enabled() argument
38 return kvm_external_write_tracking_enabled(kvm) || in kvm_page_track_write_tracking_enabled()
39 kvm_shadow_root_allocated(kvm) || !tdp_enabled; in kvm_page_track_write_tracking_enabled()
60 int kvm_page_track_create_memslot(struct kvm *kvm, in kvm_page_track_create_memslot() argument
64 if (!kvm_page_track_write_tracking_enabled(kvm)) in kvm_page_track_create_memslot()
90 __kvm_write_track_add_gfn(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn) __kvm_write_track_add_gfn() argument
113 __kvm_write_track_remove_gfn(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn) __kvm_write_track_remove_gfn() argument
136 kvm_gfn_is_write_tracked(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn) kvm_gfn_is_write_tracked() argument
152 kvm_page_track_cleanup(struct kvm * kvm) kvm_page_track_cleanup() argument
160 kvm_page_track_init(struct kvm * kvm) kvm_page_track_init() argument
169 kvm_enable_external_write_tracking(struct kvm * kvm) kvm_enable_external_write_tracking() argument
220 kvm_page_track_register_notifier(struct kvm * kvm,struct kvm_page_track_notifier_node * n) kvm_page_track_register_notifier() argument
250 kvm_page_track_unregister_notifier(struct kvm * kvm,struct kvm_page_track_notifier_node * n) kvm_page_track_unregister_notifier() argument
273 __kvm_page_track_write(struct kvm * kvm,gpa_t gpa,const u8 * new,int bytes) __kvm_page_track_write() argument
296 kvm_page_track_delete_slot(struct kvm * kvm,struct kvm_memory_slot * slot) kvm_page_track_delete_slot() argument
322 kvm_write_track_add_gfn(struct kvm * kvm,gfn_t gfn) kvm_write_track_add_gfn() argument
352 kvm_write_track_remove_gfn(struct kvm * kvm,gfn_t gfn) kvm_write_track_remove_gfn() argument
[all...]
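For orientation, here is a minimal sketch of how an external user might drive the write-tracking API listed above (register a notifier around lines 220/250, then add and remove a tracked gfn around lines 322/352). The struct kvm_page_track_notifier_node type and the four function names come from the listing; the callback wiring inside the node, the return-value conventions, and the helper itself are assumptions.

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

/* Hypothetical external user of write tracking; the node's callbacks
 * (write/remove handlers) would be filled in before registration. */
static struct kvm_page_track_notifier_node example_node;

static int example_track_one_gfn(struct kvm *kvm, gfn_t gfn)
{
        int r;

        r = kvm_page_track_register_notifier(kvm, &example_node);
        if (r)
                return r;

        r = kvm_write_track_add_gfn(kvm, gfn);  /* write-protect this gfn */
        if (r) {
                kvm_page_track_unregister_notifier(kvm, &example_node);
                return r;
        }

        /* ... consume write notifications via the registered node ... */

        kvm_write_track_remove_gfn(kvm, gfn);
        kvm_page_track_unregister_notifier(kvm, &example_node);
        return 0;
}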
tdp_mmu.c
12 #include <trace/events/kvm.h>
15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm) in kvm_mmu_init_tdp_mmu() argument
17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); in kvm_mmu_init_tdp_mmu()
18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); in kvm_mmu_init_tdp_mmu()
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, in kvm_lockdep_assert_mmu_lock_held() argument
26 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
28 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) kvm_mmu_uninit_tdp_mmu() argument
79 kvm_tdp_mmu_put_root(struct kvm * kvm,struct kvm_mmu_page * root) kvm_tdp_mmu_put_root() argument
121 tdp_mmu_next_root(struct kvm * kvm,struct kvm_mmu_page * prev_root,enum kvm_tdp_mmu_root_types types) tdp_mmu_next_root() argument
258 struct kvm *kvm = vcpu->kvm; kvm_tdp_mmu_alloc_root() local
327 tdp_account_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp) tdp_account_mmu_page() argument
335 tdp_unaccount_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp) tdp_unaccount_mmu_page() argument
349 tdp_mmu_unlink_sp(struct kvm * kvm,struct kvm_mmu_page * sp) tdp_mmu_unlink_sp() argument
362 remove_external_spte(struct kvm * kvm,gfn_t gfn,u64 old_spte,int level) remove_external_spte() argument
402 handle_removed_pt(struct kvm * kvm,tdp_ptep_t pt,bool shared) handle_removed_pt() argument
515 set_external_spte_present(struct kvm * kvm,tdp_ptep_t sptep,gfn_t gfn,u64 old_spte,u64 new_spte,int level) set_external_spte_present() argument
571 handle_changed_spte(struct kvm * kvm,int as_id,gfn_t gfn,u64 old_spte,u64 new_spte,int level,bool shared) handle_changed_spte() argument
655 __tdp_mmu_set_spte_atomic(struct kvm * kvm,struct tdp_iter * iter,u64 new_spte) __tdp_mmu_set_spte_atomic() argument
716 tdp_mmu_set_spte_atomic(struct kvm * kvm,struct tdp_iter * iter,u64 new_spte) tdp_mmu_set_spte_atomic() argument
747 tdp_mmu_set_spte(struct kvm * kvm,int as_id,tdp_ptep_t sptep,u64 old_spte,u64 new_spte,gfn_t gfn,int level) tdp_mmu_set_spte() argument
777 tdp_mmu_iter_set_spte(struct kvm * kvm,struct tdp_iter * iter,u64 new_spte) tdp_mmu_iter_set_spte() argument
796 tdp_mmu_iter_need_resched(struct kvm * kvm,struct tdp_iter * iter) tdp_mmu_iter_need_resched() argument
820 tdp_mmu_iter_cond_resched(struct kvm * kvm,struct tdp_iter * iter,bool flush,bool shared) tdp_mmu_iter_cond_resched() argument
858 __tdp_mmu_zap_root(struct kvm * kvm,struct kvm_mmu_page * root,bool shared,int zap_level) __tdp_mmu_zap_root() argument
881 tdp_mmu_zap_root(struct kvm * kvm,struct kvm_mmu_page * root,bool shared) tdp_mmu_zap_root() argument
928 kvm_tdp_mmu_zap_sp(struct kvm * kvm,struct kvm_mmu_page * sp) kvm_tdp_mmu_zap_sp() argument
956 tdp_mmu_zap_leafs(struct kvm * kvm,struct kvm_mmu_page * root,gfn_t start,gfn_t end,bool can_yield,bool flush) tdp_mmu_zap_leafs() argument
1002 kvm_tdp_mmu_zap_leafs(struct kvm * kvm,gfn_t start,gfn_t end,bool flush) kvm_tdp_mmu_zap_leafs() argument
1013 kvm_tdp_mmu_zap_all(struct kvm * kvm) kvm_tdp_mmu_zap_all() argument
1040 kvm_tdp_mmu_zap_invalidated_roots(struct kvm * kvm,bool shared) kvm_tdp_mmu_zap_invalidated_roots() argument
1091 kvm_tdp_mmu_invalidate_roots(struct kvm * kvm,enum kvm_tdp_mmu_root_types root_types) kvm_tdp_mmu_invalidate_roots() argument
1213 tdp_mmu_link_sp(struct kvm * kvm,struct tdp_iter * iter,struct kvm_mmu_page * sp,bool shared) tdp_mmu_link_sp() argument
1242 struct kvm *kvm = vcpu->kvm; kvm_tdp_mmu_map() local
1327 kvm_tdp_mmu_unmap_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range,bool flush) kvm_tdp_mmu_unmap_gfn_range() argument
1350 kvm_tdp_mmu_age_spte(struct kvm * kvm,struct tdp_iter * iter) kvm_tdp_mmu_age_spte() argument
1372 __kvm_tdp_mmu_age_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range,bool test_only) __kvm_tdp_mmu_age_gfn_range() argument
1408 kvm_tdp_mmu_age_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range) kvm_tdp_mmu_age_gfn_range() argument
1413 kvm_tdp_mmu_test_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range) kvm_tdp_mmu_test_age_gfn() argument
1423 wrprot_gfn_range(struct kvm * kvm,struct kvm_mmu_page * root,gfn_t start,gfn_t end,int min_level) wrprot_gfn_range() argument
1461 kvm_tdp_mmu_wrprot_slot(struct kvm * kvm,const struct kvm_memory_slot * slot,int min_level) kvm_tdp_mmu_wrprot_slot() argument
1494 tdp_mmu_split_huge_page(struct kvm * kvm,struct tdp_iter * iter,struct kvm_mmu_page * sp,bool shared) tdp_mmu_split_huge_page() argument
1532 tdp_mmu_split_huge_pages_root(struct kvm * kvm,struct kvm_mmu_page * root,gfn_t start,gfn_t end,int target_level,bool shared) tdp_mmu_split_huge_pages_root() argument
1614 kvm_tdp_mmu_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t start,gfn_t end,int target_level,bool shared) kvm_tdp_mmu_try_split_huge_pages() argument
1632 tdp_mmu_need_write_protect(struct kvm * kvm,struct kvm_mmu_page * sp) tdp_mmu_need_write_protect() argument
1642 clear_dirty_gfn_range(struct kvm * kvm,struct kvm_mmu_page * root,gfn_t start,gfn_t end) clear_dirty_gfn_range() argument
1677 kvm_tdp_mmu_clear_dirty_slot(struct kvm * kvm,const struct kvm_memory_slot * slot) kvm_tdp_mmu_clear_dirty_slot() argument
1688 clear_dirty_pt_masked(struct kvm * kvm,struct kvm_mmu_page * root,gfn_t gfn,unsigned long mask,bool wrprot) clear_dirty_pt_masked() argument
1733 kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn,unsigned long mask,bool wrprot) kvm_tdp_mmu_clear_dirty_pt_masked() argument
1744 tdp_mmu_make_huge_spte(struct kvm * kvm,struct tdp_iter * parent,u64 * huge_spte) tdp_mmu_make_huge_spte() argument
1770 recover_huge_pages_range(struct kvm * kvm,struct kvm_mmu_page * root,const struct kvm_memory_slot * slot) recover_huge_pages_range() argument
1842 kvm_tdp_mmu_recover_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot) kvm_tdp_mmu_recover_huge_pages() argument
1857 write_protect_gfn(struct kvm * kvm,struct kvm_mmu_page * root,gfn_t gfn,int min_level) write_protect_gfn() argument
1893 kvm_tdp_mmu_write_protect_gfn(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn,int min_level) kvm_tdp_mmu_write_protect_gfn() argument
1939 struct kvm *kvm = vcpu->kvm; kvm_tdp_mmu_gpa_is_mapped() local
[all...]
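The kvm_lockdep_assert_mmu_lock_held() helper near line 22 above captures the TDP MMU locking contract: walkers that tolerate concurrent changes hold kvm->mmu_lock for read ("shared"), destructive paths hold it for write. A small illustrative sketch follows; the function is hypothetical, and only the lock and the lockdep assertions come from the listed code.

#include <linux/kvm_host.h>

/* Hypothetical walker running on the shared (read) side of mmu_lock,
 * mirroring the lockdep_assert_held_read() case shown above; exclusive
 * paths would take write_lock() and assert _held_write() instead. */
static void example_tdp_mmu_shared_walk(struct kvm *kvm)
{
        read_lock(&kvm->mmu_lock);
        lockdep_assert_held_read(&kvm->mmu_lock);
        /* ... iterate roots/SPTEs; SPTE changes must use the atomic setters ... */
        read_unlock(&kvm->mmu_lock);
}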
page_track.h
10 bool kvm_page_track_write_tracking_enabled(struct kvm *kvm);
14 int kvm_page_track_create_memslot(struct kvm *kvm,
18 void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
20 void __kvm_write_track_remove_gfn(struct kvm *kvm,
23 bool kvm_gfn_is_write_tracked(struct kvm *kvm,
33 kvm_page_track_has_external_user(struct kvm * kvm) kvm_page_track_has_external_user() argument
38 kvm_page_track_init(struct kvm * kvm) kvm_page_track_init() argument
39 kvm_page_track_cleanup(struct kvm * kvm) kvm_page_track_cleanup() argument
41 __kvm_page_track_write(struct kvm * kvm,gpa_t gpa,const u8 * new,int bytes) __kvm_page_track_write() argument
43 kvm_page_track_delete_slot(struct kvm * kvm,struct kvm_memory_slot * slot) kvm_page_track_delete_slot() argument
46 kvm_page_track_has_external_user(struct kvm * kvm) kvm_page_track_has_external_user() argument
[all...]
tdp_mmu.h
10 void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
11 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
20 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
30 static inline enum kvm_tdp_mmu_root_types kvm_gfn_range_filter_to_root_types(struct kvm *kvm, in kvm_gfn_range_filter_to_root_types() argument
35 if (!kvm_has_mirrored_tdp(kvm)) in kvm_gfn_range_filter_to_root_types()
51 if (unlikely(!kvm_is_addr_direct(vcpu->kvm, faul in tdp_mmu_get_root_for_fault()
[all...]
mmu.c
122 #include <trace/events/kvm.h>
284 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) in kvm_flush_remote_tlbs_sptep() argument
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep()
522 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) in mmu_spte_clear_track_bits() argument
536 kvm_update_page_stats(kvm, level, -1); in mmu_spte_clear_track_bits()
603 if (kvm_has_mirrored_tdp(vcpu->kvm)) { in mmu_topup_memory_caches()
749 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed() argument
779 track_possible_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp) track_possible_nx_huge_page() argument
797 account_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp,bool nx_huge_page_possible) account_nx_huge_page() argument
806 unaccount_shadowed(struct kvm * kvm,struct kvm_mmu_page * sp) unaccount_shadowed() argument
822 untrack_possible_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp) untrack_possible_nx_huge_page() argument
831 unaccount_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp) unaccount_nx_huge_page() argument
935 kvm_rmap_lock(struct kvm * kvm,struct kvm_rmap_head * rmap_head) kvm_rmap_lock() argument
956 kvm_rmap_unlock(struct kvm * kvm,struct kvm_rmap_head * rmap_head,unsigned long new_val) kvm_rmap_unlock() argument
1003 pte_list_add(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,u64 * spte,struct kvm_rmap_head * rmap_head) pte_list_add() argument
1047 pte_list_desc_remove_entry(struct kvm * kvm,unsigned long * rmap_val,struct pte_list_desc * desc,int i) pte_list_desc_remove_entry() argument
1083 pte_list_remove(struct kvm * kvm,u64 * spte,struct kvm_rmap_head * rmap_head) pte_list_remove() argument
1119 kvm_zap_one_rmap_spte(struct kvm * kvm,struct kvm_rmap_head * rmap_head,u64 * sptep) kvm_zap_one_rmap_spte() argument
1127 kvm_zap_all_rmap_sptes(struct kvm * kvm,struct kvm_rmap_head * rmap_head) kvm_zap_all_rmap_sptes() argument
1180 rmap_remove(struct kvm * kvm,u64 * spte) rmap_remove() argument
1278 drop_spte(struct kvm * kvm,u64 * sptep) drop_spte() argument
1286 drop_large_spte(struct kvm * kvm,u64 * sptep,bool flush) drop_large_spte() argument
1355 __rmap_clear_dirty(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot) __rmap_clear_dirty() argument
1373 kvm_mmu_write_protect_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask) kvm_mmu_write_protect_pt_masked() argument
1396 kvm_mmu_clear_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask) kvm_mmu_clear_dirty_pt_masked() argument
1419 kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask) kvm_arch_mmu_enable_log_dirty_pt_masked() argument
1466 kvm_cpu_dirty_log_size(struct kvm * kvm) kvm_cpu_dirty_log_size() argument
1471 kvm_mmu_slot_gfn_write_protect(struct kvm * kvm,struct kvm_memory_slot * slot,u64 gfn,int min_level) kvm_mmu_slot_gfn_write_protect() argument
1501 kvm_zap_rmap(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot) kvm_zap_rmap() argument
1581 __walk_slot_rmaps(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,int start_level,int end_level,gfn_t start_gfn,gfn_t end_gfn,bool can_yield,bool flush_on_yield,bool flush) __walk_slot_rmaps() argument
1614 walk_slot_rmaps(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,int start_level,int end_level,bool flush_on_yield) walk_slot_rmaps() argument
1625 walk_slot_rmaps_4k(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,bool flush_on_yield) walk_slot_rmaps_4k() argument
1633 __kvm_rmap_zap_gfn_range(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t start,gfn_t end,bool can_yield,bool flush) __kvm_rmap_zap_gfn_range() argument
1643 kvm_unmap_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range) kvm_unmap_gfn_range() argument
1675 __rmap_add(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,const struct kvm_memory_slot * slot,u64 * spte,gfn_t gfn,unsigned int access) __rmap_add() argument
1707 kvm_rmap_age_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range,bool test_only) kvm_rmap_age_gfn_range() argument
1755 kvm_may_have_shadow_mmu_sptes(struct kvm * kvm) kvm_may_have_shadow_mmu_sptes() argument
1760 kvm_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range) kvm_age_gfn() argument
1773 kvm_test_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range) kvm_test_age_gfn() argument
1803 kvm_account_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp) kvm_account_mmu_page() argument
1809 kvm_unaccount_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp) kvm_unaccount_mmu_page() argument
1831 mmu_page_add_parent_pte(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,struct kvm_mmu_page * sp,u64 * parent_pte) mmu_page_add_parent_pte() argument
1841 mmu_page_remove_parent_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * parent_pte) mmu_page_remove_parent_pte() argument
1847 drop_parent_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * parent_pte) drop_parent_pte() argument
1962 kvm_unlink_unsync_page(struct kvm * kvm,struct kvm_mmu_page * sp) kvm_unlink_unsync_page() argument
1988 kvm_get_mmu_page_hash(struct kvm * kvm,gfn_t gfn) kvm_get_mmu_page_hash() argument
2097 kvm_mmu_remote_flush_or_zap(struct kvm * kvm,struct list_head * invalid_list,bool remote_flush) kvm_mmu_remote_flush_or_zap() argument
2111 is_obsolete_sp(struct kvm * kvm,struct kvm_mmu_page * sp) is_obsolete_sp() argument
2251 kvm_mmu_find_shadow_page(struct kvm * kvm,struct kvm_vcpu * vcpu,gfn_t gfn,struct hlist_head * sp_list,union kvm_mmu_page_role role) kvm_mmu_find_shadow_page() argument
2336 kvm_mmu_alloc_shadow_page(struct kvm * kvm,struct shadow_page_caches * caches,gfn_t gfn,struct hlist_head * sp_list,union kvm_mmu_page_role role) kvm_mmu_alloc_shadow_page() argument
2372 __kvm_mmu_get_shadow_page(struct kvm * kvm,struct kvm_vcpu * vcpu,struct shadow_page_caches * caches,gfn_t gfn,union kvm_mmu_page_role role) __kvm_mmu_get_shadow_page() argument
2535 __link_shadow_page(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,u64 * sptep,struct kvm_mmu_page * sp,bool flush) __link_shadow_page() argument
2599 mmu_page_zap_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * spte,struct list_head * invalid_list) mmu_page_zap_pte() argument
2630 kvm_mmu_page_unlink_children(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list) kvm_mmu_page_unlink_children() argument
2643 kvm_mmu_unlink_parents(struct kvm * kvm,struct kvm_mmu_page * sp) kvm_mmu_unlink_parents() argument
2652 mmu_zap_unsync_children(struct kvm * kvm,struct kvm_mmu_page * parent,struct list_head * invalid_list) mmu_zap_unsync_children() argument
2676 __kvm_mmu_prepare_zap_page(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list,int * nr_zapped) __kvm_mmu_prepare_zap_page() argument
2741 kvm_mmu_prepare_zap_page(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list) kvm_mmu_prepare_zap_page() argument
2750 kvm_mmu_commit_zap_page(struct kvm * kvm,struct list_head * invalid_list) kvm_mmu_commit_zap_page() argument
2775 kvm_mmu_zap_oldest_mmu_pages(struct kvm * kvm,unsigned long nr_to_zap) kvm_mmu_zap_oldest_mmu_pages() argument
2812 kvm_mmu_available_pages(struct kvm * kvm) kvm_mmu_available_pages() argument
2848 kvm_mmu_change_mmu_pages(struct kvm * kvm,unsigned long goal_nr_mmu_pages) kvm_mmu_change_mmu_pages() argument
2867 struct kvm *kvm = vcpu->kvm; __kvm_mmu_unprotect_gfn_and_retry() local
2910 kvm_unsync_page(struct kvm * kvm,struct kvm_mmu_page * sp) kvm_unsync_page() argument
2925 mmu_try_to_unsync_pages(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn,bool synchronizing,bool prefetch) mmu_try_to_unsync_pages() argument
3224 host_pfn_mapping_level(struct kvm * kvm,gfn_t gfn,const struct kvm_memory_slot * slot) host_pfn_mapping_level() argument
3288 __kvm_mmu_max_mapping_level(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn,int max_level,bool is_private) __kvm_mmu_max_mapping_level() argument
3312 kvm_mmu_max_mapping_level(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn) kvm_mmu_max_mapping_level() argument
3491 page_fault_can_be_fast(struct kvm * kvm,struct kvm_page_fault * fault) page_fault_can_be_fast() argument
3725 mmu_free_root_page(struct kvm * kvm,hpa_t * root_hpa,struct list_head * invalid_list) mmu_free_root_page() argument
3750 kvm_mmu_free_roots(struct kvm * kvm,struct kvm_mmu * mmu,ulong roots_to_free) kvm_mmu_free_roots() argument
3815 kvm_mmu_free_guest_mode_roots(struct kvm * kvm,struct kvm_mmu * mmu) kvm_mmu_free_guest_mode_roots() argument
3912 kvm_mmu_alloc_page_hash(struct kvm * kvm) kvm_mmu_alloc_page_hash() argument
3934 mmu_first_shadow_root_alloc(struct kvm * kvm) mmu_first_shadow_root_alloc() argument
4523 kvm_max_private_mapping_level(struct kvm * kvm,kvm_pfn_t pfn,u8 max_level,int gmem_order) kvm_max_private_mapping_level() argument
4622 struct kvm *kvm = vcpu->kvm; kvm_mmu_faultin_pfn() local
5026 cached_root_find_and_keep_current(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role) cached_root_find_and_keep_current() argument
5060 cached_root_find_without_current(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role) cached_root_find_without_current() argument
5081 fast_pgd_switch(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role) fast_pgd_switch() argument
5980 struct kvm *kvm = vcpu->kvm; kvm_mmu_unload() local
5989 is_obsolete_root(struct kvm * kvm,hpa_t root_hpa) is_obsolete_root() argument
6016 __kvm_mmu_free_obsolete_roots(struct kvm * kvm,struct kvm_mmu * mmu) __kvm_mmu_free_obsolete_roots() argument
6614 kvm_zap_obsolete_pages(struct kvm * kvm) kvm_zap_obsolete_pages() argument
6682 kvm_mmu_zap_all_fast(struct kvm * kvm) kvm_mmu_zap_all_fast() argument
6738 kvm_mmu_init_vm(struct kvm * kvm) kvm_mmu_init_vm() argument
6765 mmu_free_vm_memory_caches(struct kvm * kvm) mmu_free_vm_memory_caches() argument
6772 kvm_mmu_uninit_vm(struct kvm * kvm) kvm_mmu_uninit_vm() argument
6782 kvm_rmap_zap_gfn_range(struct kvm * kvm,gfn_t gfn_start,gfn_t gfn_end) kvm_rmap_zap_gfn_range() argument
6816 kvm_zap_gfn_range(struct kvm * kvm,gfn_t gfn_start,gfn_t gfn_end) kvm_zap_gfn_range() argument
6842 slot_rmap_write_protect(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot) slot_rmap_write_protect() argument
6849 kvm_mmu_slot_remove_write_access(struct kvm * kvm,const struct kvm_memory_slot * memslot,int start_level) kvm_mmu_slot_remove_write_access() argument
6872 need_topup_split_caches_or_resched(struct kvm * kvm) need_topup_split_caches_or_resched() argument
6887 topup_split_caches(struct kvm * kvm) topup_split_caches() argument
6920 shadow_mmu_get_sp_for_split(struct kvm * kvm,u64 * huge_sptep) shadow_mmu_get_sp_for_split() argument
6947 shadow_mmu_split_huge_page(struct kvm * kvm,const struct kvm_memory_slot * slot,u64 * huge_sptep) shadow_mmu_split_huge_page() argument
6993 shadow_mmu_try_split_huge_page(struct kvm * kvm,const struct kvm_memory_slot * slot,u64 * huge_sptep) shadow_mmu_try_split_huge_page() argument
7032 shadow_mmu_try_split_huge_pages(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot) shadow_mmu_try_split_huge_pages() argument
7078 kvm_shadow_mmu_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t start,gfn_t end,int target_level) kvm_shadow_mmu_try_split_huge_pages() argument
7097 kvm_mmu_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * memslot,u64 start,u64 end,int target_level) kvm_mmu_try_split_huge_pages() argument
7116 kvm_mmu_slot_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * memslot,int target_level) kvm_mmu_slot_try_split_huge_pages() argument
7147 kvm_mmu_zap_collapsible_spte(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot) kvm_mmu_zap_collapsible_spte() argument
7184 kvm_rmap_zap_collapsible_sptes(struct kvm * kvm,const struct kvm_memory_slot * slot) kvm_rmap_zap_collapsible_sptes() argument
7196 kvm_mmu_recover_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot) kvm_mmu_recover_huge_pages() argument
7212 kvm_mmu_slot_leaf_clear_dirty(struct kvm * kvm,const struct kvm_memory_slot * memslot) kvm_mmu_slot_leaf_clear_dirty() argument
7241 kvm_mmu_zap_all(struct kvm * kvm) kvm_mmu_zap_all() argument
7266 kvm_arch_flush_shadow_all(struct kvm * kvm) kvm_arch_flush_shadow_all() argument
7271 kvm_mmu_zap_memslot_pages_and_flush(struct kvm * kvm,struct kvm_memory_slot * slot,bool flush) kvm_mmu_zap_memslot_pages_and_flush() argument
7305 kvm_mmu_zap_memslot(struct kvm * kvm,struct kvm_memory_slot * slot) kvm_mmu_zap_memslot() argument
7323 kvm_memslot_flush_zap_all(struct kvm * kvm) kvm_memslot_flush_zap_all() argument
7329 kvm_arch_flush_shadow_memslot(struct kvm * kvm,struct kvm_memory_slot * slot) kvm_arch_flush_shadow_memslot() argument
7338 kvm_mmu_invalidate_mmio_sptes(struct kvm * kvm,u64 gen) kvm_mmu_invalidate_mmio_sptes() argument
7369 kvm_wake_nx_recovery_thread(struct kvm * kvm) kvm_wake_nx_recovery_thread() argument
7433 struct kvm *kvm; set_nx_huge_pages() local
7572 struct kvm *kvm; set_nx_huge_pages_recovery_param() local
7585 kvm_recover_nx_huge_pages(struct kvm * kvm) kvm_recover_nx_huge_pages() argument
7686 struct kvm *kvm = data; kvm_nx_huge_page_recovery_worker() local
7712 struct kvm *kvm = container_of(ka, struct kvm, arch); kvm_mmu_start_lpage_recovery() local
7730 kvm_mmu_post_init_vm(struct kvm * kvm) kvm_mmu_post_init_vm() argument
7738 kvm_mmu_pre_destroy_vm(struct kvm * kvm) kvm_mmu_pre_destroy_vm() argument
7763 kvm_arch_pre_set_memory_attributes(struct kvm * kvm,struct kvm_gfn_range * range) kvm_arch_pre_set_memory_attributes() argument
7826 hugepage_has_attrs(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn,int level,unsigned long attrs) hugepage_has_attrs() argument
7843 kvm_arch_post_set_memory_attributes(struct kvm * kvm,struct kvm_gfn_range * range) kvm_arch_post_set_memory_attributes() argument
7910 kvm_mmu_init_memslot_memory_attributes(struct kvm * kvm,struct kvm_memory_slot * slot) kvm_mmu_init_memslot_memory_attributes() argument
[all...]
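As a usage sketch for the rmap walker listed above, the write-protection path roughly pairs walk_slot_rmaps() (line 1614) with the slot_rmap_write_protect() handler (line 6842) under the write-locked mmu_lock, then flushes remote TLBs if the walk says so; see kvm_mmu_slot_remove_write_access() at line 6849 for the real caller. The helper below is hypothetical, and the level bounds and the bool flush hint returned by walk_slot_rmaps() are assumptions.

/* Sketch (would live inside mmu.c, where walk_slot_rmaps() is static):
 * write-protect every SPTE mapping the slot, from 4K up to the largest
 * hugepage level, then flush remote TLBs if anything changed. */
static void example_write_protect_slot(struct kvm *kvm,
                                       const struct kvm_memory_slot *memslot)
{
        bool flush;

        write_lock(&kvm->mmu_lock);
        flush = walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
                                PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
                                false /* flush_on_yield */);
        write_unlock(&kvm->mmu_lock);

        if (flush)
                kvm_flush_remote_tlbs_memslot(kvm, memslot);
}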
/linux/arch/s390/kvm/
pv.c
10 #include <linux/kvm.h>
21 #include "kvm-s390.h"
23 bool kvm_s390_pv_is_protected(struct kvm *kvm) in kvm_s390_pv_is_protected() argument
25 lockdep_assert_held(&kvm->lock); in kvm_s390_pv_is_protected()
26 return !!kvm_s390_pv_get_handle(kvm); in kvm_s390_pv_is_protected()
39 * @kvm: the guest
43 * Context: needs to be called with kvm->srcu held.
46 int kvm_s390_pv_make_secure(struct kvm *kvm, unsigne argument
58 kvm_s390_pv_convert_to_secure(struct kvm * kvm,unsigned long gaddr) kvm_s390_pv_convert_to_secure() argument
81 kvm_s390_pv_destroy_page(struct kvm * kvm,unsigned long gaddr) kvm_s390_pv_destroy_page() argument
118 kvm_s390_clear_pv_state(struct kvm * kvm) kvm_s390_clear_pv_state() argument
217 kvm_s390_pv_dealloc_vm(struct kvm * kvm) kvm_s390_pv_dealloc_vm() argument
225 kvm_s390_pv_alloc_vm(struct kvm * kvm) kvm_s390_pv_alloc_vm() argument
274 kvm_s390_pv_dispose_one_leftover(struct kvm * kvm,struct pv_vm_to_be_destroyed * leftover,u16 * rc,u16 * rrc) kvm_s390_pv_dispose_one_leftover() argument
309 kvm_s390_destroy_lower_2g(struct kvm * kvm) kvm_s390_destroy_lower_2g() argument
331 kvm_s390_pv_deinit_vm_fast(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_pv_deinit_vm_fast() argument
380 kvm_s390_pv_set_aside(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_pv_set_aside() argument
446 kvm_s390_pv_deinit_vm(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_pv_deinit_vm() argument
482 kvm_s390_pv_deinit_cleanup_all(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_pv_deinit_cleanup_all() argument
559 kvm_s390_pv_deinit_aside_vm(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_pv_deinit_aside_vm() argument
598 struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier); kvm_s390_pv_mmu_notifier_release() local
618 kvm_s390_pv_init_vm(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_pv_init_vm() argument
670 kvm_s390_pv_set_sec_parms(struct kvm * kvm,void * hdr,u64 length,u16 * rc,u16 * rrc) kvm_s390_pv_set_sec_parms() argument
689 unpack_one(struct kvm * kvm,unsigned long addr,u64 tweak,u64 offset,u16 * rc,u16 * rrc) unpack_one() argument
729 kvm_s390_pv_unpack(struct kvm * kvm,unsigned long addr,unsigned long size,unsigned long tweak,u16 * rc,u16 * rrc) kvm_s390_pv_unpack() argument
826 kvm_s390_pv_dump_stor_state(struct kvm * kvm,void __user * buff_user,u64 * gaddr,u64 buff_user_len,u16 * rc,u16 * rrc) kvm_s390_pv_dump_stor_state() argument
934 kvm_s390_pv_dump_complete(struct kvm * kvm,void __user * buff_user,u16 * rc,u16 * rrc) kvm_s390_pv_dump_complete() argument
[all...]
kvm-s390.c
13 #define KMSG_COMPONENT "kvm-s390"
22 #include <linux/kvm.h>
54 #include "kvm-s390.h"
261 /* available cpu features supported by kvm */
275 static int sca_switch_to_extended(struct kvm *kvm);
302 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
308 struct kvm *kvm; in kvm_clock_sync() local
313 list_for_each_entry(kvm, in kvm_clock_sync()
576 kvm_vm_ioctl_check_extension(struct kvm * kvm,long ext) kvm_vm_ioctl_check_extension() argument
696 kvm_arch_sync_dirty_log(struct kvm * kvm,struct kvm_memory_slot * memslot) kvm_arch_sync_dirty_log() argument
732 kvm_vm_ioctl_get_dirty_log(struct kvm * kvm,struct kvm_dirty_log * log) kvm_vm_ioctl_get_dirty_log() argument
764 icpt_operexc_on_all_vcpus(struct kvm * kvm) icpt_operexc_on_all_vcpus() argument
774 kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap) kvm_vm_ioctl_enable_cap() argument
930 kvm_s390_get_mem_control(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_mem_control() argument
949 kvm_s390_set_mem_control(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_mem_control() argument
1039 kvm_s390_vcpu_crypto_reset_all(struct kvm * kvm) kvm_s390_vcpu_crypto_reset_all() argument
1055 kvm_s390_vm_set_crypto(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_vm_set_crypto() argument
1135 kvm_s390_vcpu_pci_enable_interp(struct kvm * kvm) kvm_s390_vcpu_pci_enable_interp() argument
1161 kvm_s390_sync_request_broadcast(struct kvm * kvm,int req) kvm_s390_sync_request_broadcast() argument
1174 kvm_s390_vm_start_migration(struct kvm * kvm) kvm_s390_vm_start_migration() argument
1215 kvm_s390_vm_stop_migration(struct kvm * kvm) kvm_s390_vm_stop_migration() argument
1226 kvm_s390_vm_set_migration(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_vm_set_migration() argument
1247 kvm_s390_vm_get_migration(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_vm_get_migration() argument
1262 kvm_s390_set_tod_ext(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_tod_ext() argument
1279 kvm_s390_set_tod_high(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_tod_high() argument
1294 kvm_s390_set_tod_low(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_tod_low() argument
1307 kvm_s390_set_tod(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_tod() argument
1344 kvm_s390_get_tod_clock(struct kvm * kvm,struct kvm_s390_vm_tod_clock * gtod) kvm_s390_get_tod_clock() argument
1364 kvm_s390_get_tod_ext(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_tod_ext() argument
1378 kvm_s390_get_tod_high(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_tod_high() argument
1390 kvm_s390_get_tod_low(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_tod_low() argument
1402 kvm_s390_get_tod(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_tod() argument
1426 kvm_s390_set_processor(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_processor() argument
1472 kvm_s390_set_processor_feat(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_processor_feat() argument
1498 kvm_s390_set_processor_subfunc(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_processor_subfunc() argument
1587 kvm_s390_set_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_uv_feat() argument
1611 kvm_s390_set_cpu_model(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_cpu_model() argument
1632 kvm_s390_get_processor(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_processor() argument
1660 kvm_s390_get_machine(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_machine() argument
1694 kvm_s390_get_processor_feat(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_processor_feat() argument
1709 kvm_s390_get_machine_feat(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_machine_feat() argument
1724 kvm_s390_get_processor_subfunc(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_processor_subfunc() argument
1795 kvm_s390_get_machine_subfunc(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_machine_subfunc() argument
1866 kvm_s390_get_processor_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_processor_uv_feat() argument
1878 kvm_s390_get_machine_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_machine_uv_feat() argument
1893 kvm_s390_get_cpu_model(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_cpu_model() argument
1937 kvm_s390_update_topology_change_report(struct kvm * kvm,bool val) kvm_s390_update_topology_change_report() argument
1952 kvm_s390_set_topo_change_indication(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_set_topo_change_indication() argument
1962 kvm_s390_get_topo_change_indication(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_get_topo_change_indication() argument
1977 kvm_s390_vm_set_attr(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_vm_set_attr() argument
2008 kvm_s390_vm_get_attr(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_vm_get_attr() argument
2036 kvm_s390_vm_has_attr(struct kvm * kvm,struct kvm_device_attr * attr) kvm_s390_vm_has_attr() argument
2114 kvm_s390_get_skeys(struct kvm * kvm,struct kvm_s390_skeys * args) kvm_s390_get_skeys() argument
2162 kvm_s390_set_skeys(struct kvm * kvm,struct kvm_s390_skeys * args) kvm_s390_set_skeys() argument
2235 kvm_s390_peek_cmma(struct kvm * kvm,struct kvm_s390_cmma_log * args,u8 * res,unsigned long bufsize) kvm_s390_peek_cmma() argument
2292 kvm_s390_get_cmma(struct kvm * kvm,struct kvm_s390_cmma_log * args,u8 * res,unsigned long bufsize) kvm_s390_get_cmma() argument
2351 kvm_s390_get_cmma_bits(struct kvm * kvm,struct kvm_s390_cmma_log * args) kvm_s390_get_cmma_bits() argument
2409 kvm_s390_set_cmma_bits(struct kvm * kvm,const struct kvm_s390_cmma_log * args) kvm_s390_set_cmma_bits() argument
2480 kvm_s390_cpus_from_pv(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_cpus_from_pv() argument
2521 kvm_s390_cpus_to_pv(struct kvm * kvm,u16 * rc,u16 * rrc) kvm_s390_cpus_to_pv() argument
2592 kvm_s390_pv_dmp(struct kvm * kvm,struct kvm_pv_cmd * cmd,struct kvm_s390_pv_dmp dmp) kvm_s390_pv_dmp() argument
2654 kvm_s390_handle_pv(struct kvm * kvm,struct kvm_pv_cmd * cmd) kvm_s390_handle_pv() argument
2905 kvm_s390_vm_mem_op_abs(struct kvm * kvm,struct kvm_s390_mem_op * mop) kvm_s390_vm_mem_op_abs() argument
2958 kvm_s390_vm_mem_op_cmpxchg(struct kvm * kvm,struct kvm_s390_mem_op * mop) kvm_s390_vm_mem_op_cmpxchg() argument
3002 kvm_s390_vm_mem_op(struct kvm * kvm,struct kvm_s390_mem_op * mop) kvm_s390_vm_mem_op() argument
3029 struct kvm *kvm = filp->private_data; kvm_arch_vm_ioctl() local
3193 kvm_s390_set_crycb_format(struct kvm * kvm) kvm_s390_set_crycb_format() argument
3225 kvm_arch_crypto_set_masks(struct kvm * kvm,unsigned long * apm,unsigned long * aqm,unsigned long * adm) kvm_arch_crypto_set_masks() argument
3275 kvm_arch_crypto_clear_masks(struct kvm * kvm) kvm_arch_crypto_clear_masks() argument
3300 kvm_s390_crypto_init(struct kvm * kvm) kvm_s390_crypto_init() argument
3318 sca_dispose(struct kvm * kvm) sca_dispose() argument
3327 kvm_arch_free_vm(struct kvm * kvm) kvm_arch_free_vm() argument
3335 kvm_arch_init_vm(struct kvm * kvm,unsigned long type) kvm_arch_init_vm() argument
3506 kvm_arch_destroy_vm(struct kvm * kvm) kvm_arch_destroy_vm() argument
3618 sca_switch_to_extended(struct kvm * kvm) sca_switch_to_extended() argument
3661 sca_can_add_vcpu(struct kvm * kvm,unsigned int id) sca_can_add_vcpu() argument
3801 kvm_has_pckmo_subfunc(struct kvm * kvm,unsigned long nr) kvm_has_pckmo_subfunc() argument
3809 kvm_has_pckmo_ecc(struct kvm * kvm) kvm_has_pckmo_ecc() argument
3820 kvm_has_pckmo_hmac(struct kvm * kvm) kvm_has_pckmo_hmac() argument
3971 kvm_arch_vcpu_precreate(struct kvm * kvm,unsigned int id) kvm_arch_vcpu_precreate() argument
4113 struct kvm *kvm = gmap->private; kvm_gmap_notifier() local
4521 struct kvm *kvm = gmap->private; __kvm_s390_fixup_fault_sync() local
4665 __kvm_s390_set_tod_clock(struct kvm * kvm,const struct kvm_s390_vm_tod_clock * gtod) __kvm_s390_set_tod_clock() argument
4693 kvm_s390_try_set_tod_clock(struct kvm * kvm,const struct kvm_s390_vm_tod_clock * gtod) kvm_s390_try_set_tod_clock() argument
5469 __disable_ibs_on_all_vcpus(struct kvm * kvm) __disable_ibs_on_all_vcpus() argument
6034 kvm_arch_irqchip_in_kernel(struct kvm * kvm) kvm_arch_irqchip_in_kernel() argument
6040 kvm_arch_prepare_memory_region(struct kvm * kvm,const struct kvm_memory_slot * old,struct kvm_memory_slot * new,enum kvm_mr_change change) kvm_arch_prepare_memory_region() argument
6092 kvm_arch_commit_memory_region(struct kvm * kvm,struct kvm_memory_slot * old,const struct kvm_memory_slot * new,enum kvm_mr_change change) kvm_arch_commit_memory_region() argument
[all...]
kvm-s390.h
3 * definition for kvm on s390
16 #include <linux/kvm.h>
75 debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
103 return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask); in is_vcpu_idle()
106 static inline int kvm_is_ucontrol(struct kvm *kvm) in kvm_is_ucontrol() argument
109 if (kvm->arch.gmap) in kvm_is_ucontrol()
217 /* test availability of facility in a kvm instance */
218 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) in test_kvm_facility() argument
235 test_kvm_cpu_feat(struct kvm * kvm,unsigned long nr) test_kvm_cpu_feat() argument
242 kvm_s390_user_cpu_state_ctrl(struct kvm * kvm) kvm_s390_user_cpu_state_ctrl() argument
247 kvm_s390_set_user_cpu_state_ctrl(struct kvm * kvm) kvm_s390_set_user_cpu_state_ctrl() argument
270 kvm_s390_get_gisa_desc(struct kvm * kvm) kvm_s390_get_gisa_desc() argument
284 gpa_to_hva(struct kvm * kvm,gpa_t gpa) gpa_to_hva() argument
315 kvm_s390_pv_get_handle(struct kvm * kvm) kvm_s390_pv_get_handle() argument
476 kvm_s390_vcpu_block_all(struct kvm * kvm) kvm_s390_vcpu_block_all() argument
486 kvm_s390_vcpu_unblock_all(struct kvm * kvm) kvm_s390_vcpu_unblock_all() argument
495 kvm_s390_get_tod_clock_fast(struct kvm * kvm) kvm_s390_get_tod_clock_fast() argument
574 kvm_s390_get_ipte_control(struct kvm * kvm) kvm_s390_get_ipte_control() argument
[all...]
/linux/virt/kvm/
eventfd.c
3 * kvm eventfd support - use eventfd objects to signal various KVM events
13 #include <linux/kvm.h>
27 #include <trace/events/kvm.h>
29 #include <kvm/iodev.h>
36 kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) in kvm_arch_irqfd_allowed() argument
46 struct kvm *kvm = irqfd->kvm; in irqfd_inject() local
49 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in irqfd_inject()
76 struct kvm *kvm; irqfd_resampler_ack() local
95 struct kvm *kvm = resampler->kvm; irqfd_resampler_shutdown() local
126 struct kvm *kvm = irqfd->kvm; irqfd_shutdown() local
184 kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry * irq,struct kvm * kvm,int irq_source_id,int level,bool line_status) kvm_arch_set_irq_inatomic() argument
201 struct kvm *kvm = irqfd->kvm; irqfd_wakeup() local
258 irqfd_update(struct kvm * kvm,struct kvm_kernel_irqfd * irqfd) irqfd_update() argument
281 struct kvm *kvm; global() member
291 struct kvm *kvm = p->kvm; kvm_irqfd_register() local
356 kvm_irqfd_assign(struct kvm * kvm,struct kvm_irqfd * args) kvm_irqfd_assign() argument
509 kvm_irq_has_notifier(struct kvm * kvm,unsigned irqchip,unsigned pin) kvm_irq_has_notifier() argument
530 kvm_notify_acked_gsi(struct kvm * kvm,int gsi) kvm_notify_acked_gsi() argument
540 kvm_notify_acked_irq(struct kvm * kvm,unsigned irqchip,unsigned pin) kvm_notify_acked_irq() argument
553 kvm_register_irq_ack_notifier(struct kvm * kvm,struct kvm_irq_ack_notifier * kian) kvm_register_irq_ack_notifier() argument
562 kvm_unregister_irq_ack_notifier(struct kvm * kvm,struct kvm_irq_ack_notifier * kian) kvm_unregister_irq_ack_notifier() argument
576 kvm_irqfd_deassign(struct kvm * kvm,struct kvm_irqfd * args) kvm_irqfd_deassign() argument
616 kvm_irqfd(struct kvm * kvm,struct kvm_irqfd * args) kvm_irqfd() argument
632 kvm_irqfd_release(struct kvm * kvm) kvm_irqfd_release() argument
655 kvm_irq_routing_update(struct kvm * kvm) kvm_irq_routing_update() argument
678 kvm_notify_irqfd_resampler(struct kvm * kvm,unsigned int irqchip,unsigned int pin) kvm_notify_irqfd_resampler() argument
835 ioeventfd_check_collision(struct kvm * kvm,struct _ioeventfd * p) ioeventfd_check_collision() argument
860 kvm_assign_ioeventfd_idx(struct kvm * kvm,enum kvm_bus bus_idx,struct kvm_ioeventfd * args) kvm_assign_ioeventfd_idx() argument
924 kvm_deassign_ioeventfd_idx(struct kvm * kvm,enum kvm_bus bus_idx,struct kvm_ioeventfd * args) kvm_deassign_ioeventfd_idx() argument
967 kvm_deassign_ioeventfd(struct kvm * kvm,struct kvm_ioeventfd * args) kvm_deassign_ioeventfd() argument
979 kvm_assign_ioeventfd(struct kvm * kvm,struct kvm_ioeventfd * args) kvm_assign_ioeventfd() argument
1031 kvm_ioeventfd(struct kvm * kvm,struct kvm_ioeventfd * args) kvm_ioeventfd() argument
1040 kvm_eventfd_init(struct kvm * kvm) kvm_eventfd_init() argument
[all...]
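From userspace, the irqfd machinery above is reached through the KVM_IRQFD vm ioctl, which lands in kvm_irqfd() at line 616. A minimal sketch (error handling trimmed; vm_fd is assumed to be an already-created VM file descriptor, and GSI 5 is an arbitrary example):

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Wire an eventfd to guest GSI 5: each write to the eventfd injects the
 * interrupt via irqfd_inject() shown above. */
static int example_wire_irqfd(int vm_fd)
{
        struct kvm_irqfd irqfd;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(&irqfd, 0, sizeof(irqfd));
        irqfd.fd = efd;
        irqfd.gsi = 5;
        return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}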
kvm_main.c
13 #include <kvm/iodev.h>
16 #include <linux/kvm.h>
65 #include <trace/events/kvm.h>
107 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
132 * - Prevent a compat task from opening /dev/kvm
151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
157 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) kvm_arch_guest_memory_reclaimed() argument
244 kvm_make_vcpus_request_mask(struct kvm * kvm,unsigned int req,unsigned long * vcpu_bitmap) kvm_make_vcpus_request_mask() argument
270 kvm_make_all_cpus_request(struct kvm * kvm,unsigned int req) kvm_make_all_cpus_request() argument
293 kvm_flush_remote_tlbs(struct kvm * kvm) kvm_flush_remote_tlbs() argument
314 kvm_flush_remote_tlbs_range(struct kvm * kvm,gfn_t gfn,u64 nr_pages) kvm_flush_remote_tlbs_range() argument
327 kvm_flush_remote_tlbs_memslot(struct kvm * kvm,const struct kvm_memory_slot * memslot) kvm_flush_remote_tlbs_memslot() argument
341 kvm_flush_shadow_all(struct kvm * kvm) kvm_flush_shadow_all() argument
441 kvm_vcpu_init(struct kvm_vcpu * vcpu,struct kvm * kvm,unsigned id) kvm_vcpu_init() argument
482 kvm_destroy_vcpus(struct kvm * kvm) kvm_destroy_vcpus() argument
561 kvm_handle_hva_range(struct kvm * kvm,const struct kvm_mmu_notifier_range * range) kvm_handle_hva_range() argument
656 struct kvm *kvm = mmu_notifier_to_kvm(mn); kvm_age_hva_range() local
678 kvm_mmu_invalidate_begin(struct kvm * kvm) kvm_mmu_invalidate_begin() argument
694 kvm_mmu_invalidate_range_add(struct kvm * kvm,gfn_t start,gfn_t end) kvm_mmu_invalidate_range_add() argument
720 kvm_mmu_unmap_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range) kvm_mmu_unmap_gfn_range() argument
729 struct kvm *kvm = mmu_notifier_to_kvm(mn); kvm_mmu_notifier_invalidate_range_start() local
776 kvm_mmu_invalidate_end(struct kvm * kvm) kvm_mmu_invalidate_end() argument
805 struct kvm *kvm = mmu_notifier_to_kvm(mn); kvm_mmu_notifier_invalidate_range_end() local
880 struct kvm *kvm = mmu_notifier_to_kvm(mn); kvm_mmu_notifier_release() local
897 kvm_init_mmu_notifier(struct kvm * kvm) kvm_init_mmu_notifier() argument
905 kvm_init_mmu_notifier(struct kvm * kvm) kvm_init_mmu_notifier() argument
917 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); kvm_pm_notifier_call() local
922 kvm_init_pm_notifier(struct kvm * kvm) kvm_init_pm_notifier() argument
930 kvm_destroy_pm_notifier(struct kvm * kvm) kvm_destroy_pm_notifier() argument
935 kvm_init_pm_notifier(struct kvm * kvm) kvm_init_pm_notifier() argument
939 kvm_destroy_pm_notifier(struct kvm * kvm) kvm_destroy_pm_notifier() argument
954 kvm_free_memslot(struct kvm * kvm,struct kvm_memory_slot * slot) kvm_free_memslot() argument
966 kvm_free_memslots(struct kvm * kvm,struct kvm_memslots * slots) kvm_free_memslots() argument
998 kvm_destroy_vm_debugfs(struct kvm * kvm) kvm_destroy_vm_debugfs() argument
1016 kvm_create_vm_debugfs(struct kvm * kvm,const char * fdname) kvm_create_vm_debugfs() argument
1092 kvm_arch_pre_destroy_vm(struct kvm * kvm) kvm_arch_pre_destroy_vm() argument
1102 kvm_arch_create_vm_debugfs(struct kvm * kvm) kvm_arch_create_vm_debugfs() argument
1108 struct kvm *kvm = kvm_arch_alloc_vm(); kvm_create_vm() local
1243 kvm_destroy_devices(struct kvm * kvm) kvm_destroy_devices() argument
1264 kvm_destroy_vm(struct kvm * kvm) kvm_destroy_vm() argument
1325 kvm_get_kvm(struct kvm * kvm) kvm_get_kvm() argument
1335 kvm_get_kvm_safe(struct kvm * kvm) kvm_get_kvm_safe() argument
1341 kvm_put_kvm(struct kvm * kvm) kvm_put_kvm() argument
1355 kvm_put_kvm_no_destroy(struct kvm * kvm) kvm_put_kvm_no_destroy() argument
1363 struct kvm *kvm = filp->private_data; kvm_vm_release() local
1371 kvm_trylock_all_vcpus(struct kvm * kvm) kvm_trylock_all_vcpus() argument
1393 kvm_lock_all_vcpus(struct kvm * kvm) kvm_lock_all_vcpus() argument
1418 kvm_unlock_all_vcpus(struct kvm * kvm) kvm_unlock_all_vcpus() argument
1445 kvm_get_inactive_memslots(struct kvm * kvm,int as_id) kvm_get_inactive_memslots() argument
1525 kvm_replace_memslot(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * new) kvm_replace_memslot() argument
1586 check_memory_region_flags(struct kvm * kvm,const struct kvm_userspace_memory_region2 * mem) check_memory_region_flags() argument
1613 kvm_swap_active_memslots(struct kvm * kvm,int as_id) kvm_swap_active_memslots() argument
1671 kvm_prepare_memory_region(struct kvm * kvm,const struct kvm_memory_slot * old,struct kvm_memory_slot * new,enum kvm_mr_change change) kvm_prepare_memory_region() argument
1709 kvm_commit_memory_region(struct kvm * kvm,struct kvm_memory_slot * old,const struct kvm_memory_slot * new,enum kvm_mr_change change) kvm_commit_memory_region() argument
1771 kvm_activate_memslot(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * new) kvm_activate_memslot() argument
1796 kvm_invalidate_memslot(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * invalid_slot) kvm_invalidate_memslot() argument
1838 kvm_create_memslot(struct kvm * kvm,struct kvm_memory_slot * new) kvm_create_memslot() argument
1846 kvm_delete_memslot(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * invalid_slot) kvm_delete_memslot() argument
1858 kvm_move_memslot(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * new,struct kvm_memory_slot * invalid_slot) kvm_move_memslot() argument
1871 kvm_update_flags_memslot(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * new) kvm_update_flags_memslot() argument
1884 kvm_set_memslot(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * new,enum kvm_mr_change change) kvm_set_memslot() argument
1992 kvm_set_memory_region(struct kvm * kvm,const struct kvm_userspace_memory_region2 * mem) kvm_set_memory_region() argument
2125 kvm_set_internal_memslot(struct kvm * kvm,const struct kvm_userspace_memory_region2 * mem) kvm_set_internal_memslot() argument
2138 kvm_vm_ioctl_set_memory_region(struct kvm * kvm,struct kvm_userspace_memory_region2 * mem) kvm_vm_ioctl_set_memory_region() argument
2156 kvm_get_dirty_log(struct kvm * kvm,struct kvm_dirty_log * log,int * is_dirty,struct kvm_memory_slot ** memslot) kvm_get_dirty_log() argument
2219 kvm_get_dirty_log_protect(struct kvm * kvm,struct kvm_dirty_log * log) kvm_get_dirty_log_protect() argument
2310 kvm_vm_ioctl_get_dirty_log(struct kvm * kvm,struct kvm_dirty_log * log) kvm_vm_ioctl_get_dirty_log() argument
2329 kvm_clear_dirty_log_protect(struct kvm * kvm,struct kvm_clear_dirty_log * log) kvm_clear_dirty_log_protect() argument
2405 kvm_vm_ioctl_clear_dirty_log(struct kvm * kvm,struct kvm_clear_dirty_log * log) kvm_vm_ioctl_clear_dirty_log() argument
2420 kvm_supported_mem_attributes(struct kvm * kvm) kvm_supported_mem_attributes() argument
2432 kvm_range_has_memory_attributes(struct kvm * kvm,gfn_t start,gfn_t end,unsigned long mask,unsigned long attrs) kvm_range_has_memory_attributes() argument
2463 kvm_handle_gfn_range(struct kvm * kvm,struct kvm_mmu_notifier_range * range) kvm_handle_gfn_range() argument
2515 kvm_pre_set_memory_attributes(struct kvm * kvm,struct kvm_gfn_range * range) kvm_pre_set_memory_attributes() argument
2535 kvm_vm_set_mem_attributes(struct kvm * kvm,gfn_t start,gfn_t end,unsigned long attributes) kvm_vm_set_mem_attributes() argument
2597 kvm_vm_ioctl_set_mem_attributes(struct kvm * kvm,struct kvm_memory_attributes * attrs) kvm_vm_ioctl_set_mem_attributes() argument
2626 gfn_to_memslot(struct kvm * kvm,gfn_t gfn) gfn_to_memslot() argument
2665 kvm_is_visible_gfn(struct kvm * kvm,gfn_t gfn) kvm_is_visible_gfn() argument
2738 gfn_to_hva(struct kvm * kvm,gfn_t gfn) gfn_to_hva() argument
2769 gfn_to_hva_prot(struct kvm * kvm,gfn_t gfn,bool * writable) gfn_to_hva_prot() argument
3092 __gfn_to_page(struct kvm * kvm,gfn_t gfn,bool write) __gfn_to_page() argument
3196 kvm_read_guest_page(struct kvm * kvm,gfn_t gfn,void * data,int offset,int len) kvm_read_guest_page() argument
3214 kvm_read_guest(struct kvm * kvm,gpa_t gpa,void * data,unsigned long len) kvm_read_guest() argument
3286 __kvm_write_guest_page(struct kvm * kvm,struct kvm_memory_slot * memslot,gfn_t gfn,const void * data,int offset,int len) __kvm_write_guest_page() argument
3306 kvm_write_guest_page(struct kvm * kvm,gfn_t gfn,const void * data,int offset,int len) kvm_write_guest_page() argument
3324 kvm_write_guest(struct kvm * kvm,gpa_t gpa,const void * data,unsigned long len) kvm_write_guest() argument
3407 kvm_gfn_to_hva_cache_init(struct kvm * kvm,struct gfn_to_hva_cache * ghc,gpa_t gpa,unsigned long len) kvm_gfn_to_hva_cache_init() argument
3415 kvm_write_guest_offset_cached(struct kvm * kvm,struct gfn_to_hva_cache * ghc,void * data,unsigned int offset,unsigned long len) kvm_write_guest_offset_cached() argument
3446 kvm_write_guest_cached(struct kvm * kvm,struct gfn_to_hva_cache * ghc,void * data,unsigned long len) kvm_write_guest_cached() argument
3453 kvm_read_guest_offset_cached(struct kvm * kvm,struct gfn_to_hva_cache * ghc,void * data,unsigned int offset,unsigned long len) kvm_read_guest_offset_cached() argument
3483 kvm_read_guest_cached(struct kvm * kvm,struct gfn_to_hva_cache * ghc,void * data,unsigned long len) kvm_read_guest_cached() argument
3490 kvm_clear_guest(struct kvm * kvm,gpa_t gpa,unsigned long len) kvm_clear_guest() argument
3510 mark_page_dirty_in_slot(struct kvm * kvm,const struct kvm_memory_slot * memslot,gfn_t gfn) mark_page_dirty_in_slot() argument
3535 mark_page_dirty(struct kvm * kvm,gfn_t gfn) mark_page_dirty() argument
3695 struct kvm *kvm = vcpu->kvm; kvm_vcpu_max_halt_poll_ns() local
3959 struct kvm *kvm = me->kvm; kvm_vcpu_on_spin() local
4032 kvm_page_in_dirty_ring(struct kvm * kvm,unsigned long pgoff) kvm_page_in_dirty_ring() argument
4148 kvm_vm_ioctl_create_vcpu(struct kvm * kvm,unsigned long id) kvm_vm_ioctl_create_vcpu() argument
4377 struct kvm *kvm = vcpu->kvm; kvm_wait_for_vcpu_online() local
4741 struct kvm *kvm = dev->kvm; kvm_device_release() local
4795 kvm_ioctl_create_device(struct kvm * kvm,struct kvm_create_device * cd) kvm_ioctl_create_device() argument
4854 kvm_vm_ioctl_check_extension_generic(struct kvm * kvm,long arg) kvm_vm_ioctl_check_extension_generic() argument
4928 kvm_vm_ioctl_enable_dirty_log_ring(struct kvm * kvm,u32 size) kvm_vm_ioctl_enable_dirty_log_ring() argument
4966 kvm_vm_ioctl_reset_dirty_pages(struct kvm * kvm) kvm_vm_ioctl_reset_dirty_pages() argument
4991 kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap) kvm_vm_ioctl_enable_cap() argument
4997 kvm_are_all_memslots_empty(struct kvm * kvm) kvm_are_all_memslots_empty() argument
5012 kvm_vm_ioctl_enable_cap_generic(struct kvm * kvm,struct kvm_enable_cap * cap) kvm_vm_ioctl_enable_cap_generic() argument
5083 struct kvm *kvm = file->private_data; kvm_vm_stats_read() local
5092 struct kvm *kvm = file->private_data; kvm_vm_stats_release() local
5105 kvm_vm_ioctl_get_stats_fd(struct kvm * kvm) kvm_vm_ioctl_get_stats_fd() argument
5138 struct kvm *kvm = filp->private_data; kvm_vm_ioctl() local
5403 struct kvm *kvm = filp->private_data; kvm_vm_compat_ioctl() local
5471 struct kvm *kvm; kvm_dev_ioctl_create_vm() local
5935 kvm_io_bus_register_dev(struct kvm * kvm,enum kvm_bus bus_idx,gpa_t addr,int len,struct kvm_io_device * dev) kvm_io_bus_register_dev() argument
5979 kvm_io_bus_unregister_dev(struct kvm * kvm,enum kvm_bus bus_idx,struct kvm_io_device * dev) kvm_io_bus_unregister_dev() argument
6027 kvm_io_bus_get_dev(struct kvm * kvm,enum kvm_bus bus_idx,gpa_t addr) kvm_io_bus_get_dev() argument
6087 kvm_get_stat_per_vm(struct kvm * kvm,size_t offset,u64 * val) kvm_get_stat_per_vm() argument
6094 kvm_clear_stat_per_vm(struct kvm * kvm,size_t offset) kvm_clear_stat_per_vm() argument
6101 kvm_get_stat_per_vcpu(struct kvm * kvm,size_t offset,u64 * val) kvm_get_stat_per_vcpu() argument
6114 kvm_clear_stat_per_vcpu(struct kvm * kvm,size_t offset) kvm_clear_stat_per_vcpu() argument
6184 struct kvm *kvm; vm_stat_get() local
6200 struct kvm *kvm; vm_stat_clear() local
6220 struct kvm *kvm; vcpu_stat_get() local
6236 struct kvm *kvm; vcpu_stat_clear() local
6254 kvm_uevent_notify_change(unsigned int type,struct kvm * kvm) kvm_uevent_notify_change() argument
[all...]
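The comment at line 107 of kvm_main.c above documents the VM-scope lock ordering: kvm->lock, then kvm->slots_lock, then kvm->irq_lock. A short sketch of that order (the helper is hypothetical; only the three mutexes and their ordering come from the comment):

#include <linux/kvm_host.h>

/* Take the VM-scope mutexes in the documented order and release in
 * reverse; acquiring them in any other order risks an ABBA deadlock. */
static void example_vm_lock_order(struct kvm *kvm)
{
        mutex_lock(&kvm->lock);
        mutex_lock(&kvm->slots_lock);
        mutex_lock(&kvm->irq_lock);
        /* ... work that needs all three held ... */
        mutex_unlock(&kvm->irq_lock);
        mutex_unlock(&kvm->slots_lock);
        mutex_unlock(&kvm->lock);
}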
kvm_mm.h
14 #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) argument
15 #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) argument
16 #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) argument
18 #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) argument
19 #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) argument
20 #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) argument
63 gfn_to_pfn_cache_invalidate_start(struct kvm * kvm,unsigned long start,unsigned long end) gfn_to_pfn_cache_invalidate_start() argument
82 kvm_gmem_bind(struct kvm * kvm,struct kvm_memory_slot * slot,unsigned int fd,loff_t offset) kvm_gmem_bind() argument
[all...]
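The KVM_MMU_LOCK wrappers listed above let generic code take kvm->mmu_lock without knowing whether the architecture backs it with an rwlock or a spinlock. A brief usage sketch (the helper is hypothetical):

#include <linux/kvm_host.h>
#include "kvm_mm.h"

/* Exclusive mmu_lock section: expands to write_lock()/write_unlock() on
 * architectures using the rwlock variant, spin_lock()/spin_unlock()
 * otherwise, per the #define pairs shown above. */
static void example_mmu_mutation(struct kvm *kvm)
{
        KVM_MMU_LOCK(kvm);
        /* ... mutate state protected by kvm->mmu_lock ... */
        KVM_MMU_UNLOCK(kvm);
}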
/linux/arch/arm64/kvm/vgic/
vgic-init.c
10 #include <kvm/arm_vgic.h>
46 * @kvm: The VM whose VGIC distributor should be initialized
52 void kvm_vgic_early_init(struct kvm *kvm) in kvm_vgic_early_init() argument
54 struct vgic_dist *dist = &kvm->arch.vgic; in kvm_vgic_early_init()
68 * @kvm: kvm struct pointer
71 int kvm_vgic_create(struct kvm *kvm, u32 type) in kvm_vgic_create() argument
90 * - Holding kvm in kvm_vgic_create()
185 kvm_vgic_dist_init(struct kvm * kvm,unsigned int nr_spis) kvm_vgic_dist_init() argument
373 vgic_init(struct kvm * kvm) vgic_init() argument
420 kvm_vgic_dist_destroy(struct kvm * kvm) kvm_vgic_dist_destroy() argument
487 struct kvm *kvm = vcpu->kvm; kvm_vgic_vcpu_destroy() local
494 kvm_vgic_destroy(struct kvm * kvm) kvm_vgic_destroy() argument
524 vgic_lazy_init(struct kvm * kvm) vgic_lazy_init() argument
559 kvm_vgic_map_resources(struct kvm * kvm) kvm_vgic_map_resources() argument
[all...]
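kvm_vgic_create() at line 71 above backs the userspace device-creation path; a minimal userspace sketch using the KVM_CREATE_DEVICE ioctl (vm_fd is assumed to be an open VM descriptor, and the GICv3 distributor type is picked as an example):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask KVM to instantiate an in-kernel GICv3; on success, cd.fd holds a
 * device fd for further KVM_SET_DEVICE_ATTR configuration. */
static int example_create_vgic_v3(int vm_fd)
{
        struct kvm_create_device cd;

        memset(&cd, 0, sizeof(cd));
        cd.type = KVM_DEV_TYPE_ARM_VGIC_V3;
        return ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
}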
vgic.h
47 * As per Documentation/virt/kvm/devices/arm-vgic-v3.rst,
86 * As per Documentation/virt/kvm/devices/arm-vgic-its.rst,
123 return vcpu->kvm->arch.vgic.implementation_rev; in vgic_get_implementation_rev()
154 static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa, in vgic_write_guest_lock() argument
157 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_write_guest_lock()
161 ret = kvm_write_guest_lock(kvm, gpa, data, len); in vgic_write_guest_lock()
230 struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid);
232 void vgic_put_irq(struct kvm *kv
360 vgic_v3_rd_region_size(struct kvm * kvm,struct vgic_redist_region * rdreg) vgic_v3_rd_region_size() argument
374 vgic_dist_overlap(struct kvm * kvm,gpa_t base,size_t size) vgic_dist_overlap() argument
397 vgic_supports_direct_irqs(struct kvm * kvm) vgic_supports_direct_irqs() argument
418 kvm_has_gicv3(struct kvm * kvm) kvm_has_gicv3() argument
429 vgic_is_v3_compat(struct kvm * kvm) vgic_is_v3_compat() argument
435 vgic_is_v3(struct kvm * kvm) vgic_is_v3() argument
[all...]
/linux/arch/powerpc/kvm/
H A Dbook3s_hv_uvmem.c31 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
49 * 1. kvm->srcu - Protects KVM memslots
50 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
51 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
233 struct kvm *kvm; member
248 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_init() argument
263 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
264 list_add(&p->list, &kvm in kvmppc_uvmem_slot_init()
273 kvmppc_uvmem_slot_free(struct kvm * kvm,const struct kvm_memory_slot * slot) kvmppc_uvmem_slot_free() argument
289 kvmppc_mark_gfn(unsigned long gfn,struct kvm * kvm,unsigned long flag,unsigned long uvmem_pfn) kvmppc_mark_gfn() argument
309 kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,unsigned long uvmem_pfn,struct kvm * kvm) kvmppc_gfn_secure_uvmem_pfn() argument
315 kvmppc_gfn_secure_mem_pfn(unsigned long gfn,struct kvm * kvm) kvmppc_gfn_secure_mem_pfn() argument
321 kvmppc_gfn_shared(unsigned long gfn,struct kvm * kvm) kvmppc_gfn_shared() argument
327 kvmppc_gfn_remove(unsigned long gfn,struct kvm * kvm) kvmppc_gfn_remove() argument
333 kvmppc_gfn_is_uvmem_pfn(unsigned long gfn,struct kvm * kvm,unsigned long * uvmem_pfn) kvmppc_gfn_is_uvmem_pfn() argument
362 kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot * memslot,struct kvm * kvm,unsigned long * gfn) kvmppc_next_nontransitioned_gfn() argument
391 kvmppc_memslot_page_merge(struct kvm * kvm,const struct kvm_memory_slot * memslot,bool merge) kvmppc_memslot_page_merge() argument
430 __kvmppc_uvmem_memslot_delete(struct kvm * kvm,const struct kvm_memory_slot * memslot) __kvmppc_uvmem_memslot_delete() argument
438 __kvmppc_uvmem_memslot_create(struct kvm * kvm,const struct kvm_memory_slot * memslot) __kvmppc_uvmem_memslot_create() argument
465 kvmppc_h_svm_init_start(struct kvm * kvm) kvmppc_h_svm_init_start() argument
516 __kvmppc_svm_page_out(struct vm_area_struct * vma,unsigned long start,unsigned long end,unsigned long page_shift,struct kvm * kvm,unsigned long gpa,struct page * fault_page) __kvmppc_svm_page_out() argument
589 kvmppc_svm_page_out(struct vm_area_struct * vma,unsigned long start,unsigned long end,unsigned long page_shift,struct kvm * kvm,unsigned long gpa,struct page * fault_page) kvmppc_svm_page_out() argument
611 kvmppc_uvmem_drop_pages(const struct kvm_memory_slot * slot,struct kvm * kvm,bool skip_page_out) kvmppc_uvmem_drop_pages() argument
659 kvmppc_h_svm_init_abort(struct kvm * kvm) kvmppc_h_svm_init_abort() argument
695 kvmppc_uvmem_get_page(unsigned long gpa,struct kvm * kvm) kvmppc_uvmem_get_page() argument
742 kvmppc_svm_page_in(struct vm_area_struct * vma,unsigned long start,unsigned long end,unsigned long gpa,struct kvm * kvm,unsigned long page_shift,bool pagein) kvmppc_svm_page_in() argument
794 kvmppc_uv_migrate_mem_slot(struct kvm * kvm,const struct kvm_memory_slot * memslot) kvmppc_uv_migrate_mem_slot() argument
830 kvmppc_h_svm_init_done(struct kvm * kvm) kvmppc_h_svm_init_done() argument
877 kvmppc_share_page(struct kvm * kvm,unsigned long gpa,unsigned long page_shift) kvmppc_share_page() argument
935 kvmppc_h_svm_page_in(struct kvm * kvm,unsigned long gpa,unsigned long flags,unsigned long page_shift) kvmppc_h_svm_page_in() argument
1045 kvmppc_h_svm_page_out(struct kvm * kvm,unsigned long gpa,unsigned long flags,unsigned long page_shift) kvmppc_h_svm_page_out() argument
1083 kvmppc_send_page_to_uv(struct kvm * kvm,unsigned long gfn) kvmppc_send_page_to_uv() argument
1104 kvmppc_uvmem_memslot_create(struct kvm * kvm,const struct kvm_memory_slot * new) kvmppc_uvmem_memslot_create() argument
1114 kvmppc_uvmem_memslot_delete(struct kvm * kvm,const struct kvm_memory_slot * old) kvmppc_uvmem_memslot_delete() argument
[all...]
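The book3s_hv_uvmem.c header comment above spells out a strict lock ordering: kvm->srcu, then mm->mmap_lock, then kvm->arch.uvmem_lock. The real primitives are kernel SRCU, an rwsem and a mutex, so the snippet below is only an ordering illustration in plain pthreads with invented names; it shows the discipline the comment documents, i.e. acquire in the fixed order on every path and release in reverse so that two paths can never deadlock on each other.

/*
 * Ordering illustration only: the kernel locks named above (srcu,
 * mmap_lock, uvmem_lock) are different primitives; what matters is that
 * every page-in/page-out path takes them in the same documented order
 * and drops them in reverse, which rules out ABBA deadlocks.
 */
#include <pthread.h>

static pthread_mutex_t memslots_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for kvm->srcu       */
static pthread_mutex_t mmap_lock     = PTHREAD_MUTEX_INITIALIZER; /* stand-in for mm->mmap_lock   */
static pthread_mutex_t uvmem_lock    = PTHREAD_MUTEX_INITIALIZER; /* stand-in for arch.uvmem_lock */

static void page_in_path(void)
{
	pthread_mutex_lock(&memslots_lock);   /* 1. memslot lookup stays stable */
	pthread_mutex_lock(&mmap_lock);       /* 2. VMA walk / page migration   */
	pthread_mutex_lock(&uvmem_lock);      /* 3. per-GFN secure/shared state */

	/* ... move the page and update its GFN state here ... */

	pthread_mutex_unlock(&uvmem_lock);    /* release in reverse order */
	pthread_mutex_unlock(&mmap_lock);
	pthread_mutex_unlock(&memslots_lock);
}

int main(void)
{
	page_in_path();
	return 0;
}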
H A DMakefile6 ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
8 include $(srctree)/virt/kvm/Makefile.kvm
16 kvm-e500-objs := \
26 kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
28 kvm-e500mc-objs := \
38 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm
[all...]
H A Dbook3s_64_mmu_radix.c9 #include <linux/kvm.h>
102 int lpid = vcpu->kvm->arch.lpid; in kvmhv_copy_tofrom_guest_radix()
146 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_walk_radix_tree() local
180 ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); in kvmppc_mmu_walk_radix_tree()
242 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_radix_translate_table() local
258 ret = kvm_read_guest(kvm, ptb in kvmppc_mmu_radix_translate_table()
315 kvmppc_radix_tlbie_page(struct kvm * kvm,unsigned long addr,unsigned int pshift,u64 lpid) kvmppc_radix_tlbie_page() argument
353 kvmppc_radix_flush_pwc(struct kvm * kvm,u64 lpid) kvmppc_radix_flush_pwc() argument
374 kvmppc_radix_update_pte(struct kvm * kvm,pte_t * ptep,unsigned long clr,unsigned long set,unsigned long addr,unsigned int shift) kvmppc_radix_update_pte() argument
381 kvmppc_radix_set_pte_at(struct kvm * kvm,unsigned long addr,pte_t * ptep,pte_t pte) kvmppc_radix_set_pte_at() argument
423 kvmppc_unmap_pte(struct kvm * kvm,pte_t * pte,unsigned long gpa,unsigned int shift,const struct kvm_memory_slot * memslot,u64 lpid) kvmppc_unmap_pte() argument
476 kvmppc_unmap_free_pte(struct kvm * kvm,pte_t * pte,bool full,u64 lpid) kvmppc_unmap_free_pte() argument
497 kvmppc_unmap_free_pmd(struct kvm * kvm,pmd_t * pmd,bool full,u64 lpid) kvmppc_unmap_free_pmd() argument
526 kvmppc_unmap_free_pud(struct kvm * kvm,pud_t * pud,u64 lpid) kvmppc_unmap_free_pud() argument
548 kvmppc_free_pgtable_radix(struct kvm * kvm,pgd_t * pgd,u64 lpid) kvmppc_free_pgtable_radix() argument
564 kvmppc_free_radix(struct kvm * kvm) kvmppc_free_radix() argument
574 kvmppc_unmap_free_pmd_entry_table(struct kvm * kvm,pmd_t * pmd,unsigned long gpa,u64 lpid) kvmppc_unmap_free_pmd_entry_table() argument
590 kvmppc_unmap_free_pud_entry_table(struct kvm * kvm,pud_t * pud,unsigned long gpa,u64 lpid) kvmppc_unmap_free_pud_entry_table() argument
615 kvmppc_create_pte(struct kvm * kvm,pgd_t * pgtable,pte_t pte,unsigned long gpa,unsigned int level,unsigned long mmu_seq,u64 lpid,unsigned long * rmapp,struct rmap_nested ** n_rmap) kvmppc_create_pte() argument
793 kvmppc_hv_handle_set_rc(struct kvm * kvm,bool nested,bool writing,unsigned long gpa,u64 lpid) kvmppc_hv_handle_set_rc() argument
827 struct kvm *kvm = vcpu->kvm; kvmppc_book3s_instantiate_page() local
929 struct kvm *kvm = vcpu->kvm; kvmppc_book3s_radix_page_fault() local
1011 kvm_unmap_radix(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long gfn) kvm_unmap_radix() argument
1030 kvm_age_radix(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long gfn) kvm_age_radix() argument
1058 kvm_test_age_radix(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long gfn) kvm_test_age_radix() argument
1077 kvm_radix_test_clear_dirty(struct kvm * kvm,struct kvm_memory_slot * memslot,int pagenum) kvm_radix_test_clear_dirty() argument
1132 kvmppc_hv_get_dirty_log_radix(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long * map) kvmppc_hv_get_dirty_log_radix() argument
1157 kvmppc_radix_flush_memslot(struct kvm * kvm,const struct kvm_memory_slot * memslot) kvmppc_radix_flush_memslot() argument
1198 kvmhv_get_rmmu_info(struct kvm * kvm,struct kvm_ppc_rmmu_info * info) kvmhv_get_rmmu_info() argument
1225 kvmppc_init_vm_radix(struct kvm * kvm) kvmppc_init_vm_radix() argument
1244 struct kvm *kvm; global() member
1256 struct kvm *kvm = inode->i_private; debugfs_radix_open() local
1286 struct kvm *kvm; debugfs_radix_read() local
1447 kvmhv_radix_debugfs_init(struct kvm * kvm) kvmhv_radix_debugfs_init() argument
[all...]
H A Dbook3s_64_mmu_hv.c9 #include <linux/kvm.h>
47 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
53 struct kvm *kvm; member
57 /* These fields protected by kvm->arch.mmu_setup_lock */
67 * then protected by kvm->arch.mmu_setup_lock.
118 void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) in kvmppc_set_hpt() argument
120 atomic64_set(&kvm in kvmppc_set_hpt()
128 kvmppc_alloc_reset_hpt(struct kvm * kvm,int order) kvmppc_alloc_reset_hpt() argument
217 struct kvm *kvm = vcpu->kvm; kvmppc_map_vrma() local
292 kvmppc_virtmode_do_h_enter(struct kvm * kvm,unsigned long flags,long pte_index,unsigned long pteh,unsigned long ptel,unsigned long * pte_idx_ret) kvmppc_virtmode_do_h_enter() argument
344 struct kvm *kvm = vcpu->kvm; kvmppc_mmu_book3s_64_hv_xlate() local
509 struct kvm *kvm = vcpu->kvm; kvmppc_book3s_hv_page_fault() local
739 kvmppc_rmap_reset(struct kvm * kvm) kvmppc_rmap_reset() argument
762 kvmppc_unmap_hpte(struct kvm * kvm,unsigned long i,struct kvm_memory_slot * memslot,unsigned long * rmapp,unsigned long gfn) kvmppc_unmap_hpte() argument
804 kvm_unmap_rmapp(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long gfn) kvm_unmap_rmapp() argument
840 kvm_unmap_gfn_range_hv(struct kvm * kvm,struct kvm_gfn_range * range) kvm_unmap_gfn_range_hv() argument
855 kvmppc_core_flush_memslot_hv(struct kvm * kvm,struct kvm_memory_slot * memslot) kvmppc_core_flush_memslot_hv() argument
882 kvm_age_rmapp(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long gfn) kvm_age_rmapp() argument
937 kvm_age_gfn_hv(struct kvm * kvm,struct kvm_gfn_range * range) kvm_age_gfn_hv() argument
953 kvm_test_age_rmapp(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long gfn) kvm_test_age_rmapp() argument
986 kvm_test_age_gfn_hv(struct kvm * kvm,struct kvm_gfn_range * range) kvm_test_age_gfn_hv() argument
996 vcpus_running(struct kvm * kvm) vcpus_running() argument
1005 kvm_test_clear_dirty_npages(struct kvm * kvm,unsigned long * rmapp) kvm_test_clear_dirty_npages() argument
1104 kvmppc_hv_get_dirty_log_hpt(struct kvm * kvm,struct kvm_memory_slot * memslot,unsigned long * map) kvmppc_hv_get_dirty_log_hpt() argument
1127 kvmppc_pin_guest_page(struct kvm * kvm,unsigned long gpa,unsigned long * nb_ret) kvmppc_pin_guest_page() argument
1158 kvmppc_unpin_guest_page(struct kvm * kvm,void * va,unsigned long gpa,bool dirty) kvmppc_unpin_guest_page() argument
1200 struct kvm *kvm = resize->kvm; resize_hpt_rehash_hpte() local
1354 struct kvm *kvm = resize->kvm; resize_hpt_rehash() local
1369 struct kvm *kvm = resize->kvm; resize_hpt_pivot() local
1394 resize_hpt_release(struct kvm * kvm,struct kvm_resize_hpt * resize) resize_hpt_release() argument
1417 struct kvm *kvm = resize->kvm; resize_hpt_prepare_work() local
1457 kvm_vm_ioctl_resize_hpt_prepare(struct kvm * kvm,struct kvm_ppc_resize_hpt * rhpt) kvm_vm_ioctl_resize_hpt_prepare() argument
1523 kvm_vm_ioctl_resize_hpt_commit(struct kvm * kvm,struct kvm_ppc_resize_hpt * rhpt) kvm_vm_ioctl_resize_hpt_commit() argument
1597 struct kvm *kvm; global() member
1700 struct kvm *kvm = ctx->kvm; kvm_htab_read() local
1799 struct kvm *kvm = ctx->kvm; kvm_htab_write() local
1937 kvm_vm_ioctl_get_htab_fd(struct kvm * kvm,struct kvm_get_htab_fd * ghf) kvm_vm_ioctl_get_htab_fd() argument
1975 struct kvm *kvm; global() member
1985 struct kvm *kvm = inode->i_private; debugfs_htab_open() local
2016 struct kvm *kvm; debugfs_htab_read() local
2106 kvmppc_mmu_debugfs_init(struct kvm * kvm) kvmppc_mmu_debugfs_init() argument
[all...]
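kvm_vm_ioctl_resize_hpt_prepare() and kvm_vm_ioctl_resize_hpt_commit() above back the two-step HPT resize ioctls. A rough Book3S HV sketch follows, assuming the semantics documented for KVM_PPC_RESIZE_HPT_PREPARE in Documentation/virt/kvm/api.rst (a positive return means the new table is still being built and the caller should retry); the 100 ms poll interval is an arbitrary choice. Calling resize_hpt(vm_fd, 27), for example, would request a 2^27-byte (128 MiB) hashed page table.

#include <errno.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Prepare a resized HPT, poll until it is ready, then commit the switch. */
static int resize_hpt(int vm_fd, unsigned int new_order)
{
	struct kvm_ppc_resize_hpt rhpt = {
		.flags = 0,
		.shift = new_order,     /* log2 of the new HPT size in bytes */
	};
	int ret;

	do {
		ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
		if (ret > 0)
			usleep(100 * 1000);     /* still preparing, retry */
	} while (ret > 0);

	if (ret < 0)
		return -errno;

	return ioctl(vm_fd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt) ? -errno : 0;
}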
/linux/arch/riscv/kvm/
H A DMakefile8 include $(srctree)/virt/kvm/Makefile.kvm
10 obj-$(CONFIG_KVM) += kvm.o
13 kvm-y += aia.o
14 kvm-y += aia_aplic.o
15 kvm-y += aia_device.o
16 kvm-y += aia_imsic.o
17 kvm-y += gstage.o
18 kvm-y += main.o
19 kvm
[all...]
H A Daia_device.c19 struct kvm *kvm = dev->kvm; in aia_create() local
22 if (irqchip_in_kernel(kvm)) in aia_create()
26 if (kvm_trylock_all_vcpus(kvm)) in aia_create()
29 kvm_for_each_vcpu(i, vcpu, kvm) { in aia_create()
35 kvm->arch.aia.in_kernel = true; in aia_create()
38 kvm_unlock_all_vcpus(kvm); in aia_create()
47 static int aia_config(struct kvm *kvm, unsigne argument
141 aia_aplic_addr(struct kvm * kvm,u64 * addr,bool write) aia_aplic_addr() argument
160 aia_imsic_addr(struct kvm * kvm,u64 * addr,unsigned long vcpu_idx,bool write) aia_imsic_addr() argument
222 aia_init(struct kvm * kvm) aia_init() argument
541 kvm_riscv_aia_inject_msi_by_id(struct kvm * kvm,u32 hart_index,u32 guest_index,u32 iid) kvm_riscv_aia_inject_msi_by_id() argument
562 kvm_riscv_aia_inject_msi(struct kvm * kvm,struct kvm_msi * msi) kvm_riscv_aia_inject_msi() argument
596 kvm_riscv_aia_inject_irq(struct kvm * kvm,unsigned int irq,bool level) kvm_riscv_aia_inject_irq() argument
606 kvm_riscv_aia_init_vm(struct kvm * kvm) kvm_riscv_aia_init_vm() argument
632 kvm_riscv_aia_destroy_vm(struct kvm * kvm) kvm_riscv_aia_destroy_vm() argument
[all...]
H A Dvm.c31 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
35 r = kvm_riscv_mmu_alloc_pgd(kvm); in kvm_arch_init_vm()
39 r = kvm_riscv_gstage_vmid_init(kvm); in kvm_arch_init_vm()
41 kvm_riscv_mmu_free_pgd(kvm); in kvm_arch_init_vm()
45 kvm_riscv_aia_init_vm(kvm); in kvm_arch_init_vm()
47 kvm_riscv_guest_timer_init(kvm); in kvm_arch_init_vm()
52 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
54 kvm_destroy_vcpus(kvm); in kvm_arch_destroy_vm()
59 kvm_vm_ioctl_irq_line(struct kvm * kvm,struct kvm_irq_level * irql,bool line_status) kvm_vm_ioctl_irq_line() argument
69 kvm_set_msi(struct kvm_kernel_irq_routing_entry * e,struct kvm * kvm,int irq_source_id,int level,bool line_status) kvm_set_msi() argument
87 kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry * e,struct kvm * kvm,int irq_source_id,int level,bool line_status) kvm_riscv_set_irq() argument
93 kvm_riscv_setup_default_irq_routing(struct kvm * kvm,u32 lines) kvm_riscv_setup_default_irq_routing() argument
114 kvm_arch_can_set_irq_routing(struct kvm * kvm) kvm_arch_can_set_irq_routing() argument
119 kvm_set_routing_entry(struct kvm * kvm,struct kvm_kernel_irq_routing_entry * e,const struct kvm_irq_routing_entry * ue) kvm_set_routing_entry() argument
151 kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry * e,struct kvm * kvm,int irq_source_id,int level,bool line_status) kvm_arch_set_irq_inatomic() argument
169 kvm_arch_irqchip_in_kernel(struct kvm * kvm) kvm_arch_irqchip_in_kernel() argument
174 kvm_vm_ioctl_check_extension(struct kvm * kvm,long ext) kvm_vm_ioctl_check_extension() argument
213 kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap) kvm_vm_ioctl_enable_cap() argument
[all...]
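kvm_vm_ioctl_irq_line() above is the RISC-V backend of the generic KVM_IRQ_LINE ioctl. A minimal sketch, relying only on that ioctl's generic contract; how the 'irq' number maps onto an interrupt-controller input is architecture-specific, so the 10 in the usage note is just a placeholder.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assert (asserted=1) or de-assert (asserted=0) one GSI on the in-kernel irqchip. */
static int set_gsi(int vm_fd, unsigned int gsi, int asserted)
{
	struct kvm_irq_level irq = {
		.irq   = gsi,
		.level = asserted ? 1 : 0,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}

/* e.g. set_gsi(vm_fd, 10, 1); ... set_gsi(vm_fd, 10, 0); */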
/linux/arch/x86/kvm/
H A Dirq.h18 #include <kvm/iodev.h>
27 struct kvm;
55 struct kvm *kvm; member
64 int kvm_pic_init(struct kvm *kvm);
65 void kvm_pic_destroy(struct kvm *kvm);
66 int kvm_pic_read_irq(struct kvm *kvm);
76 irqchip_full(struct kvm * kvm) irqchip_full() argument
85 irqchip_full(struct kvm * kvm) irqchip_full() argument
91 pic_in_kernel(struct kvm * kvm) pic_in_kernel() argument
97 irqchip_split(struct kvm * kvm) irqchip_split() argument
106 irqchip_in_kernel(struct kvm * kvm) irqchip_in_kernel() argument
[all...]
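pic_in_kernel(), irqchip_split() and irqchip_in_kernel() above distinguish the full in-kernel irqchip from the split model. Here is a hedged sketch of how a VMM picks one before creating vCPUs, assuming the host reports KVM_CAP_SPLIT_IRQCHIP; args[0] = 24 mirrors the usual IOAPIC pin count but is the VMM's own choice.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Split model: local APICs stay in the kernel, PIC/IOAPIC are emulated in
 * userspace.  Otherwise fall back to the full in-kernel irqchip.
 */
static int setup_irqchip(int kvm_fd, int vm_fd, int want_split)
{
	if (want_split &&
	    ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SPLIT_IRQCHIP) > 0) {
		struct kvm_enable_cap cap = {
			.cap  = KVM_CAP_SPLIT_IRQCHIP,
			.args = { 24 },   /* IOAPIC routes reserved for userspace */
		};
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}

	/* Full in-kernel PIC + IOAPIC + local APICs. */
	return ioctl(vm_fd, KVM_CREATE_IRQCHIP);
}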
H A Dirq.c82 if (pic_in_kernel(v->kvm)) in kvm_cpu_has_extint()
83 return v->kvm->arch.vpic->output; in kvm_cpu_has_extint()
86 WARN_ON_ONCE(!irqchip_split(v->kvm)); in kvm_cpu_has_extint()
140 return v->kvm->arch.xen.upcall_vector; in kvm_cpu_get_extint()
144 if (pic_in_kernel(v->kvm)) in kvm_cpu_get_extint()
145 return kvm_pic_read_irq(v->kvm); /* PIC */ in kvm_cpu_get_extint()
148 WARN_ON_ONCE(!irqchip_split(v->kvm)); in kvm_cpu_get_extint()
186 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) in kvm_arch_irqfd_allowed() argument
190 return resample ? irqchip_full(kvm) in kvm_arch_irqfd_allowed()
193 kvm_arch_irqchip_in_kernel(struct kvm * kvm) kvm_arch_irqchip_in_kernel() argument
198 kvm_irq_delivery_to_apic(struct kvm * kvm,struct kvm_lapic * src,struct kvm_lapic_irq * irq,struct dest_map * dest_map) kvm_irq_delivery_to_apic() argument
255 kvm_msi_to_lapic_irq(struct kvm * kvm,struct kvm_kernel_irq_routing_entry * e,struct kvm_lapic_irq * irq) kvm_msi_to_lapic_irq() argument
276 kvm_msi_route_invalid(struct kvm * kvm,struct kvm_kernel_irq_routing_entry * e) kvm_msi_route_invalid() argument
283 kvm_set_msi(struct kvm_kernel_irq_routing_entry * e,struct kvm * kvm,int irq_source_id,int level,bool line_status) kvm_set_msi() argument
299 kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry * e,struct kvm * kvm,int irq_source_id,int level,bool line_status) kvm_arch_set_irq_inatomic() argument
336 kvm_vm_ioctl_irq_line(struct kvm * kvm,struct kvm_irq_level * irq_event,bool line_status) kvm_vm_ioctl_irq_line() argument
348 kvm_arch_can_set_irq_routing(struct kvm * kvm) kvm_arch_can_set_irq_routing() argument
353 kvm_set_routing_entry(struct kvm * kvm,struct kvm_kernel_irq_routing_entry * e,const struct kvm_irq_routing_entry * ue) kvm_set_routing_entry() argument
414 kvm_intr_is_single_vcpu(struct kvm * kvm,struct kvm_lapic_irq * irq,struct kvm_vcpu ** dest_vcpu) kvm_intr_is_single_vcpu() argument
472 struct kvm *kvm = vcpu->kvm; kvm_scan_ioapic_routes() local
501 kvm_arch_irq_routing_update(struct kvm * kvm) kvm_arch_irq_routing_update() argument
515 struct kvm *kvm = irqfd->kvm; kvm_pi_update_irte() local
565 struct kvm *kvm = irqfd->kvm; kvm_arch_irq_bypass_add_producer() local
589 struct kvm *kvm = irqfd->kvm; kvm_arch_irq_bypass_del_producer() local
658 kvm_setup_default_ioapic_and_pic_routing(struct kvm * kvm) kvm_setup_default_ioapic_and_pic_routing() argument
664 kvm_vm_ioctl_get_irqchip(struct kvm * kvm,struct kvm_irqchip * chip) kvm_vm_ioctl_get_irqchip() argument
689 kvm_vm_ioctl_set_irqchip(struct kvm * kvm,struct kvm_irqchip * chip) kvm_vm_ioctl_set_irqchip() argument
[all...]
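kvm_set_msi() and kvm_irq_delivery_to_apic() above are also reachable directly from userspace through KVM_SIGNAL_MSI. A small x86 sketch, assuming the standard MSI address/data encoding; vector 0x30 and APIC ID 0 in the usage note are arbitrary example values.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Deliver a fixed-mode, edge-triggered MSI to one local APIC. */
static int inject_msi(int vm_fd, unsigned char dest_apic_id, unsigned char vector)
{
	struct kvm_msi msi = {
		.address_lo = 0xfee00000u | ((unsigned int)dest_apic_id << 12),
		.address_hi = 0,
		.data       = vector,
	};

	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}

/* e.g. inject_msi(vm_fd, 0, 0x30); */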
H A Dxen.h25 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
26 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
27 int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
29 int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
30 void kvm_xen_init_vm(struct kvm *kvm);
61 kvm_xen_msr_enabled(struct kvm * kvm) kvm_xen_msr_enabled() argument
67 kvm_xen_is_hypercall_page_msr(struct kvm * kvm,u32 msr) kvm_xen_is_hypercall_page_msr() argument
75 kvm_xen_hypercall_enabled(struct kvm * kvm) kvm_xen_hypercall_enabled() argument
118 kvm_xen_init_vm(struct kvm * kvm) kvm_xen_init_vm() argument
122 kvm_xen_destroy_vm(struct kvm * kvm) kvm_xen_destroy_vm() argument
138 kvm_xen_msr_enabled(struct kvm * kvm) kvm_xen_msr_enabled() argument
143 kvm_xen_is_hypercall_page_msr(struct kvm * kvm,u32 msr) kvm_xen_is_hypercall_page_msr() argument
148 kvm_xen_hypercall_enabled(struct kvm * kvm) kvm_xen_hypercall_enabled() argument
[all...]
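kvm_xen_hvm_config() and the kvm_xen_hypercall_enabled() helper above are wired up from userspace via KVM_XEN_HVM_CONFIG. The sketch below assumes the host reports KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL in KVM_CAP_XEN_HVM; the hypercall MSR index is whatever the VMM advertises to the guest through its Xen CPUID leaves, so the value used here is only a placeholder.

#include <sys/ioctl.h>
#include <linux/kvm.h>

#define XEN_HYPERCALL_MSR 0x40000000u   /* placeholder: must match guest CPUID */

/* Turn on in-KVM Xen hypercall page handling if the host supports it. */
static int enable_xen_hcalls(int kvm_fd, int vm_fd)
{
	int feat = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);

	if (feat <= 0 || !(feat & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL))
		return -1;      /* no Xen hypercall interception available */

	struct kvm_xen_hvm_config cfg = {
		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
		.msr   = XEN_HYPERCALL_MSR,
	};

	return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
}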
