/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KVM guest fault handling.
 *
 * Copyright IBM Corp. 2025
 * Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
 */

#ifndef __KVM_S390_FAULTIN_H
#define __KVM_S390_FAULTIN_H

#include <linux/kvm_host.h>

#include "dat.h"

int kvm_s390_faultin_gfn(struct kvm_vcpu *vcpu, struct kvm *kvm, struct guest_fault *f);
int kvm_s390_get_guest_page(struct kvm *kvm, struct guest_fault *f, gfn_t gfn, bool w);
/*
 * Fault in a single guest page, wrapping gfn/write flag in a temporary
 * struct guest_fault so callers don't have to build one themselves.
 */
static inline int kvm_s390_faultin_gfn_simple(struct kvm_vcpu *vcpu, struct kvm *kvm,
					      gfn_t gfn, bool wr)
{
	struct guest_fault fault = {
		.gfn = gfn,
		.write_attempt = wr,
	};

	return kvm_s390_faultin_gfn(vcpu, kvm, &fault);
}
26
/*
 * Pin the guest page containing @gaddr for reading and load the
 * unsigned long stored at that guest physical address into @val.
 *
 * Returns 0 on success, a negative error code from
 * kvm_s390_get_guest_page() otherwise.
 * NOTE(review): assumes @gaddr is suitably aligned for an unsigned long
 * read — confirm with callers.
 */
static inline int kvm_s390_get_guest_page_and_read_gpa(struct kvm *kvm, struct guest_fault *f,
						       gpa_t gaddr, unsigned long *val)
{
	void *p;
	int ret;

	ret = kvm_s390_get_guest_page(kvm, f, gpa_to_gfn(gaddr), false);
	if (ret)
		return ret;

	/* Combine the pinned page frame with the in-page offset of @gaddr. */
	p = phys_to_virt(pfn_to_phys(f->pfn) | offset_in_page(gaddr));
	*val = *(unsigned long *)p;

	return 0;
}
40
/*
 * Release the faultin pages of @n entries in @guest_faults and clear
 * each entry's page pointer so it cannot be released twice.
 */
static inline void kvm_s390_release_multiple(struct kvm *kvm, struct guest_fault *guest_faults,
					     int n, bool ignore)
{
	struct guest_fault *f;

	for (f = guest_faults; f < guest_faults + n; f++) {
		kvm_release_faultin_page(kvm, f->page, ignore, f->write_attempt);
		f->page = NULL;
	}
}
52
/*
 * Check whether any valid entry in @guest_faults raced with an MMU
 * invalidation since @seq was sampled and therefore needs a retry.
 * With @unsafe the lockless variant of the retry check is used.
 */
static inline bool kvm_s390_multiple_faults_need_retry(struct kvm *kvm, unsigned long seq,
						       struct guest_fault *guest_faults, int n,
						       bool unsafe)
{
	struct guest_fault *f;
	bool retry;

	for (f = guest_faults; f < guest_faults + n; f++) {
		if (!f->valid)
			continue;
		retry = unsafe ? mmu_invalidate_retry_gfn_unsafe(kvm, seq, f->gfn)
			       : mmu_invalidate_retry_gfn(kvm, seq, f->gfn);
		if (retry)
			return true;
	}
	return false;
}
69
/*
 * Fault in @n_pages consecutive guest pages starting at gfn @start,
 * filling one @guest_faults entry per page.
 *
 * Stops at the first failing page; entries acquired before the failure
 * remain in @guest_faults for the caller to release.
 *
 * Returns 0 on success (including when @n_pages is 0), otherwise the
 * error code of the first failing kvm_s390_get_guest_page() call.
 */
static inline int kvm_s390_get_guest_pages(struct kvm *kvm, struct guest_fault *guest_faults,
					   gfn_t start, int n_pages, bool write_attempt)
{
	/*
	 * rc must be initialized: with n_pages == 0 the loop body never
	 * runs and the original code returned an uninitialized value (UB).
	 */
	int i, rc = 0;

	for (i = 0; i < n_pages; i++) {
		rc = kvm_s390_get_guest_page(kvm, guest_faults + i, start + i, write_attempt);
		if (rc)
			break;
	}
	return rc;
}
82
/* Release every entry of a fixed-size guest_fault array (uses ARRAY_SIZE). */
#define kvm_s390_release_faultin_array(kvm, array, ignore) \
	kvm_s390_release_multiple(kvm, array, ARRAY_SIZE(array), ignore)
85
/* Retry check over a fixed-size guest_fault array, lockless (unsafe) variant. */
#define kvm_s390_array_needs_retry_unsafe(kvm, seq, array) \
	kvm_s390_multiple_faults_need_retry(kvm, seq, array, ARRAY_SIZE(array), true)
88
/* Retry check over a fixed-size guest_fault array, locked (safe) variant. */
#define kvm_s390_array_needs_retry_safe(kvm, seq, array) \
	kvm_s390_multiple_faults_need_retry(kvm, seq, array, ARRAY_SIZE(array), false)
91
#endif /* __KVM_S390_FAULTIN_H */