1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * KVM guest address space mapping code
4 *
5 * Copyright IBM Corp. 2007, 2016
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9 #ifndef _ASM_S390_GMAP_H
10 #define _ASM_S390_GMAP_H
11
12 #include <linux/radix-tree.h>
13 #include <linux/refcount.h>
14
/* Generic bits for GMAP notification on DAT table entry changes. */
#define GMAP_NOTIFY_SHADOW	0x2	/* presumably: notify shadow gmaps — confirm against gmap_protect_one() @bits users */
#define GMAP_NOTIFY_MPROT	0x1	/* presumably: notify on protection changes — confirm against callers */

/* Status bits only for huge segment entries */
#define _SEGMENT_ENTRY_GMAP_IN	0x0800	/* invalidation notify bit */
#define _SEGMENT_ENTRY_GMAP_UC	0x0002	/* dirty (migration) */
22
/**
 * struct gmap_struct - guest address space
 * @list: list head for the mm->context gmap list
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @ref_count: reference counter for the gmap structure
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @asce_end: end of the address range covered by @asce (NOTE(review):
 *	presumably derived from the limit passed to gmap_create() — confirm)
 * @private: private data pointer; opaque to the code visible in this header
 * @pfault_enabled: defines if pfaults are applicable for the guest
 * @guest_handle: protected virtual machine handle for the ultravisor
 * @host_to_rmap: radix tree with gmap_rmap lists
 * @children: list of shadow gmap structures
 * @shadow_lock: spinlock to protect the shadow gmap list
 * @parent: pointer to the parent gmap for shadow guest address spaces
 * @orig_asce: ASCE for which the shadow page table has been created
 * @edat_level: edat level to be used for the shadow translation
 * @removed: flag to indicate if a shadow guest address space has been removed
 * @initialized: flag to indicate if a shadow guest address space can be used
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	refcount_t ref_count;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
	/* only set for protected virtual machines */
	unsigned long guest_handle;
	/* Additional data for shadow guest address spaces */
	struct radix_tree_root host_to_rmap;
	struct list_head children;
	spinlock_t shadow_lock;
	struct gmap *parent;
	unsigned long orig_asce;
	int edat_level;
	bool removed;
	bool initialized;
};
68
/**
 * struct gmap_rmap - reverse mapping for shadow page table entries
 * @next: pointer to next rmap in the list
 * @raddr: virtual rmap address in the shadow guest address space
 */
struct gmap_rmap {
	struct gmap_rmap *next;	/* singly linked; walked via gmap_for_each_rmap{,_safe}() */
	unsigned long raddr;
};
78
/*
 * Iterate over a singly linked gmap_rmap list starting at @head.
 * The current entry must not be freed inside the loop body — the loop
 * condition dereferences @pos to advance.
 */
#define gmap_for_each_rmap(pos, head) \
	for (pos = (head); pos; pos = pos->next)

/*
 * Like gmap_for_each_rmap(), but fetches the successor into @n before the
 * body runs, so the current entry @pos may safely be freed inside the loop.
 */
#define gmap_for_each_rmap_safe(pos, n, head) \
	for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
84
/**
 * struct gmap_notifier - notify function block for page invalidation
 * @list: list head linking registered notifiers
 *	(see gmap_register_pte_notifier())
 * @rcu: rcu head — presumably for RCU-deferred freeing on
 *	unregistration; confirm against gmap_unregister_pte_notifier()
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	struct rcu_head rcu;
	void (*notifier_call)(struct gmap *gmap, unsigned long start,
			      unsigned long end);
};
95
gmap_is_shadow(struct gmap * gmap)96 static inline int gmap_is_shadow(struct gmap *gmap)
97 {
98 return !!gmap->parent;
99 }
100
101 struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
102 void gmap_remove(struct gmap *gmap);
103 struct gmap *gmap_get(struct gmap *gmap);
104 void gmap_put(struct gmap *gmap);
105 void gmap_free(struct gmap *gmap);
106 struct gmap *gmap_alloc(unsigned long limit);
107
108 int gmap_map_segment(struct gmap *gmap, unsigned long from,
109 unsigned long to, unsigned long len);
110 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
111 unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
112 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
113 void __gmap_zap(struct gmap *, unsigned long gaddr);
114 void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
115
116 int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
117
118 void gmap_unshadow(struct gmap *sg);
119 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
120 int fake);
121 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
122 int fake);
123 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
124 int fake);
125 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
126 int fake);
127 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
128
129 void gmap_register_pte_notifier(struct gmap_notifier *);
130 void gmap_unregister_pte_notifier(struct gmap_notifier *);
131
132 int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits);
133
134 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
135 unsigned long gaddr, unsigned long vmaddr);
136 int s390_replace_asce(struct gmap *gmap);
137 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
138 int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
139 unsigned long end, bool interruptible);
140 unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
141
/**
 * s390_uv_destroy_range - Destroy a range of pages in the given mm.
 * @mm: the mm on which to operate on
 * @start: the start of the range
 * @end: the end of the range
 *
 * This function will call cond_resched(), so it should not generate stalls, but
 * it will otherwise only return when it completed.
 */
static inline void s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
					 unsigned long end)
{
	/* Non-interruptible variant: the return value carries no information. */
	(void)__s390_uv_destroy_range(mm, start, end, false);
}
156
/**
 * s390_uv_destroy_range_interruptible - Destroy a range of pages in the
 * given mm, but stop when a fatal signal is received.
 * @mm: the mm on which to operate on
 * @start: the start of the range
 * @end: the end of the range
 *
 * This function will call cond_resched(), so it should not generate stalls. If
 * a fatal signal is received, it will return with -EINTR immediately,
 * without finishing destroying the whole range. Upon successful
 * completion, 0 is returned.
 */
static inline int s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsigned long start,
						      unsigned long end)
{
	/* interruptible=true: may stop early on a fatal signal. */
	return __s390_uv_destroy_range(mm, start, end, true);
}
174 #endif /* _ASM_S390_GMAP_H */
175