1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (c) 2023, Microsoft Corporation.
4  */
5 
6 #ifndef _MSHV_ROOT_H_
7 #define _MSHV_ROOT_H_
8 
9 #include <linux/spinlock.h>
10 #include <linux/mutex.h>
11 #include <linux/semaphore.h>
12 #include <linux/sched.h>
13 #include <linux/srcu.h>
14 #include <linux/wait.h>
15 #include <linux/hashtable.h>
16 #include <linux/dev_printk.h>
17 #include <linux/build_bug.h>
18 #include <uapi/linux/mshv.h>
19 
20 /*
21  * Hypervisor must be between these version numbers (inclusive)
22  * to guarantee compatibility
23  */
24 #define MSHV_HV_MIN_VERSION		(27744)
25 #define MSHV_HV_MAX_VERSION		(27751)
26 
27 static_assert(HV_HYP_PAGE_SIZE == MSHV_HV_PAGE_SIZE);
28 
29 #define MSHV_MAX_VPS			256
30 
31 #define MSHV_PARTITIONS_HASH_BITS	9
32 
33 #define MSHV_PIN_PAGES_BATCH_SIZE	(0x10000000ULL / HV_HYP_PAGE_SIZE)
34 
35 struct mshv_vp {
36 	u32 vp_index;
37 	struct mshv_partition *vp_partition;
38 	struct mutex vp_mutex;
39 	struct hv_vp_register_page *vp_register_page;
40 	struct hv_message *vp_intercept_msg_page;
41 	void *vp_ghcb_page;
42 	struct hv_stats_page *vp_stats_pages[2];
43 	struct {
44 		atomic64_t vp_signaled_count;
45 		struct {
46 			u64 intercept_suspend: 1;
47 			u64 root_sched_blocked: 1; /* root scheduler only */
48 			u64 root_sched_dispatched: 1; /* root scheduler only */
49 			u64 reserved: 61;
50 		} flags;
51 		unsigned int kicked_by_hv;
52 		wait_queue_head_t vp_suspend_queue;
53 	} run;
54 };
55 
/*
 * VP-scoped device logging helpers: prefix each message with
 * "p<partition-id>vp<vp-index>: " and route it through dev_<level>()
 * on the owning partition's module device.
 */
#define vp_fmt(fmt) "p%lluvp%u: " fmt
#define vp_devprintk(level, v, fmt, ...) \
do { \
	const struct mshv_vp *__vp = (v); \
	const struct mshv_partition *__pt = __vp->vp_partition; \
	dev_##level(__pt->pt_module_dev, vp_fmt(fmt), __pt->pt_id, \
		    __vp->vp_index, ##__VA_ARGS__); \
} while (0)
#define vp_emerg(v, fmt, ...)	vp_devprintk(emerg, v, fmt, ##__VA_ARGS__)
#define vp_crit(v, fmt, ...)	vp_devprintk(crit, v, fmt, ##__VA_ARGS__)
#define vp_alert(v, fmt, ...)	vp_devprintk(alert, v, fmt, ##__VA_ARGS__)
#define vp_err(v, fmt, ...)	vp_devprintk(err, v, fmt, ##__VA_ARGS__)
#define vp_warn(v, fmt, ...)	vp_devprintk(warn, v, fmt, ##__VA_ARGS__)
#define vp_notice(v, fmt, ...)	vp_devprintk(notice, v, fmt, ##__VA_ARGS__)
#define vp_info(v, fmt, ...)	vp_devprintk(info, v, fmt, ##__VA_ARGS__)
#define vp_dbg(v, fmt, ...)	vp_devprintk(dbg, v, fmt, ##__VA_ARGS__)

73 struct mshv_mem_region {
74 	struct hlist_node hnode;
75 	u64 nr_pages;
76 	u64 start_gfn;
77 	u64 start_uaddr;
78 	u32 hv_map_flags;
79 	struct {
80 		u64 large_pages:  1; /* 2MiB */
81 		u64 range_pinned: 1;
82 		u64 reserved:	 62;
83 	} flags;
84 	struct mshv_partition *partition;
85 	struct page *pages[];
86 };
87 
88 struct mshv_irq_ack_notifier {
89 	struct hlist_node link;
90 	unsigned int irq_ack_gsi;
91 	void (*irq_acked)(struct mshv_irq_ack_notifier *mian);
92 };
93 
94 struct mshv_partition {
95 	struct device *pt_module_dev;
96 
97 	struct hlist_node pt_hnode;
98 	u64 pt_id;
99 	refcount_t pt_ref_count;
100 	struct mutex pt_mutex;
101 	struct hlist_head pt_mem_regions; // not ordered
102 
103 	u32 pt_vp_count;
104 	struct mshv_vp *pt_vp_array[MSHV_MAX_VPS];
105 
106 	struct mutex pt_irq_lock;
107 	struct srcu_struct pt_irq_srcu;
108 	struct hlist_head irq_ack_notifier_list;
109 
110 	struct hlist_head pt_devices;
111 
112 	/*
113 	 * MSHV does not support more than one async hypercall in flight
114 	 * for a single partition. Thus, it is okay to define per partition
115 	 * async hypercall status.
116 	 */
117 	struct completion async_hypercall;
118 	u64 async_hypercall_status;
119 
120 	spinlock_t	  pt_irqfds_lock;
121 	struct hlist_head pt_irqfds_list;
122 	struct mutex	  irqfds_resampler_lock;
123 	struct hlist_head irqfds_resampler_list;
124 
125 	struct hlist_head ioeventfds_list;
126 
127 	struct mshv_girq_routing_table __rcu *pt_girq_tbl;
128 	u64 isolation_type;
129 	bool import_completed;
130 	bool pt_initialized;
131 };
132 
/*
 * Partition-scoped device logging helpers: prefix each message with
 * "p<partition-id>: " and route it through dev_<level>() on the
 * partition's module device.
 */
#define pt_fmt(fmt) "p%llu: " fmt
#define pt_devprintk(level, p, fmt, ...) \
do { \
	const struct mshv_partition *__pt = (p); \
	dev_##level(__pt->pt_module_dev, pt_fmt(fmt), __pt->pt_id, \
		    ##__VA_ARGS__); \
} while (0)
#define pt_emerg(p, fmt, ...)	pt_devprintk(emerg, p, fmt, ##__VA_ARGS__)
#define pt_crit(p, fmt, ...)	pt_devprintk(crit, p, fmt, ##__VA_ARGS__)
#define pt_alert(p, fmt, ...)	pt_devprintk(alert, p, fmt, ##__VA_ARGS__)
#define pt_err(p, fmt, ...)	pt_devprintk(err, p, fmt, ##__VA_ARGS__)
#define pt_warn(p, fmt, ...)	pt_devprintk(warn, p, fmt, ##__VA_ARGS__)
#define pt_notice(p, fmt, ...)	pt_devprintk(notice, p, fmt, ##__VA_ARGS__)
#define pt_info(p, fmt, ...)	pt_devprintk(info, p, fmt, ##__VA_ARGS__)
#define pt_dbg(p, fmt, ...)	pt_devprintk(dbg, p, fmt, ##__VA_ARGS__)

149 struct mshv_lapic_irq {
150 	u32 lapic_vector;
151 	u64 lapic_apic_id;
152 	union hv_interrupt_control lapic_control;
153 };
154 
155 #define MSHV_MAX_GUEST_IRQS		4096
156 
157 /* representation of one guest irq entry, either msi or legacy */
158 struct mshv_guest_irq_ent {
159 	u32 girq_entry_valid;	/* vfio looks at this */
160 	u32 guest_irq_num;	/* a unique number for each irq */
161 	u32 girq_addr_lo;	/* guest irq msi address info */
162 	u32 girq_addr_hi;
163 	u32 girq_irq_data;	/* idt vector in some cases */
164 };
165 
166 struct mshv_girq_routing_table {
167 	u32 num_rt_entries;
168 	struct mshv_guest_irq_ent mshv_girq_info_tbl[];
169 };
170 
171 struct hv_synic_pages {
172 	struct hv_message_page *synic_message_page;
173 	struct hv_synic_event_flags_page *synic_event_flags_page;
174 	struct hv_synic_event_ring_page *synic_event_ring_page;
175 };
176 
177 struct mshv_root {
178 	struct hv_synic_pages __percpu *synic_pages;
179 	spinlock_t pt_ht_lock;
180 	DECLARE_HASHTABLE(pt_htable, MSHV_PARTITIONS_HASH_BITS);
181 };
182 
183 /*
184  * Callback for doorbell events.
185  * NOTE: This is called in interrupt context. Callback
186  * should defer slow and sleeping logic to later.
187  */
188 typedef void (*doorbell_cb_t) (int doorbell_id, void *);
189 
190 /*
191  * port table information
192  */
193 struct port_table_info {
194 	struct rcu_head portbl_rcu;
195 	enum hv_port_type hv_port_type;
196 	union {
197 		struct {
198 			u64 reserved[2];
199 		} hv_port_message;
200 		struct {
201 			u64 reserved[2];
202 		} hv_port_event;
203 		struct {
204 			u64 reserved[2];
205 		} hv_port_monitor;
206 		struct {
207 			doorbell_cb_t doorbell_cb;
208 			void *data;
209 		} hv_port_doorbell;
210 	};
211 };
212 
213 int mshv_update_routing_table(struct mshv_partition *partition,
214 			      const struct mshv_user_irq_entry *entries,
215 			      unsigned int numents);
216 void mshv_free_routing_table(struct mshv_partition *partition);
217 
218 struct mshv_guest_irq_ent mshv_ret_girq_entry(struct mshv_partition *partition,
219 					      u32 irq_num);
220 
221 void mshv_copy_girq_info(struct mshv_guest_irq_ent *src_irq,
222 			 struct mshv_lapic_irq *dest_irq);
223 
224 void mshv_irqfd_routing_update(struct mshv_partition *partition);
225 
226 void mshv_port_table_fini(void);
227 int mshv_portid_alloc(struct port_table_info *info);
228 int mshv_portid_lookup(int port_id, struct port_table_info *info);
229 void mshv_portid_free(int port_id);
230 
231 int mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb,
232 			   void *data, u64 gpa, u64 val, u64 flags);
233 void mshv_unregister_doorbell(u64 partition_id, int doorbell_portid);
234 
235 void mshv_isr(void);
236 int mshv_synic_init(unsigned int cpu);
237 int mshv_synic_cleanup(unsigned int cpu);
238 
mshv_partition_encrypted(struct mshv_partition * partition)239 static inline bool mshv_partition_encrypted(struct mshv_partition *partition)
240 {
241 	return partition->isolation_type == HV_PARTITION_ISOLATION_TYPE_SNP;
242 }
243 
244 struct mshv_partition *mshv_partition_get(struct mshv_partition *partition);
245 void mshv_partition_put(struct mshv_partition *partition);
246 struct mshv_partition *mshv_partition_find(u64 partition_id) __must_hold(RCU);
247 
248 /* hypercalls */
249 
250 int hv_call_withdraw_memory(u64 count, int node, u64 partition_id);
251 int hv_call_create_partition(u64 flags,
252 			     struct hv_partition_creation_properties creation_properties,
253 			     union hv_partition_isolation_properties isolation_properties,
254 			     u64 *partition_id);
255 int hv_call_initialize_partition(u64 partition_id);
256 int hv_call_finalize_partition(u64 partition_id);
257 int hv_call_delete_partition(u64 partition_id);
258 int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs);
259 int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
260 			  u32 flags, struct page **pages);
261 int hv_call_unmap_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
262 			    u32 flags);
263 int hv_call_delete_vp(u64 partition_id, u32 vp_index);
264 int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
265 				     u64 dest_addr,
266 				     union hv_interrupt_control control);
267 int hv_call_clear_virtual_interrupt(u64 partition_id);
268 int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
269 				  union hv_gpa_page_access_state_flags state_flags,
270 				  int *written_total,
271 				  union hv_gpa_page_access_state *states);
272 int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
273 			 struct hv_vp_state_data state_data,
274 			 /* Choose between pages and ret_output */
275 			 u64 page_count, struct page **pages,
276 			 union hv_output_get_vp_state *ret_output);
277 int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
278 			 /* Choose between pages and bytes */
279 			 struct hv_vp_state_data state_data, u64 page_count,
280 			 struct page **pages, u32 num_bytes, u8 *bytes);
281 int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
282 			      union hv_input_vtl input_vtl,
283 			      struct page **state_page);
284 int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
285 				union hv_input_vtl input_vtl);
286 int hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
287 			u64 connection_partition_id, struct hv_port_info *port_info,
288 			u8 port_vtl, u8 min_connection_vtl, int node);
289 int hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id);
290 int hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
291 			 u64 connection_partition_id,
292 			 union hv_connection_id connection_id,
293 			 struct hv_connection_info *connection_info,
294 			 u8 connection_vtl, int node);
295 int hv_call_disconnect_port(u64 connection_partition_id,
296 			    union hv_connection_id connection_id);
297 int hv_call_notify_port_ring_empty(u32 sint_index);
298 int hv_call_map_stat_page(enum hv_stats_object_type type,
299 			  const union hv_stats_object_identity *identity,
300 			  void **addr);
301 int hv_call_unmap_stat_page(enum hv_stats_object_type type,
302 			    const union hv_stats_object_identity *identity);
303 int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
304 				   u64 page_struct_count, u32 host_access,
305 				   u32 flags, u8 acquire);
306 
307 extern struct mshv_root mshv_root;
308 extern enum hv_scheduler_type hv_scheduler_type;
309 extern u8 * __percpu *hv_synic_eventring_tail;
310 
311 #endif /* _MSHV_ROOT_H_ */
312