// SPDX-License-Identifier: GPL-2.0-only
/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */

#define pr_fmt(fmt)  "Hyper-V: " fmt

#include <linux/efi.h>
#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/e820/api.h>
#include <asm/sev.h>
#include <asm/ibt.h>
#include <asm/hypervisor.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <asm/idtentry.h>
#include <asm/set_memory.h>
#include <linux/kexec.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>

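/*
 * Guest-executable page holding the hypercall code; set up in hyperv_init()
 * and cleared across hibernation (hv_suspend()/hv_resume()) and kexec
 * (hyperv_cleanup()).
 */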
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);

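/*
 * Per-CPU pointer to the GHCB page used to communicate with the paravisor
 * on SNP-isolated VMs; each CPU's page is mapped in hyperv_init_ghcb().
 */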
union hv_ghcb * __percpu *hv_ghcb_pg;

/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;

struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);

static int hyperv_init_ghcb(void)
{
	u64 ghcb_gpa;
	void *ghcb_va;
	void **ghcb_base;

	if (!ms_hyperv.paravisor_present || !hv_isolation_type_snp())
		return 0;

	if (!hv_ghcb_pg)
		return -EINVAL;

	/*
	 * The GHCB page is allocated by the paravisor. The address
	 * returned by MSR_AMD64_SEV_ES_GHCB is above the shared
	 * memory boundary, so map it here.
	 */
	rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);

	/* Mask out vTOM bit. ioremap_cache() maps decrypted */
	ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
	ghcb_va = (void *)ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
	if (!ghcb_va)
		return -ENOMEM;

	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	*ghcb_base = ghcb_va;

	return 0;
}

static int hv_cpu_init(unsigned int cpu)
{
	union hv_vp_assist_msr_contents msr = { 0 };
	struct hv_vp_assist_page **hvp;
	int ret;

	ret = hv_common_cpu_init(cpu);
	if (ret)
		return ret;

	if (!hv_vp_assist_page)
		return 0;

	hvp = &hv_vp_assist_page[cpu];
	if (hv_root_partition()) {
		/*
		 * For the root partition, use the hypervisor-provided VP
		 * assist page instead of allocating a new page.
		 */
		rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
		*hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
				PAGE_SIZE, MEMREMAP_WB);
	} else {
		/*
		 * The VP assist page is an "overlay" page (see Hyper-V TLFS's
		 * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
		 * out to make sure we always write the EOI MSR in
		 * hv_apic_eoi_write() *after* the EOI optimization is disabled
		 * in hv_cpu_die(), otherwise a CPU may not be stopped in the
		 * case of CPU offlining and the VM will hang.
		 */
		if (!*hvp) {
			*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);

			/*
			 * Hyper-V should never specify a VM that is both a
			 * Confidential VM and running as the root partition;
			 * the root partition is not allowed to run in a
			 * Confidential VM. So the assist page only needs to
			 * be decrypted here, in the non-root case.
			 */
			if (*hvp && !ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
				WARN_ON_ONCE(set_memory_decrypted((unsigned long)(*hvp), 1));
				memset(*hvp, 0, PAGE_SIZE);
			}
		}

		if (*hvp)
			msr.pfn = vmalloc_to_pfn(*hvp);

	}
	if (!WARN_ON(!(*hvp))) {
		msr.enable = 1;
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}

	return hyperv_init_ghcb();
}

static void (*hv_reenlightenment_cb)(void);

static void hv_reenlightenment_notify(struct work_struct *dummy)
{
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	/* Don't issue the callback if TSC accesses are not emulated */
	if (hv_reenlightenment_cb && emu_status.inprogress)
		hv_reenlightenment_cb();
}
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);

void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

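	/* The TSC frequency MSR reports Hz; tsc_khz expects kHz. */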
	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);

static inline bool hv_reenlightenment_available(void)
{
	/*
	 * Check for required features and privileges to make TSC frequency
	 * change notifications work.
	 */
	return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
		ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
		ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
}

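/*
 * Reenlightenment interrupt handler: acknowledge the interrupt and defer the
 * actual work to process context. hv_reenlightenment_notify() then checks
 * whether TSC accesses are still being emulated before invoking the
 * registered callback.
 */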
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
	apic_eoi();
	inc_irq_stat(irq_hv_reenlightenment_count);
	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
}

void set_hv_tscchange_cb(void (*cb)(void))
{
	struct hv_reenlightenment_control re_ctrl = {
		.vector = HYPERV_REENLIGHTENMENT_VECTOR,
		.enabled = 1,
	};
	struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};

	if (!hv_reenlightenment_available()) {
		pr_warn("reenlightenment support is unavailable\n");
		return;
	}

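	/* hv_vp_index is allocated in hv_common_init(); bail out if that failed. */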
	if (!hv_vp_index)
		return;

	hv_reenlightenment_cb = cb;

	/* Make sure callback is registered before we write to MSRs */
	wmb();

	re_ctrl.target_vp = hv_vp_index[get_cpu()];

	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));

	put_cpu();
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);

void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;

	if (!hv_reenlightenment_available())
		return;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);

	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);

static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	void **ghcb_va;

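	/* Unmap this CPU's GHCB page that was mapped in hyperv_init_ghcb(). */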
	if (hv_ghcb_pg) {
		ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
		if (*ghcb_va)
			iounmap(*ghcb_va);
		*ghcb_va = NULL;
	}

	hv_common_cpu_die(cpu);

	if (hv_vp_assist_page && hv_vp_assist_page[cpu]) {
		union hv_vp_assist_msr_contents msr = { 0 };
		if (hv_root_partition()) {
			/*
			 * For the root partition the VP assist page is mapped
			 * to the hypervisor-provided page, so unmap it and
			 * clear the pointer here so that hv_cpu_init() maps
			 * the correct page again when the CPU comes back
			 * online.
			 */
			memunmap(hv_vp_assist_page[cpu]);
			hv_vp_assist_page[cpu] = NULL;
			rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
			msr.enable = 0;
		}
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}

	if (hv_reenlightenment_cb == NULL)
		return 0;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/*
		 * Reassign reenlightenment notifications to some other online
		 * CPU or just disable the feature if there are no online CPUs
		 * left (happens on hibernation).
		 */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);

		if (new_cpu < nr_cpu_ids)
			re_ctrl.target_vp = hv_vp_index[new_cpu];
		else
			re_ctrl.enabled = 0;

		wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}

	return 0;
}

static int __init hv_pci_init(void)
{
	bool gen2vm = efi_enabled(EFI_BOOT);

	/*
	 * A Generation-2 VM doesn't support legacy PCI/PCIe, so both
	 * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() ->
	 * pcibios_init() doesn't call pcibios_resource_survey() ->
	 * e820__reserve_resources_late(); as a result, any emulated persistent
	 * memory of E820_TYPE_PRAM (12) via the kernel parameter
	 * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be
	 * detected by register_e820_pmem(). Fix this by directly calling
	 * e820__reserve_resources_late() here: e820__reserve_resources_late()
	 * depends on e820__reserve_resources(), which has been called earlier
	 * from setup_arch(). Note: e820__reserve_resources_late() also adds
	 * any memory of E820_TYPE_PMEM (7) into iomem_resource, and
	 * acpi_nfit_register_region() -> acpi_nfit_insert_resource() ->
	 * region_intersects() returns REGION_INTERSECTS, so the memory of
	 * E820_TYPE_PMEM won't get added twice.
	 *
	 * We return 0 here so that pci_arch_init() won't print the warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (gen2vm) {
		e820__reserve_resources_late();
		return 0;
	}

	/* For Generation-1 VM, we'll proceed in pci_arch_init().  */
	return 1;
}

static int hv_suspend(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	if (hv_root_partition())
		return -EPERM;

	/*
	 * Reset the hypercall page as it is going to be invalidated
	 * across hibernation. Setting hv_hypercall_pg to NULL ensures
	 * that any subsequent hypercall operation fails safely instead of
	 * crashing due to an access of an invalid page. The hypercall page
	 * pointer is restored on resume.
	 */
	hv_hypercall_pg_saved = hv_hypercall_pg;
	hv_hypercall_pg = NULL;

	/* Disable the hypercall page in the hypervisor */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	ret = hv_cpu_die(0);
	return ret;
}

static void hv_resume(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	ret = hv_cpu_init(0);
	WARN_ON(ret);

	/* Re-enable the hypercall page */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address =
		vmalloc_to_pfn(hv_hypercall_pg_saved);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hv_hypercall_pg = hv_hypercall_pg_saved;
	hv_hypercall_pg_saved = NULL;

	/*
	 * Reenlightenment notifications are disabled by hv_cpu_die(0),
	 * reenable them here if hv_reenlightenment_cb was previously set.
	 */
	if (hv_reenlightenment_cb)
		set_hv_tscchange_cb(hv_reenlightenment_cb);
}

/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
static struct syscore_ops hv_syscore_ops = {
	.suspend	= hv_suspend,
	.resume		= hv_resume,
};

static void (* __initdata old_setup_percpu_clockev)(void);

static void __init hv_stimer_setup_percpu_clockev(void)
{
	/*
	 * Ignore any errors in setting up stimer clockevents
	 * as we can run with the LAPIC timer as a fallback.
	 */
	(void)hv_stimer_alloc(false);

	/*
	 * Still register the LAPIC timer, because the direct-mode STIMER is
	 * not supported by old versions of Hyper-V. This also allows users
	 * to switch to LAPIC timer via /sys, if they want to.
	 */
	if (old_setup_percpu_clockev)
		old_setup_percpu_clockev();
}

#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
static u8 __init get_vtl(void)
{
	u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
	struct hv_input_get_vp_registers *input;
	struct hv_output_get_vp_registers *output;
	unsigned long flags;
	u64 ret;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = *this_cpu_ptr(hyperv_pcpu_output_arg);

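	/*
	 * Issue a one-register GET_VP_REGISTERS rep hypercall to read
	 * HV_REGISTER_VSM_VP_STATUS for the current VP; the active VTL is
	 * extracted from the low bits of that register.
	 */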
	memset(input, 0, struct_size(input, names, 1));
	input->partition_id = HV_PARTITION_ID_SELF;
	input->vp_index = HV_VP_INDEX_SELF;
	input->input_vtl.as_uint8 = 0;
	input->names[0] = HV_REGISTER_VSM_VP_STATUS;

	ret = hv_do_hypercall(control, input, output);
	if (hv_result_success(ret)) {
		ret = output->values[0].reg8 & HV_X64_VTL_MASK;
	} else {
		pr_err("Failed to get VTL (error: %lld), exiting...\n", ret);
		BUG();
	}

	local_irq_restore(flags);
	return ret;
}
#else
static inline u8 get_vtl(void) { return 0; }
#endif

/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 * 3. Setup Hyper-V specific APIC entry points.
 */
void __init hyperv_init(void)
{
	u64 guest_id;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int cpuhp;

	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return;

	if (hv_common_init())
		return;

	/*
	 * The VP assist page is useless to a TDX guest: the only use we
	 * would have for it is lazy EOI, which cannot be used with TDX.
	 */
	if (hv_isolation_type_tdx())
		hv_vp_assist_page = NULL;
	else
		hv_vp_assist_page = kcalloc(nr_cpu_ids,
					    sizeof(*hv_vp_assist_page),
					    GFP_KERNEL);
	if (!hv_vp_assist_page) {
		ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;

		if (!hv_isolation_type_tdx())
			goto common_free;
	}

	if (ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
		/* Negotiate GHCB Version. */
		if (!hv_ghcb_negotiate_protocol())
			hv_ghcb_terminate(SEV_TERM_SET_GEN,
					  GHCB_SEV_ES_PROT_UNSUPPORTED);

		hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
		if (!hv_ghcb_pg)
			goto free_vp_assist_page;
	}

	cpuhp = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, "x86/hyperv_init:online",
				  hv_cpu_init, hv_cpu_die);
	if (cpuhp < 0)
		goto free_ghcb_page;

	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 *
	 * A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg:
	 * when the hypercall input is a page, such a VM must pass a decrypted
	 * page to Hyper-V, e.g. hv_post_message() uses the per-CPU page
	 * hyperv_pcpu_input_arg, which is decrypted if no paravisor is present.
	 *
	 * A TDX VM with the paravisor uses hv_hypercall_pg for most hypercalls,
	 * which are handled by the paravisor and the VM must use an encrypted
	 * input page: in such a VM, the hyperv_pcpu_input_arg is encrypted and
	 * used in the hypercalls, e.g. see hv_mark_gpa_visibility() and
	 * hv_arch_irq_unmask(). Such a VM uses TDX GHCI for two hypercalls:
	 * 1. HVCALL_SIGNAL_EVENT: see vmbus_set_event() and _hv_do_fast_hypercall8().
	 * 2. HVCALL_POST_MESSAGE: the input page must be a decrypted page, i.e.
	 * hv_post_message() in such a VM can't use the encrypted hyperv_pcpu_input_arg;
	 * instead, hv_post_message() uses the post_msg_page, which is decrypted
	 * in such a VM and is only used in such a VM.
	 */
	guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	/* With the paravisor, the VM must also write the ID via GHCB/GHCI */
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);

	/* A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg */
	if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
		goto skip_hypercall_pg_init;

	hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
			VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
			VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
			__builtin_return_address(0));
	if (hv_hypercall_pg == NULL)
		goto clean_guest_os_id;

	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;

	if (hv_root_partition()) {
		struct page *pg;
		void *src;

		/*
		 * For the root partition, the hypervisor will set up its
		 * hypercall page. The hypervisor guarantees it will not show
		 * up in the root's address space. The root can't change the
		 * location of the hypercall page.
		 *
		 * Order is important here. We must enable the hypercall page
		 * so it is populated with code, then copy the code to an
		 * executable page.
		 */
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

		pg = vmalloc_to_page(hv_hypercall_pg);
		src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
				MEMREMAP_WB);
		BUG_ON(!src);
		memcpy_to_page(pg, 0, src, HV_HYP_PAGE_SIZE);
		memunmap(src);

		hv_remap_tsc_clocksource();
	} else {
		hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	}

skip_hypercall_pg_init:
	/*
	 * Some versions of Hyper-V that provide IBT in guest VMs have a bug
	 * in that there's no ENDBR64 instruction at the entry to the
	 * hypercall page. Because hypercalls are invoked via an indirect call
	 * to the hypercall page, all hypercall attempts fail when IBT is
	 * enabled, and Linux panics. For such buggy versions, disable IBT.
	 *
	 * Fixed versions of Hyper-V always provide ENDBR64 on the hypercall
	 * page, so if future Linux kernel versions enable IBT for 32-bit
	 * builds, additional hypercall page hackery will be required here
	 * to provide an ENDBR32.
	 */
#ifdef CONFIG_X86_KERNEL_IBT
	if (cpu_feature_enabled(X86_FEATURE_IBT) &&
	    *(u32 *)hv_hypercall_pg != gen_endbr()) {
		setup_clear_cpu_cap(X86_FEATURE_IBT);
		pr_warn("Disabling IBT because of Hyper-V bug\n");
	}
#endif

	/*
	 * hyperv_init() is called before LAPIC is initialized: see
	 * apic_intr_mode_init() -> x86_platform.apic_post_init() and
	 * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
	 * depends on LAPIC, so hv_stimer_alloc() should be called from
	 * x86_init.timers.setup_percpu_clockev.
	 */
	old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
	x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;

	hv_apic_init();

	x86_init.pci.arch_init = hv_pci_init;

	register_syscore_ops(&hv_syscore_ops);

	if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID)
		hv_get_partition_id();

#ifdef CONFIG_PCI_MSI
	/*
	 * If we're running as root, we want to create our own PCI MSI domain.
	 * We can't set this in hv_pci_init because that would be too late.
	 */
	if (hv_root_partition())
		x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
#endif

	/* Query the VM's extended capability once, so that it can be cached. */
	hv_query_ext_cap(0);

	/* Find the VTL */
	ms_hyperv.vtl = get_vtl();

	if (ms_hyperv.vtl > 0) /* non-default VTL */
		hv_vtl_early_init();

	return;

clean_guest_os_id:
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
	cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
free_ghcb_page:
	free_percpu(hv_ghcb_pg);
free_vp_assist_page:
	kfree(hv_vp_assist_page);
	hv_vp_assist_page = NULL;
common_free:
	hv_common_free();
}

/*
 * This routine is called before kexec/kdump; it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	union hv_reference_tsc_msr tsc_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);

	/*
	 * Clear the hypercall page pointer before disabling the page itself,
	 * so that any hypercall attempt fails safely rather than panicking
	 * the kernel on an access to an invalid page.
	 */
	hv_hypercall_pg = NULL;

	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = hv_get_msr(HV_X64_MSR_HYPERCALL);
	hypercall_msr.enable = 0;
	hv_set_msr(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset the TSC page */
	tsc_msr.as_uint64 = hv_get_msr(HV_X64_MSR_REFERENCE_TSC);
	tsc_msr.enable = 0;
	hv_set_msr(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}

void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
	static bool panic_reported;
	u64 guest_id;

	if (in_die && !panic_on_oops)
		return;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

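	/*
	 * Pass the crash details to Hyper-V via the crash MSRs:
	 * P0 = error code, P1 = guest ID, P2 = IP, P3 = RAX, P4 = SP.
	 */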
	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);

bool hv_is_hyperv_initialized(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/*
	 * Ensure that we're really on Hyper-V, and not a KVM or Xen
	 * emulation of Hyper-V
	 */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return false;

	/* A TDX VM with no paravisor uses TDX GHCI call rather than hv_hypercall_pg */
	if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
		return true;
	/*
	 * Verify that earlier initialization succeeded by checking
	 * that the hypercall page is set up
	 */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);