/*
 * VMware Detection code.
 *
 * Copyright (C) 2008, VMware, Inc.
 * Author : Alok N Kataria <akataria@vmware.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/efi.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <linux/sched/cputime.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
#include <asm/vmware.h>
#include <asm/svm.h>

#undef pr_fmt
#define pr_fmt(fmt)	"vmware: " fmt

#define CPUID_VMWARE_INFO_LEAF				0x40000000
#define CPUID_VMWARE_FEATURES_LEAF			0x40000010

#define GETVCPU_INFO_LEGACY_X2APIC			BIT(3)
#define GETVCPU_INFO_VCPU_RESERVED			BIT(31)

#define STEALCLOCK_NOT_AVAILABLE	(-1)
#define STEALCLOCK_DISABLED		0
#define STEALCLOCK_ENABLED		1

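/*
 * Layout of the steal time buffer shared with the hypervisor: an 8-byte
 * counter followed by 56 reserved bytes, 64 bytes in total.
 */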
struct vmware_steal_time {
	union {
		u64 clock;	/* stolen time counter in units of vtsc */
		struct {
			/* only for little-endian */
			u32 clock_low;
			u32 clock_high;
		};
	};
	u64 reserved[7];
};

static unsigned long vmware_tsc_khz __ro_after_init;
static u8 vmware_hypercall_mode __ro_after_init;

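/*
 * Slow path for hypercalls with more in/out registers than the inline
 * wrappers in <asm/vmware.h> handle directly. The transport is picked
 * once at boot: VMCALL on Intel CPUs, VMMCALL on AMD CPUs, and otherwise
 * the legacy backdoor, an IN from the magic I/O port with the magic
 * value loaded in %eax.
 */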
unsigned long vmware_hypercall_slow(unsigned long cmd,
				    unsigned long in1, unsigned long in3,
				    unsigned long in4, unsigned long in5,
				    u32 *out1, u32 *out2, u32 *out3,
				    u32 *out4, u32 *out5)
{
	unsigned long out0, rbx, rcx, rdx, rsi, rdi;

	switch (vmware_hypercall_mode) {
	case CPUID_VMWARE_FEATURES_ECX_VMCALL:
		asm_inline volatile ("vmcall"
				: "=a" (out0), "=b" (rbx), "=c" (rcx),
				  "=d" (rdx), "=S" (rsi), "=D" (rdi)
				: "a" (VMWARE_HYPERVISOR_MAGIC),
				  "b" (in1),
				  "c" (cmd),
				  "d" (in3),
				  "S" (in4),
				  "D" (in5)
				: "cc", "memory");
		break;
	case CPUID_VMWARE_FEATURES_ECX_VMMCALL:
		asm_inline volatile ("vmmcall"
				: "=a" (out0), "=b" (rbx), "=c" (rcx),
				  "=d" (rdx), "=S" (rsi), "=D" (rdi)
				: "a" (VMWARE_HYPERVISOR_MAGIC),
				  "b" (in1),
				  "c" (cmd),
				  "d" (in3),
				  "S" (in4),
				  "D" (in5)
				: "cc", "memory");
		break;
	default:
		asm_inline volatile ("movw %[port], %%dx; inl (%%dx), %%eax"
				: "=a" (out0), "=b" (rbx), "=c" (rcx),
				  "=d" (rdx), "=S" (rsi), "=D" (rdi)
				: [port] "i" (VMWARE_HYPERVISOR_PORT),
				  "a" (VMWARE_HYPERVISOR_MAGIC),
				  "b" (in1),
				  "c" (cmd),
				  "d" (in3),
				  "S" (in4),
				  "D" (in5)
				: "cc", "memory");
		break;
	}

	if (out1)
		*out1 = rbx;
	if (out2)
		*out2 = rcx;
	if (out3)
		*out3 = rdx;
	if (out4)
		*out4 = rsi;
	if (out5)
		*out5 = rdi;

	return out0;
}

static inline int __vmware_platform(void)
{
	u32 eax, ebx, ecx;

	eax = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &ebx, &ecx);
	return eax != UINT_MAX && ebx == VMWARE_HYPERVISOR_MAGIC;
}

static unsigned long vmware_get_tsc_khz(void)
{
	return vmware_tsc_khz;
}

#ifdef CONFIG_PARAVIRT
static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
static bool vmw_sched_clock __initdata = true;
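/*
 * Written by the hypervisor, so the buffer must be in decrypted (shared)
 * memory under SEV, and is kept cache-line aligned.
 */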
static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(64);
static bool has_steal_clock;
static bool steal_acc __initdata = true; /* steal time accounting */

static __init int setup_vmw_sched_clock(char *s)
{
	vmw_sched_clock = false;
	return 0;
}
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);

static __init int parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

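/*
 * Paravirt sched_clock: convert the raw TSC to nanoseconds with the
 * precomputed mult/shift pair and subtract the boot-time offset:
 *
 *	ns = ((tsc * cyc2ns_mul) >> cyc2ns_shift) - cyc2ns_offset
 */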
static noinstr u64 vmware_sched_clock(void)
{
	unsigned long long ns;

	ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
			     vmware_cyc2ns.cyc2ns_shift);
	ns -= vmware_cyc2ns.cyc2ns_offset;
	return ns;
}

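/*
 * clocks_calc_mult_shift() computes a mult/shift pair such that
 * (cycles * cyc2ns_mul) >> cyc2ns_shift converts cycles of a counter
 * running at vmware_tsc_khz into nanoseconds. The TSC value sampled here
 * becomes the zero point of vmware_sched_clock().
 */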
static void __init vmware_cyc2ns_setup(void)
{
	struct cyc2ns_data *d = &vmware_cyc2ns;
	unsigned long long tsc_now = rdtsc();

	clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
			       vmware_tsc_khz, NSEC_PER_MSEC, 0);
	d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
					   d->cyc2ns_shift);

	pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
}

static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
{
	u32 info;

	return vmware_hypercall5(VMWARE_CMD_STEALCLOCK, 0, 0, addr_hi, addr_lo,
				 &info);
}

static bool stealclock_enable(phys_addr_t pa)
{
	return vmware_cmd_stealclock(upper_32_bits(pa),
				     lower_32_bits(pa)) == STEALCLOCK_ENABLED;
}

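/*
 * Passing address 1 (not a valid, page-aligned buffer address) presumably
 * tells the hypervisor to stop updating steal time for this vCPU. Either
 * way, the command returns STEALCLOCK_NOT_AVAILABLE when the interface is
 * absent, which is what vmware_is_stealclock_available() probes for.
 */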
static int __stealclock_disable(void)
{
	return vmware_cmd_stealclock(0, 1);
}

static void stealclock_disable(void)
{
	__stealclock_disable();
}

static bool vmware_is_stealclock_available(void)
{
	return __stealclock_disable() != STEALCLOCK_NOT_AVAILABLE;
}

/**
 * vmware_steal_clock() - read the per-cpu steal clock
 * @cpu: the cpu number whose steal clock we want to read
 *
 * The function reads the steal clock if we are on a 64-bit system, otherwise
 * reads it in parts, checking that the high part didn't change in the
 * meantime.
 *
 * Return:
 *	The steal clock reading in ns.
 */
static u64 vmware_steal_clock(int cpu)
{
	struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
	u64 clock;

	if (IS_ENABLED(CONFIG_64BIT))
		clock = READ_ONCE(steal->clock);
	else {
		u32 initial_high, low, high;

		do {
			initial_high = READ_ONCE(steal->clock_high);
			/* Do not reorder initial_high and high readings */
			virt_rmb();
			low = READ_ONCE(steal->clock_low);
			/* Keep low reading in between */
			virt_rmb();
			high = READ_ONCE(steal->clock_high);
		} while (initial_high != high);

		clock = ((u64)high << 32) | low;
	}

	return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
			       vmware_cyc2ns.cyc2ns_shift);
}

static void vmware_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);

	if (!has_steal_clock)
		return;

	if (!stealclock_enable(slow_virt_to_phys(st))) {
		has_steal_clock = false;
		return;
	}

	pr_info("vmware-stealtime: cpu %d, pa %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static void vmware_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	stealclock_disable();
}

static void vmware_guest_cpu_init(void)
{
	if (has_steal_clock)
		vmware_register_steal_time();
}

static void vmware_pv_guest_cpu_reboot(void *unused)
{
	vmware_disable_steal_time();
}

static int vmware_pv_reboot_notify(struct notifier_block *nb,
				   unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(vmware_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block vmware_pv_reboot_nb = {
	.notifier_call = vmware_pv_reboot_notify,
};

#ifdef CONFIG_SMP
static void __init vmware_smp_prepare_boot_cpu(void)
{
	vmware_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static int vmware_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	vmware_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int vmware_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	vmware_disable_steal_time();
	local_irq_enable();
	return 0;
}
#endif

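/*
 * Flip the scheduler's steal time static keys once boot has decided
 * whether the steal clock is usable. paravirt_steal_rq_enabled
 * additionally accounts steal time in the runqueue clock, unless
 * "no-steal-acc" was given on the command line.
 */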
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

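/*
 * Wire up the paravirt hooks: no port I/O delay under VMware, a TSC-based
 * sched_clock unless "no-vmw-sched-clock" was given, and the steal clock
 * plus its CPU hotplug and reboot plumbing when the hypervisor offers it.
 */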
static void __init vmware_paravirt_ops_setup(void)
{
	pv_info.name = "VMware hypervisor";
	pv_ops.cpu.io_delay = paravirt_nop;

	if (vmware_tsc_khz == 0)
		return;

	vmware_cyc2ns_setup();

	if (vmw_sched_clock)
		paravirt_set_sched_clock(vmware_sched_clock);

	if (vmware_is_stealclock_available()) {
		has_steal_clock = true;
		static_call_update(pv_steal_clock, vmware_steal_clock);

		/* We use reboot notifier only to disable steal clock */
		register_reboot_notifier(&vmware_pv_reboot_nb);

#ifdef CONFIG_SMP
		smp_ops.smp_prepare_boot_cpu =
			vmware_smp_prepare_boot_cpu;
		if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					      "x86/vmware:online",
					      vmware_cpu_online,
					      vmware_cpu_down_prepare) < 0)
			pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n");
#else
		vmware_guest_cpu_init();
#endif
	}
}
#else
#define vmware_paravirt_ops_setup() do {} while (0)
#endif

/*
 * The VMware hypervisor takes care of exporting a reliable TSC to the guest.
 * Still, due to timing differences when running on virtual cpus, the TSC can
 * be marked as unstable in some cases. For example, the TSC sync check at
 * bootup can fail due to a marginal offset between vcpus' TSCs (though the
 * TSCs do not drift from each other). Also, the ACPI PM timer clocksource
 * is not suitable as a watchdog when running on a hypervisor because the
 * kernel may miss a wrap of the counter if the vcpu is descheduled for a
 * long time. To skip these checks at runtime we set these capability bits,
 * so that the kernel can just trust the hypervisor with providing a
 * reliable virtual TSC that is suitable for timekeeping.
 */
static void __init vmware_set_capabilities(void)
{
	setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
	if (vmware_tsc_khz)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
		setup_force_cpu_cap(X86_FEATURE_VMCALL);
	else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
		setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL);
}

static void __init vmware_platform_setup(void)
{
	u32 eax, ebx, ecx;
	u64 lpj, tsc_khz;

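	/*
	 * GETHZ returns the TSC frequency in Hz as a 64-bit EBX:EAX pair,
	 * with EBX == UINT_MAX signalling failure. ECX holds the host bus
	 * clock frequency in Hz, which seeds the LAPIC timer period below.
	 */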
	eax = vmware_hypercall3(VMWARE_CMD_GETHZ, UINT_MAX, &ebx, &ecx);

	if (ebx != UINT_MAX) {
		lpj = tsc_khz = eax | (((u64)ebx) << 32);
		do_div(tsc_khz, 1000);
		WARN_ON(tsc_khz >> 32);
		pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
			(unsigned long) tsc_khz / 1000,
			(unsigned long) tsc_khz % 1000);

		if (!preset_lpj) {
			do_div(lpj, HZ);
			preset_lpj = lpj;
		}

		vmware_tsc_khz = tsc_khz;
		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
		x86_platform.calibrate_cpu = vmware_get_tsc_khz;

#ifdef CONFIG_X86_LOCAL_APIC
		/* Skip lapic calibration since we know the bus frequency. */
		lapic_timer_period = ecx / HZ;
		pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
			ecx);
#endif
	} else {
		pr_warn("Failed to get TSC freq from the hypervisor\n");
	}

	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !efi_enabled(EFI_BOOT))
		x86_init.mpparse.find_mptable = mpparse_find_mptable;

	vmware_paravirt_ops_setup();

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

	vmware_set_capabilities();
}

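/*
 * Read the features leaf: ECX advertises which hypercall instruction the
 * hypervisor prefers. The result is cached in vmware_hypercall_mode and
 * later turned into a synthetic CPU feature bit by
 * vmware_set_capabilities().
 */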
static u8 __init vmware_select_hypercall(void)
{
	int eax, ebx, ecx, edx;

	cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx);
	return (ecx & (CPUID_VMWARE_FEATURES_ECX_VMMCALL |
		       CPUID_VMWARE_FEATURES_ECX_VMCALL));
}

/*
 * When checking the DMI string information, just checking the product
 * serial key should be enough, as it always holds a VMware-specific
 * string when running under the VMware hypervisor.
 * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
 * intentionally defaults to 0.
 */
static u32 __init vmware_platform(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		unsigned int eax;
		unsigned int hyper_vendor_id[3];

		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) {
			if (eax >= CPUID_VMWARE_FEATURES_LEAF)
				vmware_hypercall_mode =
					vmware_select_hypercall();

			pr_info("hypercall mode: 0x%02x\n",
				(unsigned int) vmware_hypercall_mode);

			return CPUID_VMWARE_INFO_LEAF;
		}
	} else if (dmi_available && dmi_name_in_serial("VMware") &&
		   __vmware_platform())
		return 1;

	return 0;
}

/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
	u32 eax;

	eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
	return !(eax & GETVCPU_INFO_VCPU_RESERVED) &&
	       (eax & GETVCPU_INFO_LEGACY_X2APIC);
}

#ifdef CONFIG_INTEL_TDX_GUEST
/*
 * TDCALL[TDG.VP.VMCALL] uses %rax (arg0) and %rcx (arg2). Therefore,
 * we remap those registers to %r12 and %r13, respectively.
 */
unsigned long vmware_tdx_hypercall(unsigned long cmd,
				   unsigned long in1, unsigned long in3,
				   unsigned long in4, unsigned long in5,
				   u32 *out1, u32 *out2, u32 *out3,
				   u32 *out4, u32 *out5)
{
	struct tdx_module_args args = {};

	if (!hypervisor_is_type(X86_HYPER_VMWARE)) {
		pr_warn_once("Incorrect usage\n");
		return ULONG_MAX;
	}

	if (cmd & ~VMWARE_CMD_MASK) {
		pr_warn_once("Out of range command %lx\n", cmd);
		return ULONG_MAX;
	}

	args.rbx = in1;
	args.rdx = in3;
	args.rsi = in4;
	args.rdi = in5;
	args.r10 = VMWARE_TDX_VENDOR_LEAF;
	args.r11 = VMWARE_TDX_HCALL_FUNC;
	args.r12 = VMWARE_HYPERVISOR_MAGIC;
	args.r13 = cmd;
	/* CPL */
	args.r15 = 0;

	__tdx_hypercall(&args);

	if (out1)
		*out1 = args.rbx;
	if (out2)
		*out2 = args.r13;
	if (out3)
		*out3 = args.rdx;
	if (out4)
		*out4 = args.rsi;
	if (out5)
		*out5 = args.rdi;

	return args.r12;
}
EXPORT_SYMBOL_GPL(vmware_tdx_hypercall);
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
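/*
 * Under SEV-ES the hypervisor cannot read guest registers directly, so
 * the #VC handler uses these callbacks to pass the backdoor call's
 * register arguments to the hypervisor through the shared GHCB and to
 * copy the results back afterwards.
 */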
static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
					struct pt_regs *regs)
{
	/* Copy VMware-specific hypercall parameters to the GHCB */
	ghcb_set_rip(ghcb, regs->ip);
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
	ghcb_set_rdi(ghcb, regs->di);
	ghcb_set_rbp(ghcb, regs->bp);
}

static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	if (!(ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb) &&
	      ghcb_rsi_is_valid(ghcb) &&
	      ghcb_rdi_is_valid(ghcb) &&
	      ghcb_rbp_is_valid(ghcb)))
		return false;

	regs->bx = ghcb_get_rbx(ghcb);
	regs->cx = ghcb_get_rcx(ghcb);
	regs->dx = ghcb_get_rdx(ghcb);
	regs->si = ghcb_get_rsi(ghcb);
	regs->di = ghcb_get_rdi(ghcb);
	regs->bp = ghcb_get_rbp(ghcb);

	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_vmware = {
	.name				= "VMware",
	.detect				= vmware_platform,
	.type				= X86_HYPER_VMWARE,
	.init.init_platform		= vmware_platform_setup,
	.init.x2apic_available		= vmware_legacy_x2apic_available,
#ifdef CONFIG_AMD_MEM_ENCRYPT
	.runtime.sev_es_hcall_prepare	= vmware_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= vmware_sev_es_hcall_finish,
#endif
};