#include "kvm/kvm.h"

#include <asm/image.h>

#include <linux/byteorder.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>

#include <kvm/util.h>

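/*
 * Parse a Linux-style cpulist (e.g. "0-3,8") and store the resulting set
 * of host CPUs in kvm->arch.vcpu_affinity_cpuset, which is presumably
 * used later to pin the vCPU threads.
 */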
int vcpu_affinity_parser(const struct option *opt, const char *arg, int unset)
{
	struct kvm *kvm = opt->ptr;
	const char *cpulist = arg;
	cpumask_t *cpumask;
	int cpu, ret;

	kvm->cfg.arch.vcpu_affinity = cpulist;

	cpumask = calloc(1, cpumask_size());
	if (!cpumask)
		die_perror("calloc");

	ret = cpulist_parse(cpulist, cpumask);
	if (ret) {
		free(cpumask);
		return ret;
	}

	kvm->arch.vcpu_affinity_cpuset = CPU_ALLOC(NR_CPUS);
	if (!kvm->arch.vcpu_affinity_cpuset)
		die_perror("CPU_ALLOC");
	CPU_ZERO_S(CPU_ALLOC_SIZE(NR_CPUS), kvm->arch.vcpu_affinity_cpuset);

	for_each_cpu(cpu, cpumask)
		CPU_SET(cpu, kvm->arch.vcpu_affinity_cpuset);

	return 0;
}

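/*
 * Sanity-check the memory layout: guest RAM must not overlap the I/O
 * region below ARM_MEMORY_AREA, and an AArch32 guest cannot have RAM
 * extending above 4GB.
 */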
void kvm__arch_validate_cfg(struct kvm *kvm)
{
	if (kvm->cfg.ram_addr < ARM_MEMORY_AREA) {
		die("RAM address is below the I/O region ending at %luGB",
		    ARM_MEMORY_AREA >> 30);
	}

	if (kvm->cfg.arch.aarch32_guest &&
	    kvm->cfg.ram_addr + kvm->cfg.ram_size > SZ_4G) {
		die("RAM extends above 4GB");
	}
}

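/* By default, guest RAM starts right after the I/O region. */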
u64 kvm__arch_default_ram_address(void)
{
	return ARM_MEMORY_AREA;
}

/*
 * Return the TEXT_OFFSET value that the guest kernel expects. Note
 * that pre-3.17 kernels expose this value using the native endianness
 * instead of Little-Endian. BE kernels of this vintage may fail to
 * boot. See Documentation/arm64/booting.rst in your local kernel tree.
 */
unsigned long long kvm__arch_get_kern_offset(struct kvm *kvm, int fd)
{
	struct arm64_image_header header;
	off_t cur_offset;
	ssize_t size;
	const char *debug_str;

	/* the 32bit kernel offset is a well known value */
	if (kvm->cfg.arch.aarch32_guest)
		return 0x8000;

	cur_offset = lseek(fd, 0, SEEK_CUR);
	if (cur_offset == (off_t)-1 ||
	    lseek(fd, 0, SEEK_SET) == (off_t)-1) {
		debug_str = "Failed to seek in kernel image file";
		goto default_offset;
	}

	size = xread(fd, &header, sizeof(header));
	if (size < 0 || (size_t)size < sizeof(header))
		die("Failed to read kernel image header");

	lseek(fd, cur_offset, SEEK_SET);

	if (memcmp(&header.magic, ARM64_IMAGE_MAGIC, sizeof(header.magic))) {
		debug_str = "Kernel image magic not matching";
		goto default_offset;
	}

	if (le64_to_cpu(header.image_size))
		return le64_to_cpu(header.text_offset);

	debug_str = "Image size is 0";
default_offset:
	pr_debug("%s, assuming TEXT_OFFSET to be 0x80000", debug_str);
	return 0x80000;
}

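/*
 * Ask KVM for the maximum IPA size (in bits) it supports for a VM;
 * 0 means the KVM_CAP_ARM_VM_IPA_SIZE capability is not available.
 */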
int kvm__arch_get_ipa_limit(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
	if (ret <= 0)
		ret = 0;

	return ret;
}

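/*
 * Select the VM type passed to KVM_CREATE_VM: 0 on older kernels
 * without configurable IPA sizes, otherwise the smallest IPA size
 * (at least 32 bits) that covers the last byte of guest RAM.
 */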
int kvm__get_vm_type(struct kvm *kvm)
{
	unsigned int ipa_bits, max_ipa_bits;
	unsigned long max_ipa;

	/* If we're running on an old kernel, use 0 as the VM type */
	max_ipa_bits = kvm__arch_get_ipa_limit(kvm);
	if (!max_ipa_bits)
		return 0;

	/* Otherwise, compute the minimal required IPA size */
	max_ipa = kvm->cfg.ram_addr + kvm->cfg.ram_size - 1;
	ipa_bits = max(32, fls_long(max_ipa));
	pr_debug("max_ipa %lx ipa_bits %d max_ipa_bits %d",
		 max_ipa, ipa_bits, max_ipa_bits);

	if (ipa_bits > max_ipa_bits)
		die("Memory too large for this system (needs %d bits, %d available)",
		    ipa_bits, max_ipa_bits);

	return KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits);
}

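/*
 * Opt the guest into the Memory Tagging Extension when the host
 * supports KVM_CAP_ARM_MTE, unless the user disabled it or the
 * guest is AArch32.
 */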
void kvm__arch_enable_mte(struct kvm *kvm)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_ARM_MTE,
	};

	if (kvm->cfg.arch.aarch32_guest) {
		pr_debug("MTE is incompatible with AArch32");
		return;
	}

	if (kvm->cfg.arch.mte_disabled) {
		pr_debug("MTE disabled by user");
		return;
	}

	if (!kvm__supports_extension(kvm, KVM_CAP_ARM_MTE)) {
		pr_debug("MTE capability not available");
		return;
	}

	if (ioctl(kvm->vm_fd, KVM_ENABLE_CAP, &cap))
		die_perror("KVM_ENABLE_CAP(KVM_CAP_ARM_MTE)");

	pr_debug("MTE capability enabled");
}