#include "kvm/8250-serial.h"
#include "kvm/kvm.h"
#include "kvm/ioport.h"
#include "kvm/virtio-console.h"

#include <linux/kvm.h>

#include <ctype.h>
#include <unistd.h>
#include <elf.h>

struct kvm_ext kvm_req_ext[] = {
	{ 0, 0 }
};

u64 kvm__arch_default_ram_address(void)
{
	return 0;
}

void kvm__arch_validate_cfg(struct kvm *kvm)
{
}

void kvm__arch_read_term(struct kvm *kvm)
{
	virtio_console__inject_interrupt(kvm);
}

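/*
 * Register guest RAM with KVM. If the requested RAM fits below the MMIO
 * window it is mapped as a single region; otherwise it is split into one
 * region below KVM_MMIO_START and one region starting just above the MMIO
 * window, both backed by the same contiguous host mapping.
 */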
void kvm__init_ram(struct kvm *kvm)
{
	u64 phys_start, phys_size;
	void *host_mem;

	if (kvm->ram_size <= KVM_MMIO_START) {
		/* one region for all memory */
		phys_start = 0;
		phys_size = kvm->ram_size;
		host_mem = kvm->ram_start;

		kvm__register_ram(kvm, phys_start, phys_size, host_mem);
	} else {
		/* one region for memory that fits below MMIO range */
		phys_start = 0;
		phys_size = KVM_MMIO_START;
		host_mem = kvm->ram_start;

		kvm__register_ram(kvm, phys_start, phys_size, host_mem);

		/* one region for rest of memory */
		phys_start = KVM_MMIO_START + KVM_MMIO_SIZE;
		phys_size = kvm->ram_size - KVM_MMIO_START;
		host_mem = kvm->ram_start + KVM_MMIO_START;

		kvm__register_ram(kvm, phys_start, phys_size, host_mem);
	}
}

void kvm__arch_delete_ram(struct kvm *kvm)
{
	munmap(kvm->ram_start, kvm->ram_size);
}

void kvm__arch_set_cmdline(char *cmdline, bool video)
{
}

/* Architecture-specific KVM init */
void kvm__arch_init(struct kvm *kvm)
{
	int ret;

	kvm->ram_size = kvm->cfg.ram_size;
	kvm->ram_start = mmap_anon_or_hugetlbfs(kvm, kvm->cfg.hugetlbfs_path,
						kvm->ram_size);

	if (kvm->ram_start == MAP_FAILED)
		die("out of memory");

	madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);

	ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
	if (ret < 0)
		die_perror("KVM_CREATE_IRQCHIP ioctl");
}

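/*
 * Set an interrupt line on the in-kernel irqchip: the level is normalised
 * to 0 or 1 and handed to the KVM_IRQ_LINE ioctl.
 */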
void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	struct kvm_irq_level irq_level;
	int ret;

	irq_level.irq = irq;
	irq_level.level = level ? 1 : 0;

	ret = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level);
	if (ret < 0)
		die_perror("KVM_IRQ_LINE ioctl");
}

void kvm__irq_trigger(struct kvm *kvm, int irq)
{
	struct kvm_irq_level irq_level;
	int ret;

	irq_level.irq = irq;
	irq_level.level = 1;

	ret = ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level);
	if (ret < 0)
		die_perror("KVM_IRQ_LINE ioctl");
}

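/*
 * Stubs: host VM support is assumed, loading a separate firmware image is
 * not supported, and no architecture firmware setup is required.
 */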
bool kvm__arch_cpu_supports_vm(void)
{
	return true;
}

bool kvm__load_firmware(struct kvm *kvm, const char *firmware_filename)
{
	return false;
}

int kvm__arch_setup_firmware(struct kvm *kvm)
{
	return 0;
}

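/*
 * Build the kernel command line in guest RAM: the memory layout ("mem=...")
 * plus the user-supplied command line are written at offset 0x2000, then
 * split into NUL-terminated words with an argv array of KSEG0 pointers
 * built at offset 0x3000. argc and the KSEG0 address of the argv array are
 * stored in kvm->arch.
 */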
static void kvm__mips_install_cmdline(struct kvm *kvm)
{
	char *p = kvm->ram_start;
	u64 cmdline_offset = 0x2000;
	u64 argv_start = 0x3000;
	u64 argv_offset = argv_start;
	u64 argc = 0;

	if ((u64) kvm->ram_size <= KVM_MMIO_START)
		sprintf(p + cmdline_offset, "mem=0x%llx@0 ",
			(unsigned long long)kvm->ram_size);
	else
		sprintf(p + cmdline_offset, "mem=0x%llx@0 mem=0x%llx@0x%llx ",
			(unsigned long long)KVM_MMIO_START,
			(unsigned long long)kvm->ram_size - KVM_MMIO_START,
			(unsigned long long)(KVM_MMIO_START + KVM_MMIO_SIZE));

	if (kvm->cfg.real_cmdline)
		strcat(p + cmdline_offset, kvm->cfg.real_cmdline); /* maximum size is 2K */

	while (p[cmdline_offset]) {
		if (!isspace(p[cmdline_offset])) {
			if (kvm->arch.is64bit) {
				*(u64 *)(p + argv_offset) = 0xffffffff80000000ull + cmdline_offset;
				argv_offset += sizeof(u64);
			} else {
				*(u32 *)(p + argv_offset) = 0x80000000u + cmdline_offset;
				argv_offset += sizeof(u32);
			}
			argc++;
			while (p[cmdline_offset] && !isspace(p[cmdline_offset]))
				cmdline_offset++;
			continue;
		}
		/* Must be space characters, skip over them. */
		while (p[cmdline_offset] && isspace(p[cmdline_offset])) {
			p[cmdline_offset] = 0;
			cmdline_offset++;
		}
	}
	kvm->arch.argc = argc;
	kvm->arch.argv = 0xffffffff80000000ull + argv_start;
}

/* Load the kernel at the 16M point (0x1000000). */
#define KERNEL_LOAD_ADDR 0x1000000

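/*
 * Load a raw (non-ELF) kernel image: the whole file is read into guest RAM
 * at KERNEL_LOAD_ADDR and the entry point is set to the 64-bit KSEG0
 * mapping of that address; a 64-bit kernel is assumed.
 */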
static bool load_flat_binary(struct kvm *kvm, int fd_kernel)
{
	void *p;
	void *k_start;
	ssize_t kernel_size;

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = k_start = guest_flat_to_host(kvm, KERNEL_LOAD_ADDR);

	kernel_size = read_file(fd_kernel, p,
				kvm->cfg.ram_size - KERNEL_LOAD_ADDR);
	if (kernel_size == -1) {
		if (errno == ENOMEM)
			die("kernel too big for guest memory");
		else
			die_perror("kernel read");
	}

	kvm->arch.is64bit = true;
	kvm->arch.entry_point = 0xffffffff81000000ull;

	pr_info("Loaded kernel to 0x%x (%zd bytes)", KERNEL_LOAD_ADDR,
		kernel_size);

	return true;
}

struct kvm__arch_elf_info {
	u64 load_addr;
	u64 entry_point;
	size_t len;
	size_t offset;
};

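/*
 * Pull the entry point and the first PT_LOAD program header out of a
 * 64-bit ELF image. Load addresses in KSEG0/KSEG1 or XKPHYS are converted
 * to physical addresses.
 */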
static bool kvm__arch_get_elf_64_info(Elf64_Ehdr *ehdr, int fd_kernel,
				      struct kvm__arch_elf_info *ei)
{
	int i;
	Elf64_Phdr phdr;

	if (ehdr->e_phentsize != sizeof(phdr)) {
		pr_info("Incompatible ELF PHENTSIZE %d", ehdr->e_phentsize);
		return false;
	}

	ei->entry_point = ehdr->e_entry;

	if (lseek(fd_kernel, ehdr->e_phoff, SEEK_SET) < 0)
		die_perror("lseek");

	phdr.p_type = PT_NULL;
	for (i = 0; i < ehdr->e_phnum; i++) {
		if (read_in_full(fd_kernel, &phdr, sizeof(phdr)) != sizeof(phdr)) {
			pr_info("Couldn't read %d bytes for ELF PHDR.", (int)sizeof(phdr));
			return false;
		}
		if (phdr.p_type == PT_LOAD)
			break;
	}
	if (phdr.p_type != PT_LOAD) {
		pr_info("No PT_LOAD Program Header found.");
		return false;
	}

	ei->load_addr = phdr.p_paddr;

	if ((ei->load_addr & 0xffffffffc0000000ull) == 0xffffffff80000000ull)
		ei->load_addr &= 0x1ffffffful; /* Convert KSEG{0,1} to physical. */
	if ((ei->load_addr & 0xc000000000000000ull) == 0x8000000000000000ull)
		ei->load_addr &= 0x07ffffffffffffffull; /* Convert XKPHYS to physical. */

	ei->len = phdr.p_filesz;
	ei->offset = phdr.p_offset;

	return true;
}

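/*
 * 32-bit variant of the above: the entry point and load address are
 * sign-extended, and only the KSEG0/KSEG1 conversion applies.
 */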
static bool kvm__arch_get_elf_32_info(Elf32_Ehdr *ehdr, int fd_kernel,
				      struct kvm__arch_elf_info *ei)
{
	int i;
	Elf32_Phdr phdr;

	if (ehdr->e_phentsize != sizeof(phdr)) {
		pr_info("Incompatible ELF PHENTSIZE %d", ehdr->e_phentsize);
		return false;
	}

	ei->entry_point = (s64)((s32)ehdr->e_entry);

	if (lseek(fd_kernel, ehdr->e_phoff, SEEK_SET) < 0)
		die_perror("lseek");

	phdr.p_type = PT_NULL;
	for (i = 0; i < ehdr->e_phnum; i++) {
		if (read_in_full(fd_kernel, &phdr, sizeof(phdr)) != sizeof(phdr)) {
			pr_info("Couldn't read %d bytes for ELF PHDR.", (int)sizeof(phdr));
			return false;
		}
		if (phdr.p_type == PT_LOAD)
			break;
	}
	if (phdr.p_type != PT_LOAD) {
		pr_info("No PT_LOAD Program Header found.");
		return false;
	}

	ei->load_addr = (s64)((s32)phdr.p_paddr);

	if ((ei->load_addr & 0xffffffffc0000000ull) == 0xffffffff80000000ull)
		ei->load_addr &= 0x1fffffffull; /* Convert KSEG{0,1} to physical. */

	ei->len = phdr.p_filesz;
	ei->offset = phdr.p_offset;

	return true;
}

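/*
 * Load an ELF kernel image: verify that it is a MIPS executable, extract
 * the load/entry information for its first PT_LOAD segment using the
 * helpers above, then copy that segment into guest memory.
 */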
static bool load_elf_binary(struct kvm *kvm, int fd_kernel)
{
	union {
		Elf64_Ehdr ehdr;
		Elf32_Ehdr ehdr32;
	} eh;

	size_t nr;
	char *p;
	struct kvm__arch_elf_info ei;

	nr = read(fd_kernel, &eh, sizeof(eh));
	if (nr != sizeof(eh)) {
		pr_info("Couldn't read %d bytes for ELF header.", (int)sizeof(eh));
		return false;
	}

	if (eh.ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
	    eh.ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
	    eh.ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
	    eh.ehdr.e_ident[EI_MAG3] != ELFMAG3 ||
	    (eh.ehdr.e_ident[EI_CLASS] != ELFCLASS64 && eh.ehdr.e_ident[EI_CLASS] != ELFCLASS32) ||
	    eh.ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
		pr_info("Incompatible ELF header.");
		return false;
	}
	if (eh.ehdr.e_type != ET_EXEC || eh.ehdr.e_machine != EM_MIPS) {
		pr_info("Incompatible ELF not MIPS EXEC.");
		return false;
	}

	if (eh.ehdr.e_ident[EI_CLASS] == ELFCLASS64) {
		if (!kvm__arch_get_elf_64_info(&eh.ehdr, fd_kernel, &ei))
			return false;
		kvm->arch.is64bit = true;
	} else {
		if (!kvm__arch_get_elf_32_info(&eh.ehdr32, fd_kernel, &ei))
			return false;
		kvm->arch.is64bit = false;
	}

336
337 kvm->arch.entry_point = ei.entry_point;
338
339 if (lseek(fd_kernel, ei.offset, SEEK_SET) < 0)
340 die_perror("lseek");
341
342 p = guest_flat_to_host(kvm, ei.load_addr);
343
344 pr_info("ELF Loading 0x%lx bytes from 0x%llx to 0x%llx",
345 (unsigned long)ei.len, (unsigned long long)ei.offset,
346 (unsigned long long)ei.load_addr);
347
348 if (read_in_full(fd_kernel, p, ei.len) != (ssize_t)ei.len)
349 die_perror("read");
350
351 return true;
352 }
353
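/*
 * Kernel image entry point: initrds are rejected, an ELF image is tried
 * first (and gets the command line installed), and a flat binary is used
 * as the fallback.
 */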
bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
				 const char *kernel_cmdline)
{
	if (fd_initrd != -1) {
		pr_err("Initrd not supported on MIPS.");
		return false;
	}

	if (load_elf_binary(kvm, fd_kernel)) {
		kvm__mips_install_cmdline(kvm);
		return true;
	}

	return load_flat_binary(kvm, fd_kernel);
}

void ioport__map_irq(u8 *irq)
{
}

void serial8250__inject_sysrq(struct kvm *kvm, char sysrq)
{
}