#include "kvm/kvm.h"
#include "kvm/util.h"
#include "kvm/8250-serial.h"
#include "kvm/virtio-console.h"
#include "kvm/fdt.h"

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/sizes.h>

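/*
 * KVM capabilities required on RISC-V. The generic code is expected to
 * verify each entry at VM creation time; the zero entry terminates the
 * list.
 */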
struct kvm_ext kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_ONE_REG) },
	{ 0, 0 },
};

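/* Default base address of guest physical RAM when none is specified. */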
u64 kvm__arch_default_ram_address(void)
{
	return RISCV_RAM;
}

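/* No RISC-V specific restrictions on the VM configuration so far. */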
void kvm__arch_validate_cfg(struct kvm *kvm)
{
}

bool kvm__arch_cpu_supports_vm(void)
{
	/* The KVM capability check is enough. */
	return true;
}

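/*
 * Register the already mapped guest RAM with KVM and record where it
 * starts in the guest physical address space.
 */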
void kvm__init_ram(struct kvm *kvm)
{
	int err;
	u64 phys_start, phys_size;
	void *host_mem;

	phys_start = RISCV_RAM;
	phys_size = kvm->ram_size;
	host_mem = kvm->ram_start;

	err = kvm__register_ram(kvm, phys_start, phys_size, host_mem);
	if (err)
		die("Failed to register %lld bytes of memory at physical "
		    "address 0x%llx [err %d]", phys_size, phys_start, err);

	kvm->arch.memory_guest_start = phys_start;
}

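/*
 * Unmap the over-allocated backing region (ram_alloc_start/size), not
 * just the aligned portion exposed through kvm->ram_start.
 */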
void kvm__arch_delete_ram(struct kvm *kvm)
{
	munmap(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size);
}

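/* Pump pending terminal input into the 8250 UARTs and the virtio console. */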
void kvm__arch_read_term(struct kvm *kvm)
{
	serial8250__update_consoles(kvm);
	virtio_console__inject_interrupt(kvm);
}

void kvm__arch_set_cmdline(char *cmdline, bool video)
{
}

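/*
 * Smallest hugepage size offered by the MMU: 2M megapages with the
 * 64-bit page tables (Sv39 and above), 4M with 32-bit Sv32.
 */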
#if __riscv_xlen == 64
#define HUGEPAGE_SIZE	SZ_2M
#else
#define HUGEPAGE_SIZE	SZ_4M
#endif

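/*
 * Allocate and map guest RAM, hint the host kernel that the range may be
 * merged (KSM) and backed by transparent hugepages, then create the guest
 * interrupt controller.
 */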
void kvm__arch_init(struct kvm *kvm)
{
	/*
	 * Allocate guest memory. We must align our buffer to 64K to
	 * correlate with the maximum guest page size for virtio-mmio.
	 * If using THP, then our minimal alignment becomes hugepage
	 * size. The hugepage size is always greater than 64K, so
	 * let's go with that.
	 */
	kvm->ram_size = min(kvm->cfg.ram_size, (u64)RISCV_MAX_MEMORY(kvm));
	kvm->arch.ram_alloc_size = kvm->ram_size;
	if (!kvm->cfg.hugetlbfs_path)
		kvm->arch.ram_alloc_size += HUGEPAGE_SIZE;
	kvm->arch.ram_alloc_start = mmap_anon_or_hugetlbfs(kvm,
						kvm->cfg.hugetlbfs_path,
						kvm->arch.ram_alloc_size);

	if (kvm->arch.ram_alloc_start == MAP_FAILED)
		die("Failed to map %lld bytes for guest memory (%d)",
		    kvm->arch.ram_alloc_size, errno);

	kvm->ram_start = (void *)ALIGN((unsigned long)kvm->arch.ram_alloc_start,
					SZ_2M);

	madvise(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size,
		MADV_MERGEABLE);

	madvise(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size,
		MADV_HUGEPAGE);

	riscv__irqchip_create(kvm);
}

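/*
 * Guest memory layout produced below: the kernel image is loaded at a
 * fixed offset from the start of RAM, the FDT is placed at the first
 * FDT_ALIGN boundary after the kernel, and the initrd (if any) is packed
 * against the low-memory limit, aligned to INITRD_ALIGN bytes.
 */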
#define FDT_ALIGN	SZ_4M
#define INITRD_ALIGN	8
bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
				 const char *kernel_cmdline)
{
	void *pos, *kernel_end, *limit;
	unsigned long guest_addr, kernel_offset;
	ssize_t file_size;

	/*
	 * Linux requires the initrd and dtb to be mapped inside lowmem,
	 * so we can't just place them at the top of memory.
	 */
	limit = kvm->ram_start + min(kvm->ram_size, (u64)SZ_256M) - 1;

#if __riscv_xlen == 64
	/* Linux expects to be booted at 2M boundary for RV64 */
	kernel_offset = 0x200000;
#else
	/* Linux expects to be booted at 4M boundary for RV32 */
	kernel_offset = 0x400000;
#endif

	pos = kvm->ram_start + kernel_offset;
	kvm->arch.kern_guest_start = host_to_guest_flat(kvm, pos);
	file_size = read_file(fd_kernel, pos, limit - pos);
	if (file_size < 0) {
		if (errno == ENOMEM)
			die("kernel image too big to fit in guest memory.");

		die_perror("kernel read");
	}
	kernel_end = pos + file_size;
	pr_debug("Loaded kernel to 0x%llx (%zd bytes)",
		 kvm->arch.kern_guest_start, file_size);

	/* Place FDT just after kernel at FDT_ALIGN address */
	pos = kernel_end + FDT_ALIGN;
	guest_addr = ALIGN(host_to_guest_flat(kvm, pos), FDT_ALIGN);
	pos = guest_flat_to_host(kvm, guest_addr);
	if (pos < kernel_end)
		die("fdt overlaps with kernel image.");

	kvm->arch.dtb_guest_start = guest_addr;
	pr_debug("Placing fdt at 0x%llx - 0x%llx",
		 kvm->arch.dtb_guest_start,
		 host_to_guest_flat(kvm, limit));

	/* ... and finally the initrd, if we have one. */
	if (fd_initrd != -1) {
		struct stat sb;
		unsigned long initrd_start;

		if (fstat(fd_initrd, &sb))
			die_perror("fstat");

		pos = limit - (sb.st_size + INITRD_ALIGN);
		guest_addr = ALIGN(host_to_guest_flat(kvm, pos), INITRD_ALIGN);
		pos = guest_flat_to_host(kvm, guest_addr);
		if (pos < kernel_end)
			die("initrd overlaps with kernel image.");

		initrd_start = guest_addr;
		file_size = read_file(fd_initrd, pos, limit - pos);
		if (file_size == -1) {
			if (errno == ENOMEM)
				die("initrd too big to fit in guest memory.");

			die_perror("initrd read");
		}

		kvm->arch.initrd_guest_start = initrd_start;
		kvm->arch.initrd_size = file_size;
		pr_debug("Loaded initrd to 0x%llx (%llu bytes)",
			 kvm->arch.initrd_guest_start,
			 kvm->arch.initrd_size);
	} else {
		kvm->arch.initrd_size = 0;
	}

	return true;
}

bool kvm__load_firmware(struct kvm *kvm, const char *firmware_filename)
{
	/* TODO: Firmware loading to be supported later. */
	return false;
}

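/* No RISC-V specific firmware setup is needed; report success. */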
int kvm__arch_setup_firmware(struct kvm *kvm)
{
	return 0;
}