#include "kvm/kvm.h"
#include "kvm/util.h"
#include "kvm/8250-serial.h"
#include "kvm/virtio-console.h"
#include "kvm/fdt.h"

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/sizes.h>

struct kvm_ext kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_ONE_REG) },
	{ 0, 0 },
};

u64 kvm__arch_default_ram_address(void)
{
	return RISCV_RAM;
}

void kvm__arch_validate_cfg(struct kvm *kvm)
{
}

bool kvm__arch_cpu_supports_vm(void)
{
	/* The KVM capability check is enough. */
	return true;
}

void kvm__init_ram(struct kvm *kvm)
{
	int err;
	u64 phys_start, phys_size;
	void *host_mem;

	phys_start = RISCV_RAM;
	phys_size = kvm->ram_size;
	host_mem = kvm->ram_start;

	err = kvm__register_ram(kvm, phys_start, phys_size, host_mem);
	if (err)
		die("Failed to register %lld bytes of memory at physical "
		    "address 0x%llx [err %d]", phys_size, phys_start, err);

	kvm->arch.memory_guest_start = phys_start;
}

void kvm__arch_delete_ram(struct kvm *kvm)
{
	munmap(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size);
}

void kvm__arch_read_term(struct kvm *kvm)
{
	serial8250__update_consoles(kvm);
	virtio_console__inject_interrupt(kvm);
}

void kvm__arch_set_cmdline(char *cmdline, bool video)
{
}

#if __riscv_xlen == 64
#define HUGEPAGE_SIZE	SZ_2M
#else
#define HUGEPAGE_SIZE	SZ_4M
#endif

void kvm__arch_init(struct kvm *kvm)
{
	/*
	 * Allocate guest memory. We must align our buffer to 64K to
	 * correlate with the maximum guest page size for virtio-mmio.
	 * If using THP, then our minimal alignment becomes hugepage
	 * size. The hugepage size is always greater than 64K, so
	 * let's go with that.
	 */
	kvm->ram_size = min(kvm->cfg.ram_size, (u64)RISCV_MAX_MEMORY(kvm));
	kvm->arch.ram_alloc_size = kvm->ram_size;
	if (!kvm->cfg.hugetlbfs_path)
		kvm->arch.ram_alloc_size += HUGEPAGE_SIZE;
	kvm->arch.ram_alloc_start = mmap_anon_or_hugetlbfs(kvm,
						kvm->cfg.hugetlbfs_path,
						kvm->arch.ram_alloc_size);

	if (kvm->arch.ram_alloc_start == MAP_FAILED)
		die("Failed to map %lld bytes for guest memory (%d)",
		    kvm->arch.ram_alloc_size, errno);

	kvm->ram_start = (void *)ALIGN((unsigned long)kvm->arch.ram_alloc_start,
					SZ_2M);

	madvise(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size,
		MADV_MERGEABLE);

	madvise(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size,
		MADV_HUGEPAGE);
}

#define FDT_ALIGN	SZ_4M
#define INITRD_ALIGN	8
bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
				 const char *kernel_cmdline)
{
	void *pos, *kernel_end, *limit;
	unsigned long guest_addr, kernel_offset;
	ssize_t file_size;

	/*
	 * Linux requires the initrd and dtb to be mapped inside lowmem,
	 * so we can't just place them at the top of memory.
	 */
	limit = kvm->ram_start + min(kvm->ram_size, (u64)SZ_256M) - 1;

#if __riscv_xlen == 64
	/* Linux expects to be booted at 2M boundary for RV64 */
	kernel_offset = 0x200000;
#else
	/* Linux expects to be booted at 4M boundary for RV32 */
	kernel_offset = 0x400000;
#endif

	pos = kvm->ram_start + kernel_offset;
	kvm->arch.kern_guest_start = host_to_guest_flat(kvm, pos);
	file_size = read_file(fd_kernel, pos, limit - pos);
	if (file_size < 0) {
		if (errno == ENOMEM)
			die("kernel image too big to fit in guest memory.");

		die_perror("kernel read");
	}
	kernel_end = pos + file_size;
	pr_debug("Loaded kernel to 0x%llx (%zd bytes)",
		 kvm->arch.kern_guest_start, file_size);

	/* Place FDT just after kernel at FDT_ALIGN address */
	pos = kernel_end + FDT_ALIGN;
	guest_addr = ALIGN(host_to_guest_flat(kvm, pos), FDT_ALIGN);
	pos = guest_flat_to_host(kvm, guest_addr);
	if (pos < kernel_end)
		die("fdt overlaps with kernel image.");

	kvm->arch.dtb_guest_start = guest_addr;
	pr_debug("Placing fdt at 0x%llx - 0x%llx",
		 kvm->arch.dtb_guest_start,
		 host_to_guest_flat(kvm, limit));

	/* ... and finally the initrd, if we have one. */
	if (fd_initrd != -1) {
		struct stat sb;
		unsigned long initrd_start;

		if (fstat(fd_initrd, &sb))
			die_perror("fstat");

		pos = limit - (sb.st_size + INITRD_ALIGN);
		guest_addr = ALIGN(host_to_guest_flat(kvm, pos), INITRD_ALIGN);
		pos = guest_flat_to_host(kvm, guest_addr);
		if (pos < kernel_end)
			die("initrd overlaps with kernel image.");

		initrd_start = guest_addr;
		file_size = read_file(fd_initrd, pos, limit - pos);
		if (file_size == -1) {
			if (errno == ENOMEM)
				die("initrd too big to fit in guest memory.");

			die_perror("initrd read");
		}

		kvm->arch.initrd_guest_start = initrd_start;
		kvm->arch.initrd_size = file_size;
		pr_debug("Loaded initrd to 0x%llx (%llu bytes)",
			 kvm->arch.initrd_guest_start,
			 kvm->arch.initrd_size);
	} else {
		kvm->arch.initrd_size = 0;
	}

	return true;
}

bool kvm__load_firmware(struct kvm *kvm, const char *firmware_filename)
{
	/* TODO: Firmware loading to be supported later. */
	return false;
}

int kvm__arch_setup_firmware(struct kvm *kvm)
{
	return 0;
}