xref: /kvmtool/powerpc/kvm.c (revision df129a0a82ea18d8a42376650f60ce09f6098747)
/*
 * PPC64 (SPAPR) platform support
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "kvm/kvm.h"
#include "kvm/util.h"

#include <linux/kvm.h>

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <asm/unistd.h>
#include <errno.h>

#include <linux/byteorder.h>
#include <libfdt.h>

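/*
 * Default hugetlbfs mount used when the caller passes "default" as the
 * hugetlbfs path below; the mount provides 16MB huge pages.
 */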
#define HUGETLBFS_PATH "/var/lib/hugetlbfs/global/pagesize-16MB/"

static char kern_cmdline[2048];

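/*
 * No KVM extensions are strictly required on PPC; the list holds only its
 * zero terminator.
 */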
struct kvm_ext kvm_req_ext[] = {
	{ 0, 0 }
};

bool kvm__arch_cpu_supports_vm(void)
{
	return true;
}

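/*
 * Register all of guest RAM with KVM as a single region starting at guest
 * physical address 0.
 */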
void kvm__init_ram(struct kvm *kvm)
{
	u64	phys_start, phys_size;
	void	*host_mem;

	phys_start = 0;
	phys_size  = kvm->ram_size;
	host_mem   = kvm->ram_start;

	/*
	 * We put MMIO at PPC_MMIO_START, high up.  Make sure that this doesn't
	 * crash into the end of RAM -- on PPC64 at least, this is so high
	 * (63TB!) that this is unlikely.
	 */
	if (phys_size >= PPC_MMIO_START)
		die("Too much memory (%lld, what a nice problem): "
		    "overlaps MMIO!\n",
		    phys_size);

	kvm__register_mem(kvm, phys_start, phys_size, host_mem);
}

void kvm__arch_set_cmdline(char *cmdline, bool video)
{
	/* We don't need anything unusual in here. */
}

/* Architecture-specific KVM init */
void kvm__arch_init(struct kvm *kvm, const char *hugetlbfs_path, u64 ram_size)
{
	int cap_ppc_rma;
	unsigned long hpt;

	kvm->ram_size		= ram_size;

	/*
	 * Currently, HV-mode PPC64 SPAPR requires that we map RAM from
	 * hugetlbfs.  Allow a 'default' option to assist.
	 * PR-mode does not require this.
	 */
	if (hugetlbfs_path) {
		if (!strcmp(hugetlbfs_path, "default"))
			hugetlbfs_path = HUGETLBFS_PATH;
		kvm->ram_start = mmap_hugetlbfs(hugetlbfs_path, kvm->ram_size);
	} else {
		kvm->ram_start = mmap(0, kvm->ram_size, PROT_READ | PROT_WRITE,
				      MAP_ANON | MAP_PRIVATE,
				      -1, 0);
	}
	if (kvm->ram_start == MAP_FAILED)
		die("Couldn't map %lld bytes for RAM (%d)\n",
		    kvm->ram_size, errno);

	/* FDT goes at top of memory, RTAS just below */
	kvm->fdt_gra = kvm->ram_size - FDT_MAX_SIZE;
	/* FIXME: Not all PPC systems have RTAS */
	kvm->rtas_gra = kvm->fdt_gra - RTAS_MAX_SIZE;
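	/* Allow KSM to merge identical guest pages. */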
	madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);

	/* FIXME: SPAPR-PR specific; allocate a guest HPT. */
	if (posix_memalign((void **)&hpt, (1<<HPT_ORDER), (1<<HPT_ORDER)))
		die("Can't allocate %d bytes for HPT\n", (1<<HPT_ORDER));

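	/*
	 * SDR1 carries the hash table's base address in its upper bits and
	 * the HTABSIZE field in its low bits, encoded as log2(table bytes) - 18;
	 * hence the OR with (HPT_ORDER - 18).
	 */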
	kvm->sdr1 = ((hpt + 0x3ffffULL) & ~0x3ffffULL) | (HPT_ORDER-18);

	/* FIXME: This is book3s-specific */
	cap_ppc_rma = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_RMA);
	if (cap_ppc_rma == 2)
		die("Need contiguous RMA allocation on this hardware, "
		    "which is not yet supported.");
}

void kvm__arch_delete_ram(struct kvm *kvm)
{
	munmap(kvm->ram_start, kvm->ram_size);
}

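/*
 * Interrupt delivery is not wired up yet on this platform; for now just log
 * the request.
 */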
void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	fprintf(stderr, "irq_line(%d, %d)\n", irq, level);
}

void kvm__irq_trigger(struct kvm *kvm, int irq)
{
	kvm__irq_line(kvm, irq, 1);
	kvm__irq_line(kvm, irq, 0);
}

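/*
 * Load a flat (raw) kernel image at KERNEL_LOAD_ADDR and an optional initrd
 * at INITRD_LOAD_ADDR, and stash the command line for later use (e.g. when
 * the device tree is built).
 */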
bool load_flat_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *kernel_cmdline)
{
	void *p;
	void *k_start;
	void *i_start;
	int nr;

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	p = k_start = guest_flat_to_host(kvm, KERNEL_LOAD_ADDR);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	pr_info("Loaded kernel to 0x%x (%ld bytes)", KERNEL_LOAD_ADDR, p-k_start);

	if (fd_initrd != -1) {
		if (lseek(fd_initrd, 0, SEEK_SET) < 0)
			die_perror("lseek");

		if (p-k_start > INITRD_LOAD_ADDR)
			die("Kernel overlaps initrd!");

		/* Round up kernel size to 8-byte alignment, and load initrd right after. */
		i_start = p = guest_flat_to_host(kvm, INITRD_LOAD_ADDR);

		while (((nr = read(fd_initrd, p, 65536)) > 0) &&
		       p < (kvm->ram_start + kvm->ram_size))
			p += nr;

		if (p >= (kvm->ram_start + kvm->ram_size))
			die("initrd too big to fit in guest RAM.\n");

		pr_info("Loaded initrd to 0x%x (%ld bytes)",
			INITRD_LOAD_ADDR, p-i_start);
		kvm->initrd_gra = INITRD_LOAD_ADDR;
		kvm->initrd_size = p-i_start;
	} else {
		kvm->initrd_size = 0;
	}
	strncpy(kern_cmdline, kernel_cmdline, 2048);
	kern_cmdline[2047] = '\0';

	return true;
}

bool load_bzimage(struct kvm *kvm, int fd_kernel,
		  int fd_initrd, const char *kernel_cmdline, u16 vidmode)
{
	/* We don't support bzImages. */
	return false;
}

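/*
 * Placeholder: building the guest's flattened device tree is not implemented
 * yet; libfdt is included above and space (FDT_MAX_SIZE) is already reserved
 * for the FDT at the top of RAM.
 */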
static void setup_fdt(struct kvm *kvm)
{

}

/**
 * kvm__arch_setup_firmware
 */
int kvm__arch_setup_firmware(struct kvm *kvm)
{
	/* Load RTAS */

	/* Load SLOF */

	/* Init FDT */
	setup_fdt(kvm);

	return 0;
}

int kvm__arch_free_firmware(struct kvm *kvm)
{
	return 0;
}