/*
 * SPAPR hypercalls
 *
 * Borrowed heavily from QEMU's spapr_hcall.c,
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Copyright (c) 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "spapr.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"

#include <stdio.h>
#include <assert.h>
#include <sys/eventfd.h>

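/*
 * Two dispatch tables: PAPR-architected hcall opcodes are multiples of 4 up
 * to MAX_HCALL_OPCODE and are indexed by opcode / 4; KVM-specific opcodes
 * occupy their own range and are indexed by their offset from
 * KVMPPC_HCALL_BASE.
 */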
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX -
					     KVMPPC_HCALL_BASE + 1];

static target_ulong h_set_dabr(struct kvm_cpu *vcpu, target_ulong opcode, target_ulong *args)
{
	/* FIXME:  Implement this for -PR.  (-HV does this in kernel.) */
	return H_HARDWARE;
}

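/*
 * H_RTAS: args[0] points to an RTAS argument buffer in guest memory.  The
 * token, nargs and nret fields are the first three 32-bit words of the
 * buffer; the call arguments start at offset 12 and the return words follow
 * immediately after the arguments.
 */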
static target_ulong h_rtas(struct kvm_cpu *vcpu, target_ulong opcode, target_ulong *args)
{
	target_ulong rtas_r3 = args[0];
	/*
	 * Pointer read from phys mem; these ptrs cannot be MMIO (!) so just
	 * reference guest RAM directly.
	 */
	uint32_t token, nargs, nret;

	token = rtas_ld(vcpu->kvm, rtas_r3, 0);
	nargs = rtas_ld(vcpu->kvm, rtas_r3, 1);
	nret  = rtas_ld(vcpu->kvm, rtas_r3, 2);

	return spapr_rtas_call(vcpu, token, nargs, rtas_r3 + 12,
			       nret, rtas_r3 + 12 + 4*nargs);
}

static target_ulong h_logical_load(struct kvm_cpu *vcpu, target_ulong opcode, target_ulong *args)
{
	/* SLOF will require these, though kernel doesn't. */
	die(__PRETTY_FUNCTION__);
	return H_PARAMETER;
}

static target_ulong h_logical_store(struct kvm_cpu *vcpu, target_ulong opcode, target_ulong *args)
{
	/* SLOF will require these, though kernel doesn't. */
	die(__PRETTY_FUNCTION__);
	return H_PARAMETER;
}

static target_ulong h_logical_icbi(struct kvm_cpu *vcpu, target_ulong opcode, target_ulong *args)
{
	/* KVM will trap this in the kernel.  Die if it misses. */
	die(__PRETTY_FUNCTION__);
	return H_SUCCESS;
}

static target_ulong h_logical_dcbf(struct kvm_cpu *vcpu, target_ulong opcode, target_ulong *args)
{
	/* KVM will trap this in the kernel.  Die if it misses. */
	die(__PRETTY_FUNCTION__);
	return H_SUCCESS;
}

struct lpcr_data {
	struct kvm_cpu	*cpu;
	int		mode;
};

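/* Read/write the vcpu's LPCR via the KVM ONE_REG interface. */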
static void get_cpu_lpcr(struct kvm_cpu *vcpu, target_ulong *lpcr)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_PPC_LPCR_64,
		.addr = (__u64)lpcr
	};

	if (ioctl(vcpu->vcpu_fd, KVM_GET_ONE_REG, &reg))
		die("Couldn't read vcpu reg?!");
}

static void set_cpu_lpcr(struct kvm_cpu *vcpu, target_ulong *lpcr)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_PPC_LPCR_64,
		.addr = (__u64)lpcr
	};

	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg))
		die("Couldn't write vcpu reg?!");
}

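/*
 * Flip LPCR[ILE] on one vcpu: the ILE bit selects the endianness the CPU
 * switches to when it takes an interrupt, which is how the guest changes
 * its endianness via H_SET_MODE.
 */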
static void set_endian_task(struct kvm_cpu *vcpu, void *data)
{
	target_ulong mflags = (target_ulong)data;
	target_ulong lpcr;

	get_cpu_lpcr(vcpu, &lpcr);

	if (mflags == H_SET_MODE_ENDIAN_BIG)
		lpcr &= ~LPCR_ILE;
	else
		lpcr |= LPCR_ILE;

	set_cpu_lpcr(vcpu, &lpcr);
}

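/*
 * H_SET_MODE: args[0] is the mode flags (mflags), args[1] the resource
 * being set.  Only the interrupt-endianness resource is handled here; it
 * must be applied to every vcpu, not just the calling one.
 */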
static target_ulong h_set_mode(struct kvm_cpu *vcpu, target_ulong opcode, target_ulong *args)
{
	int ret;

	switch (args[1]) {
	case H_SET_MODE_RESOURCE_LE: {
		struct kvm_cpu_task task;
		task.func = set_endian_task;
		task.data = (void *)args[0];
		kvm_cpu__run_on_all_cpus(vcpu->kvm, &task);
		ret = H_SUCCESS;
		break;
	}
	default:
		ret = H_FUNCTION;
		break;
	}

	return ret;
}

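/*
 * Register fn for opcode in the appropriate table.  Re-registering an
 * opcode is only allowed with the same handler.
 */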
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
	spapr_hcall_fn *slot;

	if (opcode <= MAX_HCALL_OPCODE) {
		assert((opcode & 0x3) == 0);

		slot = &papr_hypercall_table[opcode / 4];
	} else {
		assert((opcode >= KVMPPC_HCALL_BASE) &&
		       (opcode <= KVMPPC_HCALL_MAX));

		slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
	}

	assert(!(*slot) || (fn == *slot));
	*slot = fn;
}

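/*
 * Dispatch an hcall to its registered handler; unknown or unregistered
 * opcodes return H_FUNCTION.
 */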
target_ulong spapr_hypercall(struct kvm_cpu *vcpu, target_ulong opcode,
			     target_ulong *args)
{
	if ((opcode <= MAX_HCALL_OPCODE)
	    && ((opcode & 0x3) == 0)) {
		spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

		if (fn) {
			return fn(vcpu, opcode, args);
		}
	} else if ((opcode >= KVMPPC_HCALL_BASE) &&
		   (opcode <= KVMPPC_HCALL_MAX)) {
		spapr_hcall_fn fn = kvmppc_hypercall_table[opcode -
							   KVMPPC_HCALL_BASE];

		if (fn) {
			return fn(vcpu, opcode, args);
		}
	}

	hcall_dprintf("Unimplemented hcall 0x%lx\n", opcode);
	return H_FUNCTION;
}

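/* Register the hcalls kvmtool handles in userspace. */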
void hypercall_init(void)
{
	/* hcall-dabr */
	spapr_register_hypercall(H_SET_DABR, h_set_dabr);

	spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
	spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
	spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
	spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
	spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
	spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
	spapr_register_hypercall(H_SET_MODE, h_set_mode);

	/* KVM-PPC specific hcalls */
	spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
}