1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * AArch64 processor specific defines
4 *
5 * Copyright (C) 2018, Red Hat, Inc.
6 */
7 #ifndef SELFTEST_KVM_PROCESSOR_H
8 #define SELFTEST_KVM_PROCESSOR_H
9
10 #include "kvm_util.h"
11 #include <linux/stringify.h>
12 #include <linux/types.h>
13 #include <asm/sysreg.h>
14
15
/*
 * ARM64_CORE_REG(x): Build the KVM_{GET,SET}_ONE_REG id for a 64-bit
 * "core" register, where 'x' names a field of struct kvm_regs
 * (e.g. regs.regs[0], sp_el1).
 */
#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/*
 * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
 * SYS_* register definitions in asm/sysreg.h to use in KVM
 * calls such as vcpu_get_reg() and vcpu_set_reg().
 */
#define KVM_ARM64_SYS_REG(sys_reg_id)			\
	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id),		\
		      sys_reg_Op1(sys_reg_id),		\
		      sys_reg_CRn(sys_reg_id),		\
		      sys_reg_CRm(sys_reg_id),		\
		      sys_reg_Op2(sys_reg_id))
30
31 /*
32 * Default MAIR
33 * index attribute
34 * DEVICE_nGnRnE 0 0000:0000
35 * DEVICE_nGnRE 1 0000:0100
36 * DEVICE_GRE 2 0000:1100
37 * NORMAL_NC 3 0100:0100
38 * NORMAL 4 1111:1111
39 * NORMAL_WT 5 1011:1011
40 */
41
/* Linux doesn't use these memory types, so let's define them. */
#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
#define MAIR_ATTR_NORMAL_WT UL(0xbb)

/* MAIR_EL1 attribute indices referenced by the selftests' page tables. */
#define MT_DEVICE_nGnRnE 0
#define MT_DEVICE_nGnRE 1
#define MT_DEVICE_GRE 2
#define MT_NORMAL_NC 3
#define MT_NORMAL 4
#define MT_NORMAL_WT 5

/*
 * MAIR_EL1 value implementing the index -> attribute table above;
 * MAIR_ATTRIDX() (asm/sysreg.h) places each attribute in its 8-bit slot.
 */
#define DEFAULT_MAIR_EL1							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))

/* Affinity fields of MPIDR_EL1: Aff3 (bits [39:32]) and Aff2..Aff0 ([23:0]). */
#define MPIDR_HWID_BITMASK (0xff00fffffful)
62
/*
 * Initialize @vcpu's processor state according to @init.
 * NOTE(review): presumably a NULL @init selects the VM's preferred
 * target/features — confirm against the library implementation.
 */
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
/* Add vCPU @vcpu_id to @vm, set up per @init, entering at @guest_code. */
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);
66
/*
 * Guest register state passed to exception handlers (handler_fn).
 * NOTE(review): the layout must match the assembly that saves/restores
 * these registers at exception entry — do not reorder fields.
 */
struct ex_regs {
	u64 regs[31];	/* general purpose registers x0..x30 */
	u64 sp;
	u64 pc;
	u64 pstate;
};
73
/* Number of entries in the AArch64 exception vector table. */
#define VECTOR_NUM 16

/*
 * Vector table slot indices, in architectural order: the four exception
 * types {Sync, IRQ, FIQ, SError} for each entry category — current EL
 * with SP_EL0, current EL with SP_ELx, lower EL AArch64, lower EL AArch32.
 */
enum {
	VECTOR_SYNC_CURRENT_SP0,
	VECTOR_IRQ_CURRENT_SP0,
	VECTOR_FIQ_CURRENT_SP0,
	VECTOR_ERROR_CURRENT_SP0,

	VECTOR_SYNC_CURRENT,
	VECTOR_IRQ_CURRENT,
	VECTOR_FIQ_CURRENT,
	VECTOR_ERROR_CURRENT,

	VECTOR_SYNC_LOWER_64,
	VECTOR_IRQ_LOWER_64,
	VECTOR_FIQ_LOWER_64,
	VECTOR_ERROR_LOWER_64,

	VECTOR_SYNC_LOWER_32,
	VECTOR_IRQ_LOWER_32,
	VECTOR_FIQ_LOWER_32,
	VECTOR_ERROR_LOWER_32,
};
97
/* True iff vector index @v is one of the four synchronous entries. */
#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \
			   (v) == VECTOR_SYNC_CURRENT || \
			   (v) == VECTOR_SYNC_LOWER_64 || \
			   (v) == VECTOR_SYNC_LOWER_32)

/* ESR_ELx.EC is the 6-bit exception class field at bits [31:26]. */
#define ESR_EC_NUM 64
#define ESR_EC_SHIFT 26
#define ESR_EC_MASK (ESR_EC_NUM - 1)

/* Exception class (EC) values of interest; see the Arm ARM, ESR_ELx. */
#define ESR_EC_UNKNOWN 0x0
#define ESR_EC_SVC64 0x15		/* SVC executed in AArch64 state */
#define ESR_EC_IABT 0x21		/* Instruction abort, no EL change */
#define ESR_EC_DABT 0x25		/* Data abort, no EL change */
#define ESR_EC_HW_BP_CURRENT 0x31	/* HW breakpoint, no EL change */
#define ESR_EC_SSTEP_CURRENT 0x33	/* Software step, no EL change */
#define ESR_EC_WP_CURRENT 0x35		/* Watchpoint, no EL change */
#define ESR_EC_BRK_INS 0x3c		/* BRK instruction (AArch64) */

/* Access flag (AF, bit 10) in stage-1 page table descriptors */
#define PTE_AF (1ULL << 10)

/* Access flag update enable/disable (TCR_EL1.HA, bit 39) */
#define TCR_EL1_HA (1ULL << 39)
121
/*
 * Report supported IPA sizes for the 4K/16K/64K page-size guest modes.
 * NOTE(review): exact out-parameter semantics (and meaning of @ipa) are
 * defined by the library implementation — confirm there before relying.
 */
void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
				      uint32_t *ipa16k, uint32_t *ipa64k);

/* Set up guest-side exception handling for the VM / the given vCPU. */
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);

/* Guest exception handler; receives the saved register state. */
typedef void(*handler_fn)(struct ex_regs *);
/* Install @handler for exception vector @vector (any exception class). */
void vm_install_exception_handler(struct kvm_vm *vm,
		int vector, handler_fn handler);
/* Install @handler for synchronous exceptions with ESR class @ec. */
void vm_install_sync_handler(struct kvm_vm *vm,
		int vector, int ec, handler_fn handler);

/* Return a host pointer (HVA) to the PTE that maps guest vaddr @gva. */
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
135
/*
 * Busy-wait hint: YIELD tells the CPU this thread is spinning; the
 * "memory" clobber forces the compiler to re-read shared state on
 * each loop iteration.
 */
static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}
140
/* Instruction/data barriers; 'opt' selects the DMB/DSB domain+type. */
#define isb() asm volatile("isb" : : : "memory")
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")

/* Write barrier before MMIO stores: outer-shareable, store-store. */
#define dma_wmb() dmb(oshst)
#define __iowmb() dma_wmb()

/* Read barrier after MMIO loads: outer-shareable, load-load/store. */
#define dma_rmb() dmb(oshld)
149
/*
 * Read barrier for MMIO: orders the load of @v before later accesses
 * and additionally creates a control dependency on the value read.
 */
#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Courtesy of arch/arm64/include/asm/io.h:			\
	 * Create a dummy control dependency from the IO read to any	\
	 * later instructions. This ensures that a subsequent call	\
	 * to udelay() will be ordered due to the ISB in __delay().	\
	 */								\
	asm volatile("eor %0, %1, %1\n"					\
		     "cbnz %0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})
167
/* 32-bit MMIO store; no ordering guarantees (see writel() for ordered). */
static __always_inline void __raw_writel(u32 val, volatile void *addr)
{
	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}

/* 32-bit MMIO load; no ordering guarantees (see readl() for ordered). */
static __always_inline u32 __raw_readl(const volatile void *addr)
{
	u32 val;
	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}
179
/* Little-endian MMIO accessors; the *_relaxed forms impose no ordering. */
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })

/* Ordered forms: barrier before the store / after the load. */
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c));})
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
185
/* Unmask IRQ and FIQ (clear DAIF.I and DAIF.F) at the current EL. */
static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #3" : : : "memory");
}

/* Mask IRQ and FIQ (set DAIF.I and DAIF.F) at the current EL. */
static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #3" : : : "memory");
}
195
/**
 * struct arm_smccc_res - Result from SMC/HVC call
 * @a0: return value from register x0
 * @a1: return value from register x1
 * @a2: return value from register x2
 * @a3: return value from register x3
 */
struct arm_smccc_res {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
};
206
/**
 * smccc_hvc - Invoke a SMCCC function using the hvc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

/**
 * smccc_smc - Invoke a SMCCC function using the smc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

/*
 * Return the id of the vCPU this guest code is running on.
 * NOTE(review): presumably stashed per-vCPU at setup time — confirm
 * against the library implementation.
 */
uint32_t guest_get_vcpuid(void);
232
233 #endif /* SELFTEST_KVM_PROCESSOR_H */
234