1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2012,2013 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 *
6 * Derived from arch/arm/kvm/coproc.h
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
9 */
10
11 #ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
12 #define __ARM64_KVM_SYS_REGS_LOCAL_H__
13
14 #include <linux/bsearch.h>
15
/* Pack a params/desc {Op0,Op1,CRn,CRm,Op2} tuple into its sysreg encoding. */
#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
19
20 struct sys_reg_params {
21 u8 Op0;
22 u8 Op1;
23 u8 CRn;
24 u8 CRm;
25 u8 Op2;
26 u64 regval;
27 bool is_write;
28 };
29
/* Build a sys_reg_params compound literal from a sysreg encoding. */
#define encoding_to_params(reg)						\
	((struct sys_reg_params){ .Op0 = sys_reg_Op0(reg),		\
				  .Op1 = sys_reg_Op1(reg),		\
				  .CRn = sys_reg_CRn(reg),		\
				  .CRm = sys_reg_CRm(reg),		\
				  .Op2 = sys_reg_Op2(reg) })
36
/*
 * Decode the ISS of an ESR_ELx value for a trapped AArch64 MSR/MRS:
 * the Op0/Op1/CRn/CRm/Op2 fields live at the bit positions below, and
 * bit 0 is the Direction bit (1 = read from, 0 = write to the register).
 */
#define esr_sys64_to_params(esr)					\
	((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3,		\
				  .Op1 = ((esr) >> 14) & 0x7,		\
				  .CRn = ((esr) >> 10) & 0xf,		\
				  .CRm = ((esr) >> 1) & 0xf,		\
				  .Op2 = ((esr) >> 17) & 0x7,		\
				  .is_write = !((esr) & 1) })
44
/*
 * Decode the ISS of an ESR_ELx value for a trapped AArch32 CP14/CP15
 * 32-bit access. Same field layout as the 64-bit case, but there is no
 * Op0 (it stays zero-initialized).
 */
#define esr_cp1x_32_to_params(esr)					\
	((struct sys_reg_params){ .Op1 = ((esr) >> 14) & 0x7,		\
				  .CRn = ((esr) >> 10) & 0xf,		\
				  .CRm = ((esr) >> 1) & 0xf,		\
				  .Op2 = ((esr) >> 17) & 0x7,		\
				  .is_write = !((esr) & 1) })
51
52 /*
53 * The Feature ID space is defined as the System register space in AArch64
54 * with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7}, op2=={0-7}.
55 */
in_feat_id_space(struct sys_reg_params * p)56 static inline bool in_feat_id_space(struct sys_reg_params *p)
57 {
58 return (p->Op0 == 3 && !(p->Op1 & 0b100) && p->Op1 != 2 &&
59 p->CRn == 0 && !(p->CRm & 0b1000));
60 }
61
62 struct sys_reg_desc {
63 /* Sysreg string for debug */
64 const char *name;
65
66 enum {
67 AA32_DIRECT,
68 AA32_LO,
69 AA32_HI,
70 } aarch32_map;
71
72 /* MRS/MSR instruction which accesses it. */
73 u8 Op0;
74 u8 Op1;
75 u8 CRn;
76 u8 CRm;
77 u8 Op2;
78
79 /* Trapped access from guest, if non-NULL. */
80 bool (*access)(struct kvm_vcpu *,
81 struct sys_reg_params *,
82 const struct sys_reg_desc *);
83
84 /*
85 * Initialization for vcpu. Return initialized value, or KVM
86 * sanitized value for ID registers.
87 */
88 u64 (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
89
90 /* Index into sys_reg[], or 0 if we don't need to save it. */
91 int reg;
92
93 /* Value (usually reset value), or write mask for idregs */
94 u64 val;
95
96 /* Custom get/set_user functions, fallback to generic if NULL */
97 int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
98 u64 *val);
99 int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
100 u64 val);
101
102 /* Return mask of REG_* runtime visibility overrides */
103 unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
104 const struct sys_reg_desc *rd);
105 };
106
/* Flags returned by a sys_reg_desc::visibility() callback. */
#define REG_HIDDEN	(1 << 0) /* hidden from userspace and guest */
#define REG_RAZ		(1 << 1) /* RAZ from userspace and guest */
#define REG_USER_WI	(1 << 2) /* WI from userspace only */
110
111 static __printf(2, 3)
print_sys_reg_msg(const struct sys_reg_params * p,char * fmt,...)112 inline void print_sys_reg_msg(const struct sys_reg_params *p,
113 char *fmt, ...)
114 {
115 va_list va;
116
117 va_start(va, fmt);
118 /* Look, we even formatted it for you to paste into the table! */
119 kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
120 &(struct va_format){ fmt, &va },
121 p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, str_write_read(p->is_write));
122 va_end(va);
123 }
124
/* Log an unhandled sysreg access with no extra message. */
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
	/* GCC warns on an empty format string */
	print_sys_reg_msg(p, "%s", "");
}
130
ignore_write(struct kvm_vcpu * vcpu,const struct sys_reg_params * p)131 static inline bool ignore_write(struct kvm_vcpu *vcpu,
132 const struct sys_reg_params *p)
133 {
134 return true;
135 }
136
read_zero(struct kvm_vcpu * vcpu,struct sys_reg_params * p)137 static inline bool read_zero(struct kvm_vcpu *vcpu,
138 struct sys_reg_params *p)
139 {
140 p->regval = 0;
141 return true;
142 }
143
144 /* Reset functions */
reset_unknown(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)145 static inline u64 reset_unknown(struct kvm_vcpu *vcpu,
146 const struct sys_reg_desc *r)
147 {
148 BUG_ON(!r->reg);
149 BUG_ON(r->reg >= NR_SYS_REGS);
150 __vcpu_assign_sys_reg(vcpu, r->reg, 0x1de7ec7edbadc0deULL);
151 return __vcpu_sys_reg(vcpu, r->reg);
152 }
153
reset_val(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)154 static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
155 {
156 BUG_ON(!r->reg);
157 BUG_ON(r->reg >= NR_SYS_REGS);
158 __vcpu_assign_sys_reg(vcpu, r->reg, r->val);
159 return __vcpu_sys_reg(vcpu, r->reg);
160 }
161
sysreg_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)162 static inline unsigned int sysreg_visibility(const struct kvm_vcpu *vcpu,
163 const struct sys_reg_desc *r)
164 {
165 if (likely(!r->visibility))
166 return 0;
167
168 return r->visibility(vcpu, r);
169 }
170
sysreg_hidden(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)171 static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
172 const struct sys_reg_desc *r)
173 {
174 return sysreg_visibility(vcpu, r) & REG_HIDDEN;
175 }
176
sysreg_visible_as_raz(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)177 static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
178 const struct sys_reg_desc *r)
179 {
180 return sysreg_visibility(vcpu, r) & REG_RAZ;
181 }
182
sysreg_user_write_ignore(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)183 static inline bool sysreg_user_write_ignore(const struct kvm_vcpu *vcpu,
184 const struct sys_reg_desc *r)
185 {
186 return sysreg_visibility(vcpu, r) & REG_USER_WI;
187 }
188
cmp_sys_reg(const struct sys_reg_desc * i1,const struct sys_reg_desc * i2)189 static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
190 const struct sys_reg_desc *i2)
191 {
192 BUG_ON(i1 == i2);
193 if (!i1)
194 return 1;
195 else if (!i2)
196 return -1;
197 if (i1->Op0 != i2->Op0)
198 return i1->Op0 - i2->Op0;
199 if (i1->Op1 != i2->Op1)
200 return i1->Op1 - i2->Op1;
201 if (i1->CRn != i2->CRn)
202 return i1->CRn - i2->CRn;
203 if (i1->CRm != i2->CRm)
204 return i1->CRm - i2->CRm;
205 return i1->Op2 - i2->Op2;
206 }
207
/* bsearch() comparator: key is an encoding, elt is a sys_reg_desc. */
static inline int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}
215
216 static inline const struct sys_reg_desc *
find_reg(const struct sys_reg_params * params,const struct sys_reg_desc table[],unsigned int num)217 find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
218 unsigned int num)
219 {
220 unsigned long pval = reg_to_encoding(params);
221
222 return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
223 }
224
225 const struct sys_reg_desc *get_reg_by_id(u64 id,
226 const struct sys_reg_desc table[],
227 unsigned int num);
228
229 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
230 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
231 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
232 const struct sys_reg_desc table[], unsigned int num);
233 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
234 const struct sys_reg_desc table[], unsigned int num);
235
236 bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);
237
238 int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
239
/* Shorthand designated initializers for sys_reg_desc table entries. */
#define AA32(_x)	.aarch32_map = AA32_##_x
#define Op0(_x) 	.Op0 = _x
#define Op1(_x) 	.Op1 = _x
#define CRn(_x)		.CRn = _x
#define CRm(_x) 	.CRm = _x
#define Op2(_x) 	.Op2 = _x
246
/* Initialize the name and encoding fields of a table entry from SYS_xxx. */
#define SYS_DESC(reg)					\
	.name = #reg,					\
	Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)),	\
	CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)),	\
	Op2(sys_reg_Op2(reg))
252
/* Like SYS_DESC, but for an AArch32 CP15 register: Op0 is forced to 0. */
#define CP15_SYS_DESC(reg)				\
	.name = #reg,					\
	.aarch32_map = AA32_DIRECT,			\
	Op0(0), Op1(sys_reg_Op1(reg)),			\
	CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)),	\
	Op2(sys_reg_Op2(reg))
259
/*
 * Clamp the @field enum of ID register value @val to at most @limit,
 * evaluating to the updated value. Only valid for fields whose enum is
 * ordered such that a numerically lower value is a safe subset.
 */
#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)			       \
({									       \
	u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);		       \
	(val) &= ~reg##_##field##_MASK;					       \
	(val) |= FIELD_PREP(reg##_##field##_MASK,			       \
			    min(__f_val,				       \
				(u64)SYS_FIELD_VALUE(reg, field, limit)));     \
	(val);								       \
})
269
/* Convert a bare sysreg name (without the SYS_ prefix) to a userspace id. */
#define TO_ARM64_SYS_REG(r)	ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r),	\
					      sys_reg_Op1(SYS_ ## r),	\
					      sys_reg_CRn(SYS_ ## r),	\
					      sys_reg_CRm(SYS_ ## r),	\
					      sys_reg_Op2(SYS_ ## r))
275
276 #endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
277