/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_NEW_H__
#define __KVM_ARM_VGIC_NEW_H__

#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b

#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define INTERRUPT_ID_BITS_SPIS	10
#define INTERRUPT_ID_BITS_ITS	16
#define VGIC_LPI_MAX_INTID	((1 << INTERRUPT_ID_BITS_ITS) - 1)
#define VGIC_PRI_BITS		5

#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)

#define VGIC_AFFINITY_0_SHIFT 0
#define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
#define VGIC_AFFINITY_1_SHIFT 8
#define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
#define VGIC_AFFINITY_2_SHIFT 16
#define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
#define VGIC_AFFINITY_3_SHIFT 24
#define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)

#define VGIC_AFFINITY_LEVEL(reg, level) \
	((((reg) & VGIC_AFFINITY_## level ##_MASK) \
	>> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))

/*
 * Userspace encodes the affinity differently from the MPIDR. The
 * macro below converts the vgic userspace format to the MPIDR
 * register format.
 */
#define VGIC_TO_MPIDR(val) (VGIC_AFFINITY_LEVEL(val, 0) | \
			    VGIC_AFFINITY_LEVEL(val, 1) | \
			    VGIC_AFFINITY_LEVEL(val, 2) | \
			    VGIC_AFFINITY_LEVEL(val, 3))
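
/*
 * Worked example (illustrative, not part of the original source): a
 * userspace affinity value of 0x04030201 (Aff3..Aff0 in consecutive
 * bytes) converts to the MPIDR-format value 0x0000000400030201, since
 * MPIDR_LEVEL_SHIFT(3) moves Aff3 from bits [31:24] up to bits [39:32]
 * while Aff0..Aff2 keep their byte positions.
 */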

/*
 * As per Documentation/virt/kvm/devices/arm-vgic-v3.rst, the macros
 * below define the CPUREG encoding.
 */
#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK   0x000000000000c000
#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT  14
#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK   0x0000000000003800
#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT  11
#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK   0x0000000000000780
#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT  7
#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK   0x0000000000000078
#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT  3
#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK   0x0000000000000007
#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT  0

#define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
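
/*
 * Illustrative sketch (not a helper defined in this file): the
 * individual encoding fields can be pulled out of a userspace
 * register ID with the masks above, e.g.:
 *
 *	u8 op0 = (id & KVM_REG_ARM_VGIC_SYSREG_OP0_MASK) >>
 *		 KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT;
 *	u8 op1 = (id & KVM_REG_ARM_VGIC_SYSREG_OP1_MASK) >>
 *		 KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT;
 */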

#define KVM_ICC_SRE_EL2		(ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE |	\
				 ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB)
#define KVM_ICH_VTR_EL2_RES0	(ICH_VTR_EL2_DVIM	|	\
				 ICH_VTR_EL2_A3V	|	\
				 ICH_VTR_EL2_IDbits)
#define KVM_ICH_VTR_EL2_RES1	ICH_VTR_EL2_nV4

static inline u64 kvm_get_guest_vtr_el2(void)
{
	u64 vtr;

	vtr  = kvm_vgic_global_state.ich_vtr_el2;
	vtr &= ~KVM_ICH_VTR_EL2_RES0;
	vtr |= KVM_ICH_VTR_EL2_RES1;

	return vtr;
}

/*
 * As per Documentation/virt/kvm/devices/arm-vgic-its.rst, the macros
 * below define the ITS table entry encoding.
 */
#define KVM_ITS_CTE_VALID_SHIFT		63
#define KVM_ITS_CTE_VALID_MASK		BIT_ULL(63)
#define KVM_ITS_CTE_RDBASE_SHIFT	16
#define KVM_ITS_CTE_ICID_MASK		GENMASK_ULL(15, 0)
#define KVM_ITS_ITE_NEXT_SHIFT		48
#define KVM_ITS_ITE_PINTID_SHIFT	16
#define KVM_ITS_ITE_PINTID_MASK		GENMASK_ULL(47, 16)
#define KVM_ITS_ITE_ICID_MASK		GENMASK_ULL(15, 0)
#define KVM_ITS_DTE_VALID_SHIFT		63
#define KVM_ITS_DTE_VALID_MASK		BIT_ULL(63)
#define KVM_ITS_DTE_NEXT_SHIFT		49
#define KVM_ITS_DTE_NEXT_MASK		GENMASK_ULL(62, 49)
#define KVM_ITS_DTE_ITTADDR_SHIFT	5
#define KVM_ITS_DTE_ITTADDR_MASK	GENMASK_ULL(48, 5)
#define KVM_ITS_DTE_SIZE_MASK		GENMASK_ULL(4, 0)
#define KVM_ITS_L1E_VALID_MASK		BIT_ULL(63)
/* we only support 64 kB translation table page size */
#define KVM_ITS_L1E_ADDR_MASK		GENMASK_ULL(51, 16)
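
/*
 * Illustrative decode of a device table entry (DTE) using the masks
 * above; a sketch only, the exact field semantics are specified in
 * Documentation/virt/kvm/devices/arm-vgic-its.rst:
 *
 *	bool valid = !!(dte & KVM_ITS_DTE_VALID_MASK);
 *	u64 itt    = (dte & KVM_ITS_DTE_ITTADDR_MASK) >>
 *		     KVM_ITS_DTE_ITTADDR_SHIFT;
 */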

#define KVM_VGIC_V3_RDIST_INDEX_MASK	GENMASK_ULL(11, 0)
#define KVM_VGIC_V3_RDIST_FLAGS_MASK	GENMASK_ULL(15, 12)
#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT	12
#define KVM_VGIC_V3_RDIST_BASE_MASK	GENMASK_ULL(51, 16)
#define KVM_VGIC_V3_RDIST_COUNT_MASK	GENMASK_ULL(63, 52)
#define KVM_VGIC_V3_RDIST_COUNT_SHIFT	52

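/*
 * Illustrative packing of a redistributor region attribute (a sketch;
 * field semantics per Documentation/virt/kvm/devices/arm-vgic-v3.rst):
 *
 *	u64 attr = (index & KVM_VGIC_V3_RDIST_INDEX_MASK) |
 *		   (base & KVM_VGIC_V3_RDIST_BASE_MASK) |
 *		   ((u64)count << KVM_VGIC_V3_RDIST_COUNT_SHIFT);
 */
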
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

static inline u32 vgic_get_implementation_rev(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.vgic.implementation_rev;
}

/* Requires the irq_lock to be held by the caller. */
static inline bool irq_is_pending(struct vgic_irq *irq)
{
	if (irq->config == VGIC_CONFIG_EDGE)
		return irq->pending_latch;
	else
		return irq->pending_latch || irq->line_level;
}

static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
{
	return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
}

static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
{
	/* Account for the active state as an interrupt */
	if (vgic_irq_is_sgi(irq->intid) && irq->source)
		return hweight8(irq->source) + irq->active;

	return irq_is_pending(irq) || irq->active;
}
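
/*
 * Worked example (illustrative): an SGI pending from two sources that
 * is also active accounts for three LRs, so vgic_irq_is_multi_sgi()
 * below returns true for it.
 */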

static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
{
	return vgic_irq_get_lr_count(irq) > 1;
}

static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
					const void *data, unsigned long len)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret;

	dist->table_write_in_progress = true;
	ret = kvm_write_guest_lock(kvm, gpa, data, len);
	dist->table_write_in_progress = false;

	return ret;
}

/*
 * This struct provides an intermediate representation of the fields
 * contained in the GICH_VMCR and ICH_VMCR registers, such that code
 * exporting the GIC state to userspace can generate either GICv2 or
 * GICv3 CPU interface registers regardless of the GIC backing it in
 * hardware.
 */
struct vgic_vmcr {
	u32	grpen0;
	u32	grpen1;

	u32	ackctl;
	u32	fiqen;
	u32	cbpr;
	u32	eoim;

	u32	abpr;
	u32	bpr;
	u32	pmr;  /* Priority mask field in the GICC_PMR and
		       * ICC_PMR_EL1 priority field format */
};

struct vgic_reg_attr {
	struct kvm_vcpu *vcpu;
	gpa_t addr;
};

struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
				((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr);
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len);
struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid);
struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_get_phys_line_level(struct vgic_irq *irq);
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags) __releases(&irq->irq_lock);
void vgic_kick_vcpus(struct kvm *kvm);
void vgic_irq_handle_resampling(struct vgic_irq *irq,
				bool lr_deactivated, bool lr_pending);

int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
		       phys_addr_t addr, phys_addr_t alignment,
		       phys_addr_t size);

void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val);
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_enable(struct kvm_vcpu *vcpu);
int vgic_v2_probe(const struct gic_kvm_info *info);
int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type);

void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);

void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);

static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
{
	if (!irq)
		return false;

	if (irq->intid < VGIC_MIN_LPI)
		return true;

	return kref_get_unless_zero(&irq->refcount);
}
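
/*
 * Typical usage (an illustrative sketch, not a pattern mandated
 * anywhere): take a reference before keeping an LPI pointer around,
 * and balance it with vgic_put_irq():
 *
 *	if (vgic_try_get_irq_kref(irq)) {
 *		... use irq ...
 *		vgic_put_irq(kvm, irq);
 *	}
 */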

static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
	WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
}

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);

void vgic_v3_load(struct kvm_vcpu *vcpu);
void vgic_v3_put(struct kvm_vcpu *vcpu);

bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr, bool is_write);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
const struct sys_reg_desc *vgic_v3_get_sysreg_table(unsigned int *sz);
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u32 *val);
int kvm_register_vgic_device(unsigned long type);
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
int vgic_lazy_init(struct kvm *kvm);
int vgic_init(struct kvm *kvm);

void vgic_debug_init(struct kvm *kvm);
void vgic_debug_destroy(struct kvm *kvm);

int vgic_v5_probe(const struct gic_kvm_info *info);

static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;

	/*
	 * num_pri_bits is initialized with the HW-supported value, so
	 * we can safely rely on it even if the VM has not restored
	 * ICC_CTLR_EL1 before restoring the APnR registers. The GIC
	 * implements 1 << (num_pri_bits - 5) active priority registers
	 * per group, hence the mapping below.
	 */
	switch (cpu_if->num_pri_bits) {
	case 7: return 3;
	case 6: return 1;
	default: return 0;
	}
}

static inline bool
vgic_v3_redist_region_full(struct vgic_redist_region *region)
{
	if (!region->count)
		return false;

	return (region->free_index >= region->count);
}

struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);

static inline size_t
vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
	if (!rdreg->count)
		return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
	else
		return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
}
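
/*
 * Worked example (illustrative): a region created with count == 4
 * spans 4 * KVM_VGIC_V3_REDIST_SIZE bytes, while a legacy region with
 * count == 0 grows with the number of online vCPUs.
 */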

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index);
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);

bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);

static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;

	return (base + size > d->vgic_dist_base) &&
		(base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
}

bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq);
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
void vgic_its_invalidate_all_caches(struct kvm *kvm);

/* GICv4.1 MMIO interface */
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
int vgic_its_invall(struct kvm_vcpu *vcpu);

bool system_supports_direct_sgis(void);
bool vgic_supports_direct_msis(struct kvm *kvm);
bool vgic_supports_direct_sgis(struct kvm *kvm);

static inline bool vgic_supports_direct_irqs(struct kvm *kvm)
{
	/*
	 * Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
	 * indirectly allowing userspace to control whether or not vPEs are
	 * allocated for the VM.
	 */
	if (system_supports_direct_sgis())
		return vgic_supports_direct_sgis(kvm);

	return vgic_supports_direct_msis(kvm);
}

int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);

void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu);

static inline bool kvm_has_gicv3(struct kvm *kvm)
{
	return kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP);
}

void vgic_v3_sync_nested(struct kvm_vcpu *vcpu);
void vgic_v3_load_nested(struct kvm_vcpu *vcpu);
void vgic_v3_put_nested(struct kvm_vcpu *vcpu);
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);

static inline bool vgic_is_v3_compat(struct kvm *kvm)
{
	return cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF) &&
		kvm_vgic_global_state.has_gcie_v3_compat;
}

static inline bool vgic_is_v3(struct kvm *kvm)
{
	return kvm_vgic_global_state.type == VGIC_V3 || vgic_is_v3_compat(kvm);
}

int vgic_its_debug_init(struct kvm_device *dev);
void vgic_its_debug_destroy(struct kvm_device *dev);

#endif