1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * VGIC: KVM DEVICE API
4 *
5 * Copyright (C) 2015 ARM Ltd.
6 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 */
8 #include <linux/kvm_host.h>
9 #include <kvm/arm_vgic.h>
10 #include <linux/uaccess.h>
11 #include <asm/kvm_mmu.h>
12 #include <asm/cputype.h>
13 #include "vgic.h"
14
15 /* common helpers */
16
/*
 * Validate a guest-physical MMIO range before assigning it to a VGIC
 * device region.
 *
 * Returns 0 if the range is acceptable, -EEXIST if @ioaddr was already
 * configured, -EINVAL on misalignment or address wrap-around, and
 * -E2BIG if the range does not fit in the guest's IPA space.
 */
int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
		       phys_addr_t addr, phys_addr_t alignment,
		       phys_addr_t size)
{
	phys_addr_t end = addr + size;

	/* Refuse to overwrite a base address userspace already set. */
	if (!IS_VGIC_ADDR_UNDEF(ioaddr))
		return -EEXIST;

	/* Base and size must both honour the required alignment. */
	if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
		return -EINVAL;

	/* Reject ranges whose end wraps around the address space. */
	if (end < addr)
		return -EINVAL;

	/* The whole range must lie within the guest's physical range. */
	if ((addr & ~kvm_phys_mask(&kvm->arch.mmu)) ||
	    end > kvm_phys_size(&kvm->arch.mmu))
		return -E2BIG;

	return 0;
}
36
vgic_check_type(struct kvm * kvm,int type_needed)37 static int vgic_check_type(struct kvm *kvm, int type_needed)
38 {
39 if (kvm->arch.vgic.vgic_model != type_needed)
40 return -ENODEV;
41 else
42 return 0;
43 }
44
/*
 * Handle the legacy KVM_ARM_SET_DEVICE_ADDR ioctl for a GICv2: set the
 * distributor or CPU interface base address in the guest physical
 * address space, after validating the GIC model and the range.
 *
 * Returns 0 on success, -ENODEV for an unknown address type or a
 * non-v2 GIC, or a negative error from vgic_check_iorange().
 */
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int r;

	/* The base addresses are VM configuration: serialize on config_lock. */
	mutex_lock(&kvm->arch.config_lock);
	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_DIST_SIZE);
		if (!r)
			vgic->vgic_dist_base = dev_addr->addr;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_CPU_SIZE);
		if (!r)
			vgic->vgic_cpu_base = dev_addr->addr;
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->arch.config_lock);

	return r;
}
76
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @attr: pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 *
 * Return: 0 on success, -ENODEV for an unknown attribute or GIC model
 * mismatch, -ENOENT for a missing redistributor region, -EFAULT on a
 * failed userspace copy, or a negative error from the range checks.
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
	u64 __user *uaddr = (u64 __user *)attr->addr;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment, size;
	u64 undef_value = VGIC_ADDR_UNDEF;
	u64 addr;
	int r;

	/* Reading a redistributor region addr implies getting the index */
	if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
		if (get_user(addr, uaddr))
			return -EFAULT;

	/*
	 * Since we can't hold config_lock while registering the redistributor
	 * iodevs, take the slots_lock immediately.
	 */
	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_DIST_SIZE;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_CPU_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		size = KVM_VGIC_V3_DIST_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			/* Registers the iodevs itself; skip the common path. */
			r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
			goto out;
		}
		/* Legacy single-region read: report the first region's base. */
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

		index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
			u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

			/* A region must hold at least one vcpu; no flags defined. */
			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		/* Re-encode index | base | count in the userspace format. */
		addr = index;
		addr |= rdreg->base;
		addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	/* Common dist/cpuif path: validate and commit under config_lock. */
	mutex_lock(&kvm->arch.config_lock);
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)
			*addr_ptr = addr;
	} else {
		addr = *addr_ptr;
	}
	mutex_unlock(&kvm->arch.config_lock);

out:
	mutex_unlock(&kvm->slots_lock);

	/* Copy the address back to userspace on a successful read. */
	if (!r && !write)
		r = put_user(addr, uaddr);

	return r;
}
208
/*
 * Handle KVM_SET_DEVICE_ATTR for the attribute groups shared between the
 * GICv2 and GICv3 userspace devices: base addresses, number of IRQs and
 * the control group (init / pending-table save).
 *
 * Returns 0 on success or a negative error code (-ENXIO for unhandled
 * groups/attributes).
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		r = kvm_vgic_addr(dev->kvm, attr, true);
		/* Device API contract: unknown attrs are -ENXIO, not -ENODEV. */
		return (r == -ENODEV) ? -ENXIO : r;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->arch.config_lock);

		/*
		 * Either userspace has already configured NR_IRQS or
		 * the vgic has already been initialized and vgic_init()
		 * supplied a default amount of SPIs.
		 */
		if (dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->arch.config_lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			return r;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			/*
			 * OK, this one isn't common at all, but we
			 * want to handle all control group attributes
			 * in a single place.
			 */
			if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
				return -ENXIO;
			/* Lock order: kvm->lock, all vcpus, then config_lock. */
			mutex_lock(&dev->kvm->lock);

			if (!lock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}

			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_v3_save_pending_tables(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
289
vgic_get_common_attr(struct kvm_device * dev,struct kvm_device_attr * attr)290 static int vgic_get_common_attr(struct kvm_device *dev,
291 struct kvm_device_attr *attr)
292 {
293 int r = -ENXIO;
294
295 switch (attr->group) {
296 case KVM_DEV_ARM_VGIC_GRP_ADDR:
297 r = kvm_vgic_addr(dev->kvm, attr, false);
298 return (r == -ENODEV) ? -ENXIO : r;
299 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
300 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
301
302 r = put_user(dev->kvm->arch.vgic.nr_spis +
303 VGIC_NR_PRIVATE_IRQS, uaddr);
304 break;
305 }
306 case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
307 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
308
309 r = put_user(dev->kvm->arch.vgic.mi_intid, uaddr);
310 break;
311 }
312 }
313
314 return r;
315 }
316
/* kvm_device_ops .create hook: instantiate the VGIC for this VM. */
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
321
/*
 * kvm_device_ops .destroy hook: only the device wrapper is freed here;
 * the VGIC state itself lives in struct kvm and is torn down with the VM.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
326
kvm_register_vgic_device(unsigned long type)327 int kvm_register_vgic_device(unsigned long type)
328 {
329 int ret = -ENODEV;
330
331 switch (type) {
332 case KVM_DEV_TYPE_ARM_VGIC_V2:
333 ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
334 KVM_DEV_TYPE_ARM_VGIC_V2);
335 break;
336 case KVM_DEV_TYPE_ARM_VGIC_V3:
337 ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
338 KVM_DEV_TYPE_ARM_VGIC_V3);
339
340 if (ret)
341 break;
342 ret = kvm_vgic_register_its_device();
343 break;
344 }
345
346 return ret;
347 }
348
vgic_v2_parse_attr(struct kvm_device * dev,struct kvm_device_attr * attr,struct vgic_reg_attr * reg_attr)349 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
350 struct vgic_reg_attr *reg_attr)
351 {
352 int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
353
354 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
355 reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
356 if (!reg_attr->vcpu)
357 return -EINVAL;
358
359 return 0;
360 }
361
/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success, -EFAULT on a failed userspace copy, -EBUSY if
 * the vcpus could not all be locked, or a negative error from the
 * register access helpers.
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 val;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	/* Fetch the value to write before taking any locks. */
	if (is_write)
		if (get_user(val, uaddr))
			return -EFAULT;

	/* Lock order: kvm->lock, all vcpus, then config_lock. */
	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* Register access requires a fully initialized vgic. */
	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the value out after dropping the locks. */
	if (!ret && !is_write)
		ret = put_user(val, uaddr);

	return ret;
}
426
vgic_v2_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)427 static int vgic_v2_set_attr(struct kvm_device *dev,
428 struct kvm_device_attr *attr)
429 {
430 switch (attr->group) {
431 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
432 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
433 return vgic_v2_attr_regs_access(dev, attr, true);
434 default:
435 return vgic_set_common_attr(dev, attr);
436 }
437 }
438
vgic_v2_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)439 static int vgic_v2_get_attr(struct kvm_device *dev,
440 struct kvm_device_attr *attr)
441 {
442 switch (attr->group) {
443 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
444 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
445 return vgic_v2_attr_regs_access(dev, attr, false);
446 default:
447 return vgic_get_common_attr(dev, attr);
448 }
449 }
450
vgic_v2_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)451 static int vgic_v2_has_attr(struct kvm_device *dev,
452 struct kvm_device_attr *attr)
453 {
454 switch (attr->group) {
455 case KVM_DEV_ARM_VGIC_GRP_ADDR:
456 switch (attr->attr) {
457 case KVM_VGIC_V2_ADDR_TYPE_DIST:
458 case KVM_VGIC_V2_ADDR_TYPE_CPU:
459 return 0;
460 }
461 break;
462 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
463 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
464 return vgic_v2_has_attr_regs(dev, attr);
465 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
466 return 0;
467 case KVM_DEV_ARM_VGIC_GRP_CTRL:
468 switch (attr->attr) {
469 case KVM_DEV_ARM_VGIC_CTRL_INIT:
470 return 0;
471 }
472 }
473 return -ENXIO;
474 }
475
/* Userspace device ops for the KVM_DEV_TYPE_ARM_VGIC_V2 device. */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
484
vgic_v3_parse_attr(struct kvm_device * dev,struct kvm_device_attr * attr,struct vgic_reg_attr * reg_attr)485 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
486 struct vgic_reg_attr *reg_attr)
487 {
488 unsigned long vgic_mpidr, mpidr_reg;
489
490 /*
491 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
492 * attr might not hold MPIDR. Hence assume vcpu0.
493 */
494 if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
495 vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
496 KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
497
498 mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
499 reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
500 } else {
501 reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
502 }
503
504 if (!reg_attr->vcpu)
505 return -EINVAL;
506
507 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
508
509 return 0;
510 }
511
/**
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success, -EFAULT on a failed userspace copy, -EBUSY if
 * the vcpus could not all be locked or the vgic initialization state
 * does not match the attribute's requirement, or a negative error from
 * the register access helpers.
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	bool uaccess, post_init = true;
	u32 val;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		/* Sysregs uaccess is performed by the sysreg handling code */
		uaccess = false;
		break;
	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
		/* Maintenance IRQ may only be set before the vgic is init'd. */
		post_init = false;
		fallthrough;
	default:
		uaccess = true;
	}

	/* Fetch the value to write before taking any locks. */
	if (uaccess && is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		if (get_user(val, uaddr))
			return -EFAULT;
	}

	/* Lock order: kvm->lock, all vcpus, then config_lock. */
	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* Most groups need an initialized vgic; MAINT_IRQ needs the opposite. */
	if (post_init != vgic_initialized(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
		break;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, &val);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
		if (!is_write) {
			val = dev->kvm->arch.vgic.mi_intid;
			ret = 0;
			break;
		}

		ret = -EINVAL;
		/* The maintenance IRQ must be a PPI (not an SGI or SPI). */
		if ((val < VGIC_NR_PRIVATE_IRQS) && (val >= VGIC_NR_SGIS)) {
			dev->kvm->arch.vgic.mi_intid = val;
			ret = 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the value out after dropping the locks. */
	if (!ret && uaccess && !is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		ret = put_user(val, uaddr);
	}

	return ret;
}
624
vgic_v3_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)625 static int vgic_v3_set_attr(struct kvm_device *dev,
626 struct kvm_device_attr *attr)
627 {
628 switch (attr->group) {
629 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
630 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
631 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
632 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
633 case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
634 return vgic_v3_attr_regs_access(dev, attr, true);
635 default:
636 return vgic_set_common_attr(dev, attr);
637 }
638 }
639
vgic_v3_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)640 static int vgic_v3_get_attr(struct kvm_device *dev,
641 struct kvm_device_attr *attr)
642 {
643 switch (attr->group) {
644 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
645 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
646 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
647 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
648 case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
649 return vgic_v3_attr_regs_access(dev, attr, false);
650 default:
651 return vgic_get_common_attr(dev, attr);
652 }
653 }
654
vgic_v3_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)655 static int vgic_v3_has_attr(struct kvm_device *dev,
656 struct kvm_device_attr *attr)
657 {
658 switch (attr->group) {
659 case KVM_DEV_ARM_VGIC_GRP_ADDR:
660 switch (attr->attr) {
661 case KVM_VGIC_V3_ADDR_TYPE_DIST:
662 case KVM_VGIC_V3_ADDR_TYPE_REDIST:
663 case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
664 return 0;
665 }
666 break;
667 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
668 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
669 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
670 return vgic_v3_has_attr_regs(dev, attr);
671 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
672 case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
673 return 0;
674 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
675 if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
676 KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
677 VGIC_LEVEL_INFO_LINE_LEVEL)
678 return 0;
679 break;
680 }
681 case KVM_DEV_ARM_VGIC_GRP_CTRL:
682 switch (attr->attr) {
683 case KVM_DEV_ARM_VGIC_CTRL_INIT:
684 return 0;
685 case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
686 return 0;
687 }
688 }
689 return -ENXIO;
690 }
691
/* Userspace device ops for the KVM_DEV_TYPE_ARM_VGIC_V3 device. */
struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};
700