xref: /linux/arch/riscv/kvm/aia.c (revision 11e8c7e9471cf8e6ae6ec7324a3174191cd965e3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpufeature.h>
#include <asm/kvm_nacl.h>

struct aia_hgei_control {
	raw_spinlock_t lock;
	unsigned long free_bitmap;
	struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;

unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

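/*
 * Compose a HVICTL value for the guest. HVICTL.IID is fixed to the
 * supervisor external interrupt (IRQ_S_EXT == 9), and the low IPRIO
 * bit doubles as the "external interrupt pending" indicator, per the
 * convention described in the function body.
 */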
static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
	unsigned long hvictl;

	/*
	 * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represents
	 * no interrupt in HVICTL.
	 */

	hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	hvictl |= ext_irq_pending;
	return hvictl;
}

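/*
 * On RV32, the upper 32 interrupt bits live in the *H CSRs, so KVM keeps
 * shadow copies (hviph, vsieh) that are flushed to and synced from the
 * hardware CSRs around guest entry and exit.
 */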
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long mask, val;

	if (!kvm_riscv_aia_available())
		return;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

		csr->hviph &= ~mask;
		csr->hviph |= val;
	}
}

void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (kvm_riscv_aia_available())
		csr->vsieh = ncsr_read(CSR_VSIEH);
}
#endif

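/*
 * Check whether any of the interrupts in @mask can currently be taken
 * by the VCPU. S-mode external interrupts are delivered through the
 * IMSIC guest file, so the IMSIC is consulted when VSIE.SEIE is set.
 */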
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long seip;

	if (!kvm_riscv_aia_available())
		return false;

#ifdef CONFIG_32BIT
	if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
	    (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
		return true;
#endif

	seip = vcpu->arch.guest_csr.vsie;
	seip &= (unsigned long)mask;
	seip &= BIT(IRQ_S_EXT);

	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
		return false;

	return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
}

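/*
 * Propagate the pending external interrupt state into HVICTL (and, on
 * RV32, the shadowed HVIPH) just before entering the guest.
 */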
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

#ifdef CONFIG_32BIT
	ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
}

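/*
 * Restore the VCPU's AIA CSR context on the new host CPU. The nested
 * acceleration (NACL) shared-memory path batches the CSR writes when
 * available; kvm_riscv_vcpu_aia_put() saves the same state back.
 */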
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	void *nsh;

	if (!kvm_riscv_aia_available())
		return;

	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();
		nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
		nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
		nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
		nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	} else {
		csr_write(CSR_VSISELECT, csr->vsiselect);
		csr_write(CSR_HVIPRIO1, csr->hviprio1);
		csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		csr_write(CSR_VSIEH, csr->vsieh);
		csr_write(CSR_HVIPH, csr->hviph);
		csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
		csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	}

	if (kvm_riscv_aia_initialized(vcpu->kvm))
		kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
}

void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	void *nsh;

	if (!kvm_riscv_aia_available())
		return;

	if (kvm_riscv_aia_initialized(vcpu->kvm))
		kvm_riscv_vcpu_aia_imsic_put(vcpu);

	if (kvm_riscv_nacl_available()) {
		nsh = nacl_shmem();
		csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
		csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
		csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
		csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
		csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
		csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
		csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
#endif
	} else {
		csr->vsiselect = csr_read(CSR_VSISELECT);
		csr->hviprio1 = csr_read(CSR_HVIPRIO1);
		csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
		csr->vsieh = csr_read(CSR_VSIEH);
		csr->hviph = csr_read(CSR_HVIPH);
		csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
		csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
	}
}

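/*
 * ONE_REG accessors for the AIA CSR state. @reg_num indexes the
 * struct kvm_riscv_aia_csr as an array of unsigned longs, with
 * array_index_nospec() guarding against speculative out-of-bounds use.
 */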
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

	if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		return -ENOENT;
	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	*out_val = 0;
	if (kvm_riscv_aia_available())
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

	if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		return -ENOENT;
	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
		if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
			WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
	}

	return 0;
}

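/*
 * Emulate a read-modify-write of the guest's *topei CSR by forwarding
 * it to the IMSIC virtualization code.
 */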
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask)
{
	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* If AIA not initialized then forward to user space */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return KVM_INSN_EXIT_TO_USER_SPACE;

	return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
					    val, new_val, wr_mask);
}

/*
 * External IRQ priority is always read-only zero. This means the
 * default priority order is always preferred for external IRQs unless
 * HVICTL.IID == 9 and HVICTL.IPRIO != 0.
 *
 * The table below gives, for each local interrupt number, the bit
 * position of its 8-bit priority field within the HVIPRIO CSR space
 * (-1 means the IRQ has no configurable priority).
 */
static int aia_irq2bitpos[] = {
0,     8,   -1,   -1,   16,   24,   -1,   -1, /* 0 - 7 */
32,   -1,   -1,   -1,   -1,   40,   48,   56, /* 8 - 15 */
64,   72,   80,   88,   96,  104,  112,  120, /* 16 - 23 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24 - 31 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 32 - 39 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 40 - 47 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48 - 55 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 56 - 63 */
};

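/*
 * Read the 8-bit priority of @irq from the appropriate HVIPRIO CSR.
 * The priority bits span HVIPRIO1/HVIPRIO1H/HVIPRIO2/HVIPRIO2H on RV32
 * and HVIPRIO1/HVIPRIO2 on RV64; the switch on bitpos / BITS_PER_LONG
 * picks the right CSR.
 */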
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return 0;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = ncsr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = ncsr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = ncsr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return 0;
	}

	return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}

static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = ncsr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = ncsr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = ncsr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return;
	}

	hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
	hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		ncsr_write(CSR_HVIPRIO1, hviprio);
		break;
	case 1:
#ifndef CONFIG_32BIT
		ncsr_write(CSR_HVIPRIO2, hviprio);
		break;
#else
		ncsr_write(CSR_HVIPRIO1H, hviprio);
		break;
	case 2:
		ncsr_write(CSR_HVIPRIO2, hviprio);
		break;
	case 3:
		ncsr_write(CSR_HVIPRIO2H, hviprio);
		break;
#endif
	default:
		return;
	}
}

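/*
 * Emulate a read-modify-write of one iprio array register. Each
 * XLEN-bit register packs 8-bit priorities for 4 (RV32) or 8 (RV64)
 * IRQs; on RV64 the odd-numbered iprio registers do not exist and
 * accessing them raises an illegal instruction trap.
 */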
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
			 unsigned long *val, unsigned long new_val,
			 unsigned long wr_mask)
{
	int i, first_irq, nirqs;
	unsigned long old_val;
	u8 prio;

#ifndef CONFIG_32BIT
	if (isel & 0x1)
		return KVM_INSN_ILLEGAL_TRAP;
#endif

	nirqs = 4 * (BITS_PER_LONG / 32);
	first_irq = (isel - ISELECT_IPRIO0) * 4;

	old_val = 0;
	for (i = 0; i < nirqs; i++) {
		prio = aia_get_iprio8(vcpu, first_irq + i);
		old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
	}

	if (val)
		*val = old_val;

	if (wr_mask) {
		new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
		for (i = 0; i < nirqs; i++) {
			prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
				TOPI_IPRIO_MASK;
			aia_set_iprio8(vcpu, first_irq + i, prio);
		}
	}

	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	unsigned int isel;

	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* First try to emulate in kernel space */
	isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
		 kvm_riscv_aia_initialized(vcpu->kvm))
		return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
						    wr_mask);

	/* We can't handle it here so redirect to user space */
	return KVM_INSN_EXIT_TO_USER_SPACE;
}

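/*
 * Allocate a guest external interrupt line on @cpu for @owner. Returns
 * the line number (> 0) on success and, when available, also reports
 * the virtual and physical address of the matching IMSIC guest MSI page.
 */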
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
	int ret = -ENOENT;
	unsigned long flags;
	const struct imsic_global_config *gc;
	const struct imsic_local_config *lc;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return -ENODEV;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgctrl->free_bitmap) {
		ret = __ffs(hgctrl->free_bitmap);
		hgctrl->free_bitmap &= ~BIT(ret);
		hgctrl->owners[ret] = owner;
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	gc = imsic_get_global_config();
	lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
	if (lc && ret > 0) {
		if (hgei_va)
			*hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
		if (hgei_pa)
			*hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
	}

	return ret;
}

void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
	unsigned long flags;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
		if (!(hgctrl->free_bitmap & BIT(hgei))) {
			hgctrl->free_bitmap |= BIT(hgei);
			hgctrl->owners[hgei] = NULL;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}

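/*
 * Per-CPU SGEI handler: mask the lines that fired in HGEIE and kick the
 * owning VCPUs so they notice the pending guest external interrupt.
 */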
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
	int i;
	unsigned long hgei_mask, flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
	csr_clear(CSR_HGEIE, hgei_mask);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
		if (hgctrl->owners[i])
			kvm_vcpu_kick(hgctrl->owners[i]);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
	return IRQ_HANDLED;
}

static int aia_hgei_init(void)
{
	int cpu, rc;
	struct irq_domain *domain;
	struct aia_hgei_control *hgctrl;

	/* Initialize per-CPU guest external interrupt line management */
	for_each_possible_cpu(cpu) {
		hgctrl = per_cpu_ptr(&aia_hgei, cpu);
		raw_spin_lock_init(&hgctrl->lock);
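		/*
		 * Line 0 is not a usable guest external interrupt, so
		 * mark lines 1..kvm_riscv_aia_nr_hgei as free.
		 */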
		if (kvm_riscv_aia_nr_hgei) {
			hgctrl->free_bitmap =
				BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
			hgctrl->free_bitmap &= ~BIT(0);
		} else
			hgctrl->free_bitmap = 0;
	}

	/* Skip SGEI interrupt setup for zero guest external interrupts */
	if (!kvm_riscv_aia_nr_hgei)
		goto skip_sgei_interrupt;

	/* Find INTC irq domain */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		kvm_err("unable to find INTC domain\n");
		return -ENOENT;
	}

	/* Map per-CPU SGEI interrupt from INTC domain */
	hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
	if (!hgei_parent_irq) {
		kvm_err("unable to map SGEI IRQ\n");
		return -ENOMEM;
	}

	/* Request per-CPU SGEI interrupt */
	rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
				"riscv-kvm", &aia_hgei);
	if (rc) {
		kvm_err("failed to request SGEI IRQ\n");
		return rc;
	}

skip_sgei_interrupt:
	return 0;
}

static void aia_hgei_exit(void)
{
	/* Do nothing for zero guest external interrupts */
	if (!kvm_riscv_aia_nr_hgei)
		return;

	/* Free per-CPU SGEI interrupt */
	free_percpu_irq(hgei_parent_irq, &aia_hgei);
}

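/*
 * Per-CPU enable path: reset the AIA hypervisor CSRs to sane defaults
 * and enable the per-CPU SGEI interrupt.
 */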
void kvm_riscv_aia_enable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	csr_write(CSR_HVICTL, aia_hvictl_value(false));
	csr_write(CSR_HVIPRIO1, 0x0);
	csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, 0x0);
	csr_write(CSR_HIDELEGH, 0x0);
	csr_write(CSR_HVIPRIO1H, 0x0);
	csr_write(CSR_HVIPRIO2H, 0x0);
#endif

	/* Enable per-CPU SGEI interrupt */
	enable_percpu_irq(hgei_parent_irq,
			  irq_get_trigger_type(hgei_parent_irq));
	csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
	/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
		csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}

void kvm_riscv_aia_disable(void)
{
	int i;
	unsigned long flags;
	struct kvm_vcpu *vcpu;
	struct aia_hgei_control *hgctrl;

	if (!kvm_riscv_aia_available())
		return;
	hgctrl = get_cpu_ptr(&aia_hgei);

	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
		csr_clear(CSR_HVIEN, BIT(IRQ_PMU_OVF));
	/* Disable per-CPU SGEI interrupt */
	csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
	disable_percpu_irq(hgei_parent_irq);

	csr_write(CSR_HVICTL, aia_hvictl_value(false));

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
		vcpu = hgctrl->owners[i];
		if (!vcpu)
			continue;

		/*
		 * We release hgctrl->lock before notifying IMSIC
		 * so that we don't have lock ordering issues.
		 */
		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

		/* Notify IMSIC */
		kvm_riscv_vcpu_aia_imsic_release(vcpu);

		/*
		 * Wake up the VCPU if it was blocked so that it can
		 * run on other HARTs.
		 */
		if (csr_read(CSR_HGEIE) & BIT(i)) {
			csr_clear(CSR_HGEIE, BIT(i));
			kvm_vcpu_kick(vcpu);
		}

		raw_spin_lock_irqsave(&hgctrl->lock, flags);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
}

int kvm_riscv_aia_init(void)
{
	int rc;
	const struct imsic_global_config *gc;

	if (!riscv_isa_extension_available(NULL, SxAIA))
		return -ENODEV;
	gc = imsic_get_global_config();

	/* Figure out the number of bits in HGEIE */
	csr_write(CSR_HGEIE, -1UL);
	kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
	csr_write(CSR_HGEIE, 0);
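	/*
	 * fls_long() is one-based and HGEIE bit 0 is hardwired to zero,
	 * so subtracting one yields the index of the highest writable
	 * bit, i.e. the number of implemented lines (GEILEN).
	 */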
	if (kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_nr_hgei--;

	/*
	 * The number of usable HGEI lines should be the minimum of the
	 * per-HART IMSIC guest files and the number of bits in HGEIE.
	 */
	if (gc)
		kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
					    gc->nr_guest_files);
	else
		kvm_riscv_aia_nr_hgei = 0;

	/* Find number of guest MSI IDs */
	kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
	if (gc && kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;

	/* Initialize guest external interrupt line management */
	rc = aia_hgei_init();
	if (rc)
		return rc;

	/* Register device operations */
	rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
				     KVM_DEV_TYPE_RISCV_AIA);
	if (rc) {
		aia_hgei_exit();
		return rc;
	}

	/* Enable KVM AIA support */
	static_branch_enable(&kvm_riscv_aia_available);

	return 0;
}

void kvm_riscv_aia_exit(void)
{
	if (!kvm_riscv_aia_available())
		return;

	/* Unregister device operations */
	kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);

	/* Cleanup the HGEI state */
	aia_hgei_exit();
}
683