xref: /linux/arch/loongarch/kvm/intc/eiointc.c (revision ddb7a62af2e766eabb4ab7080e6ed8d6b8915302)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024 Loongson Technology Corporation Limited
4  */
5 
6 #include <asm/kvm_eiointc.h>
7 #include <asm/kvm_vcpu.h>
8 #include <linux/count_zeros.h>
9 
/*
 * Rebuild the software per-cpu/per-IP in-service bitmaps (sw_coreisr)
 * from the architected coreisr registers.  Invoked from the
 * KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED control, i.e. after
 * userspace has restored the raw register state.
 *
 * Caller is expected to hold s->lock (taken in kvm_eiointc_ctrl_access).
 */
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
	int ipnum, cpu, cpuid, irq;
	struct kvm_vcpu *vcpu;

	for (irq = 0; irq < EIOINTC_IRQS; irq++) {
		/* one ipmap byte routes a group of 32 irqs to a parent IP line */
		ipnum = s->ipmap.reg_u8[irq / 32];
		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
			/*
			 * Bitmap (non-encoded) mode: the lowest set bit selects
			 * the IP line; out-of-range values fall back to IP0.
			 */
			ipnum = count_trailing_zeros(ipnum);
			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
		}

		/* coremap byte holds the physical cpuid this irq targets */
		cpuid = s->coremap.reg_u8[irq];
		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		/* mirror the architected coreisr bit into the sw copy */
		if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
			__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
		else
			__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
	}
}
34 
/*
 * Propagate an irq level change to the routed vCPU's parent IP line.
 *
 * The parent interrupt (INT_HWI0 + ipnum) is only toggled when this irq
 * is the sole pending irq on that (cpu, ipnum) pair: on assert, the
 * sw_coreisr bitmap is scanned *before* the new bit is set; on deassert,
 * it is scanned *after* the bit is cleared.  In both cases a hit
 * (found < EIOINTC_IRQS) means another irq already keeps the parent
 * line asserted and no vCPU injection is needed.
 *
 * Caller must hold s->lock.
 */
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
	int ipnum, cpu, found;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt vcpu_irq;

	/* same ipmap decode as eiointc_set_sw_coreisr() */
	ipnum = s->ipmap.reg_u8[irq / 32];
	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
		ipnum = count_trailing_zeros(ipnum);
		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
	}

	cpu = s->sw_coremap[irq];
	vcpu = kvm_get_vcpu(s->kvm, cpu);
	if (level) {
		/* if not enable return false */
		if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
			return;
		__set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
		/* scan before setting: was anything already in service? */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
		__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
	} else {
		__clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
		__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
		/* scan after clearing: is anything still in service? */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
	}

	if (found < EIOINTC_IRQS)
		return; /* other irq is handling, needn't update parent irq */

	/* negative irq number means deassert in kvm_vcpu_ioctl_interrupt() */
	vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}
68 
/*
 * Decode a coremap register write covering irqs [irq, irq + len) — one
 * routing byte per irq in @val — and update the software routing table
 * sw_coremap accordingly.
 *
 * @notify: when true (runtime MMIO write), an irq that is currently
 * pending (isr bit set) is migrated live: lowered on the old vCPU,
 * rerouted, then raised on the new one.  Restore-time callers pass
 * false since eiointc_set_sw_coreisr() rebuilds state separately.
 *
 * Caller must hold s->lock.
 */
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
					int irq, u64 val, u32 len, bool notify)
{
	int i, cpu, cpuid;
	struct kvm_vcpu *vcpu;

	for (i = 0; i < len; i++) {
		/* consume one routing byte per irq */
		cpuid = val & 0xff;
		val = val >> 8;

		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
			/* bitmap mode: lowest set bit selects the cpu, else cpu 0 */
			cpuid = ffs(cpuid) - 1;
			cpuid = (cpuid >= 4) ? 0 : cpuid;
		}

		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		if (s->sw_coremap[irq + i] == cpu)
			continue;

		if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
			/* lower irq at old cpu and raise irq at new cpu */
			eiointc_update_irq(s, irq + i, 0);
			s->sw_coremap[irq + i] = cpu;
			eiointc_update_irq(s, irq + i, 1);
		} else {
			s->sw_coremap[irq + i] = cpu;
		}
	}
}
102 
eiointc_set_irq(struct loongarch_eiointc * s,int irq,int level)103 void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
104 {
105 	unsigned long flags;
106 	unsigned long *isr = (unsigned long *)s->isr.reg_u8;
107 
108 	spin_lock_irqsave(&s->lock, flags);
109 	level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
110 	eiointc_update_irq(s, irq, level);
111 	spin_unlock_irqrestore(&s->lock, flags);
112 }
113 
/*
 * Read one aligned 64-bit word of eiointc register state at @addr.
 * The caller (kvm_eiointc_read) aligns the address and extracts the
 * requested sub-word, so every case here loads a full reg_u64 entry.
 *
 * Returns 0 with *val filled, or -EINVAL for an unknown offset.
 * Caller must hold s->lock.
 */
static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, unsigned long *val)
{
	int index, ret = 0;
	u64 data = 0;
	gpa_t offset;

	offset = addr - EIOINTC_BASE;
	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		data = s->nodetype.reg_u64[index];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/* ipmap is a single u64; index is computed but unused here */
		index = (offset - EIOINTC_IPMAP_START) >> 3;
		data = s->ipmap.reg_u64;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		data = s->enable.reg_u64[index];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		data = s->bounce.reg_u64[index];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per vCPU; read the requesting vCPU's bank */
		index = (offset - EIOINTC_COREISR_START) >> 3;
		data = s->coreisr.reg_u64[vcpu->vcpu_id][index];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 3;
		data = s->coremap.reg_u64[index];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*val = data;

	return ret;
}
155 
/*
 * IOCSR-bus read handler for the main eiointc register window.
 *
 * Validates alignment, reads the containing 64-bit word under the lock,
 * then shifts out and sign-extends the requested 1/2/4/8-byte sub-word
 * into *val (callers expect a full-register, sign-extended result).
 */
static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	int ret = -EINVAL;
	unsigned long flags, data, offset;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* access must be naturally aligned for its size */
	if (addr & (len - 1)) {
		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	/* round down to the containing 64-bit word */
	offset = addr & 0x7;
	addr -= offset;
	vcpu->stat.eiointc_read_exits++;
	spin_lock_irqsave(&eiointc->lock, flags);
	ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
	spin_unlock_irqrestore(&eiointc->lock, flags);
	if (ret)
		return ret;

	/* extract the requested bytes and sign-extend to register width */
	data = data >> (offset * 8);
	switch (len) {
	case 1:
		*(long *)val = (s8)data;
		break;
	case 2:
		*(long *)val = (s16)data;
		break;
	case 4:
		*(long *)val = (s32)data;
		break;
	default:
		*(long *)val = (long)data;
		break;
	}

	return 0;
}
201 
/*
 * Write @value (masked to @field_mask width: 0xFF/USHRT_MAX/UINT_MAX/
 * ULONG_MAX) at @addr, performing a read-modify-write of the containing
 * 64-bit register and triggering the side effects of each register:
 * enable toggles raise/lower pending irqs, coreisr writes are
 * write-1-to-clear, coremap writes reroute irqs.
 *
 * Returns 0 on success, -EINVAL for an unknown offset.
 * Caller must hold s->lock.
 */
static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, u64 value, u64 field_mask)
{
	int index, irq, ret = 0;
	u8 cpu;
	u64 data, old, mask;
	gpa_t offset;

	/* position the sub-word mask/data within the 64-bit register */
	offset = addr & 7;
	mask = field_mask << (offset * 8);
	data = (value & field_mask) << (offset * 8);

	addr -= offset;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		old = s->nodetype.reg_u64[index];
		s->nodetype.reg_u64[index] = (old & ~mask) | data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		old = s->ipmap.reg_u64;
		s->ipmap.reg_u64 = (old & ~mask) | data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		old = s->enable.reg_u64[index];
		s->enable.reg_u64[index] = (old & ~mask) | data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 1);
			data &= ~BIT_ULL(irq);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		old = s->bounce.reg_u64[index];
		s->bounce.reg_u64[index] = (old & ~mask) | data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 3;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		old = s->coreisr.reg_u64[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u64[cpu][index] = old & ~data;
		/* lower every irq that was actually cleared */
		data &= old;
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 3;
		old = s->coremap.reg_u64[index];
		s->coremap.reg_u64[index] = (old & ~mask) | data;
		data = s->coremap.reg_u64[index];
		/* reroute the 8 irqs covered by this 64-bit coremap word */
		eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
291 
/*
 * IOCSR-bus write handler for the main eiointc register window.
 * Validates alignment, widens the guest value to an unsigned long and
 * dispatches to loongarch_eiointc_write() with the field mask matching
 * the access size, all under the eiointc spinlock.
 */
static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
	unsigned long flags, value;
	int ret;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* access must be naturally aligned for its size */
	if (addr & (len - 1)) {
		kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	vcpu->stat.eiointc_write_exits++;
	spin_lock_irqsave(&eiointc->lock, flags);
	if (len == 1) {
		value = *(unsigned char *)val;
		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
	} else if (len == 2) {
		value = *(unsigned short *)val;
		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
	} else if (len == 4) {
		value = *(unsigned int *)val;
		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
	} else {
		value = *(unsigned long *)val;
		ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
	}
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}
334 
/* IOCSR-bus callbacks for the main eiointc register window */
static const struct kvm_io_device_ops kvm_eiointc_ops = {
	.read	= kvm_eiointc_read,
	.write	= kvm_eiointc_write,
};
339 
/*
 * Read handler for the virtualization-extension window: exposes the
 * feature and config words.  Unknown offsets leave *val untouched and
 * still return success, matching real-register "read as garbage"
 * behaviour.
 */
static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, void *val)
{
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
	u32 *out = val;
	unsigned long flags;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&eiointc->lock, flags);
	if (addr == EIOINTC_VIRT_FEATURES)
		*out = eiointc->features;
	else if (addr == EIOINTC_VIRT_CONFIG)
		*out = eiointc->status;
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return 0;
}
369 
/*
 * Write handler for the virtualization-extension window.  The features
 * word is read-only (-EPERM); the config word may only enable features
 * while the controller is disabled, and is clamped to the advertised
 * feature set.  Unknown offsets are silently ignored.
 */
static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, const void *val)
{
	int ret = 0;
	unsigned long flags;
	u32 value = *(u32 *)val;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&eiointc->lock, flags);
	switch (addr) {
	case EIOINTC_VIRT_FEATURES:
		/* features register is read-only */
		ret = -EPERM;
		break;
	case EIOINTC_VIRT_CONFIG:
		/*
		 * eiointc features can only be set at disabled status
		 */
		if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
			ret = -EPERM;
			break;
		}
		/* only advertised features may be enabled */
		eiointc->status = value & eiointc->features;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}
407 
/* IOCSR-bus callbacks for the virtualization-extension window */
static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
	.read	= kvm_eiointc_virt_read,
	.write	= kvm_eiointc_virt_write,
};
412 
kvm_eiointc_ctrl_access(struct kvm_device * dev,struct kvm_device_attr * attr)413 static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
414 					struct kvm_device_attr *attr)
415 {
416 	int ret = 0;
417 	unsigned long flags;
418 	unsigned long type = (unsigned long)attr->attr;
419 	u32 i, start_irq, val;
420 	void __user *data;
421 	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
422 
423 	data = (void __user *)attr->addr;
424 	spin_lock_irqsave(&s->lock, flags);
425 	switch (type) {
426 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
427 		if (copy_from_user(&val, data, 4))
428 			ret = -EFAULT;
429 		else {
430 			if (val >= EIOINTC_ROUTE_MAX_VCPUS)
431 				ret = -EINVAL;
432 			else
433 				s->num_cpu = val;
434 		}
435 		break;
436 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
437 		if (copy_from_user(&s->features, data, 4))
438 			ret = -EFAULT;
439 		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
440 			s->status |= BIT(EIOINTC_ENABLE);
441 		break;
442 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
443 		eiointc_set_sw_coreisr(s);
444 		for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
445 			start_irq = i * 4;
446 			eiointc_update_sw_coremap(s, start_irq,
447 					s->coremap.reg_u32[i], sizeof(u32), false);
448 		}
449 		break;
450 	default:
451 		break;
452 	}
453 	spin_unlock_irqrestore(&s->lock, flags);
454 
455 	return ret;
456 }
457 
/*
 * Save/restore one 32-bit eiointc register via device attributes
 * (KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS).  attr->attr encodes the cpu
 * index in bits 31:16 (used only by the banked coreisr registers) and
 * the register address in bits 15:0.
 *
 * Fix: the original performed copy_from_user()/copy_to_user() while
 * holding spin_lock_irqsave(); user copies can fault and sleep, which
 * is forbidden in atomic context.  The copy is now staged through a
 * local u32 outside the lock, with only the memcpy to/from device
 * state done under it.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL for an
 * unknown address or an out-of-range cpu.
 */
static int kvm_eiointc_regs_access(struct kvm_device *dev,
					struct kvm_device_attr *attr,
					bool is_write)
{
	int addr, cpu, offset, ret = 0;
	unsigned long flags;
	void *p = NULL;
	u32 val;
	void __user *data;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	cpu = addr >> 16;
	addr &= 0xffff;
	data = (void __user *)attr->addr;
	switch (addr) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		offset = (addr - EIOINTC_NODETYPE_START) / 4;
		p = &s->nodetype.reg_u32[offset];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		offset = (addr - EIOINTC_IPMAP_START) / 4;
		p = &s->ipmap.reg_u32[offset];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		offset = (addr - EIOINTC_ENABLE_START) / 4;
		p = &s->enable.reg_u32[offset];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		offset = (addr - EIOINTC_BOUNCE_START) / 4;
		p = &s->bounce.reg_u32[offset];
		break;
	case EIOINTC_ISR_START ... EIOINTC_ISR_END:
		offset = (addr - EIOINTC_ISR_START) / 4;
		p = &s->isr.reg_u32[offset];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per vCPU; validate the encoded cpu index */
		if (cpu >= s->num_cpu)
			return -EINVAL;

		offset = (addr - EIOINTC_COREISR_START) / 4;
		p = &s->coreisr.reg_u32[cpu][offset];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		offset = (addr - EIOINTC_COREMAP_START) / 4;
		p = &s->coremap.reg_u32[offset];
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	/* user copies may fault/sleep: keep them outside the spinlock */
	if (is_write) {
		if (copy_from_user(&val, data, 4))
			return -EFAULT;
	}

	spin_lock_irqsave(&s->lock, flags);
	if (is_write)
		memcpy(p, &val, 4);
	else
		memcpy(&val, p, 4);
	spin_unlock_irqrestore(&s->lock, flags);

	if (!is_write) {
		if (copy_to_user(data, &val, 4))
			return -EFAULT;
	}

	return ret;
}
522 
/*
 * Save/restore eiointc software state via device attributes
 * (KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS).  num_cpu and features are
 * read-only here (they are set through the CTRL group); writes to them
 * are silently accepted as no-ops for backward compatibility.
 *
 * Fix: the original performed copy_from_user()/copy_to_user() while
 * holding spin_lock_irqsave(); user copies can fault and sleep, which
 * is forbidden in atomic context.  The copy is now staged through a
 * local u32 outside the lock.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL for an
 * unknown address.
 */
static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
					struct kvm_device_attr *attr,
					bool is_write)
{
	int addr, ret = 0;
	unsigned long flags;
	void *p = NULL;
	u32 val;
	void __user *data;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	addr &= 0xffff;

	data = (void __user *)attr->addr;
	switch (addr) {
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
		if (is_write)
			return ret;

		p = &s->num_cpu;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
		if (is_write)
			return ret;

		p = &s->features;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
		p = &s->status;
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	/* user copies may fault/sleep: keep them outside the spinlock */
	if (is_write) {
		if (copy_from_user(&val, data, 4))
			return -EFAULT;
	}

	spin_lock_irqsave(&s->lock, flags);
	if (is_write)
		memcpy(p, &val, 4);
	else
		memcpy(&val, p, 4);
	spin_unlock_irqrestore(&s->lock, flags);

	if (!is_write) {
		if (copy_to_user(data, &val, 4))
			return -EFAULT;
	}

	return ret;
}
570 
kvm_eiointc_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)571 static int kvm_eiointc_get_attr(struct kvm_device *dev,
572 				struct kvm_device_attr *attr)
573 {
574 	switch (attr->group) {
575 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
576 		return kvm_eiointc_regs_access(dev, attr, false);
577 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
578 		return kvm_eiointc_sw_status_access(dev, attr, false);
579 	default:
580 		return -EINVAL;
581 	}
582 }
583 
kvm_eiointc_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)584 static int kvm_eiointc_set_attr(struct kvm_device *dev,
585 				struct kvm_device_attr *attr)
586 {
587 	switch (attr->group) {
588 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
589 		return kvm_eiointc_ctrl_access(dev, attr);
590 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
591 		return kvm_eiointc_regs_access(dev, attr, true);
592 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
593 		return kvm_eiointc_sw_status_access(dev, attr, true);
594 	default:
595 		return -EINVAL;
596 	}
597 }
598 
/*
 * Create the per-VM eiointc device: allocate state and register the
 * main and virt-extension IOCSR windows on the IOCSR bus.
 *
 * Fix: the original dropped kvm->slots_lock after the first
 * kvm_io_bus_register_dev() and performed the second registration (and
 * the failure-path unregister) without it, but the io-bus
 * register/unregister API requires slots_lock to be held.  Hold the
 * lock across both registrations and the publication of
 * kvm->arch.eiointc.
 *
 * Returns 0 on success, -EINVAL if already created, -ENOMEM on
 * allocation failure, or the io-bus registration error.
 */
static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct loongarch_eiointc *s;
	struct kvm_io_device *device;
	struct kvm *kvm = dev->kvm;

	/* eiointc has been created */
	if (kvm->arch.eiointc)
		return -EINVAL;

	s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_eiointc_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_BASE, EIOINTC_SIZE, device);
	if (ret < 0) {
		mutex_unlock(&kvm->slots_lock);
		kfree(s);
		return ret;
	}

	device = &s->device_vext;
	kvm_iodevice_init(device, &kvm_eiointc_virt_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device);
	if (ret < 0) {
		/* roll back the first registration, still under slots_lock */
		kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
		mutex_unlock(&kvm->slots_lock);
		kfree(s);
		return ret;
	}
	kvm->arch.eiointc = s;
	mutex_unlock(&kvm->slots_lock);

	return 0;
}
644 
kvm_eiointc_destroy(struct kvm_device * dev)645 static void kvm_eiointc_destroy(struct kvm_device *dev)
646 {
647 	struct kvm *kvm;
648 	struct loongarch_eiointc *eiointc;
649 
650 	if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
651 		return;
652 
653 	kvm = dev->kvm;
654 	eiointc = kvm->arch.eiointc;
655 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
656 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
657 	kfree(eiointc);
658 }
659 
/* KVM device ops backing KVM_DEV_TYPE_LOONGARCH_EIOINTC */
static struct kvm_device_ops kvm_eiointc_dev_ops = {
	.name = "kvm-loongarch-eiointc",
	.create = kvm_eiointc_create,
	.destroy = kvm_eiointc_destroy,
	.set_attr = kvm_eiointc_set_attr,
	.get_attr = kvm_eiointc_get_attr,
};
667 
/*
 * Register the eiointc device type with the generic KVM device
 * framework so userspace can create it via KVM_CREATE_DEVICE.
 */
int kvm_loongarch_register_eiointc_device(void)
{
	return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
}
672