xref: /kvmtool/riscv/aia.c (revision 328f0879eeaee9ac167e3d00e4a9b5553e99d2f9)
#include "kvm/devices.h"
#include "kvm/fdt.h"
#include "kvm/ioeventfd.h"
#include "kvm/ioport.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
#include "kvm/irq.h"
#include "kvm/util.h"

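/*
 * aia_fd holds the file descriptor of the in-kernel AIA device created
 * by aia__create(); a negative value means no AIA device exists and
 * aia__init() becomes a no-op. The attribute structures below are bound
 * to their companion variables in aia__init() and exchanged with KVM
 * via KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR on that fd.
 */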
static int aia_fd = -1;

static u32 aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
static struct kvm_device_attr aia_mode_attr = {
	.group	= KVM_DEV_RISCV_AIA_GRP_CONFIG,
	.attr	= KVM_DEV_RISCV_AIA_CONFIG_MODE,
};

static u32 aia_nr_ids = 0;
static struct kvm_device_attr aia_nr_ids_attr = {
	.group	= KVM_DEV_RISCV_AIA_GRP_CONFIG,
	.attr	= KVM_DEV_RISCV_AIA_CONFIG_IDS,
};

static u32 aia_nr_sources = 0;
static struct kvm_device_attr aia_nr_sources_attr = {
	.group	= KVM_DEV_RISCV_AIA_GRP_CONFIG,
	.attr	= KVM_DEV_RISCV_AIA_CONFIG_SRCS,
};

static u32 aia_hart_bits = 0;
static struct kvm_device_attr aia_hart_bits_attr = {
	.group	= KVM_DEV_RISCV_AIA_GRP_CONFIG,
	.attr	= KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
};

static u32 aia_nr_harts = 0;

#define IRQCHIP_AIA_NR			0

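/*
 * Guest physical layout of the AIA, as encoded by the macros below: one
 * IMSIC page per hart, packed back to back starting at RISCV_IRQCHIP,
 * with the APLIC register space placed immediately after the last
 * IMSIC. For example, with two harts:
 *
 *   hart0 IMSIC: RISCV_IRQCHIP
 *   hart1 IMSIC: RISCV_IRQCHIP + KVM_DEV_RISCV_IMSIC_SIZE
 *   APLIC:       RISCV_IRQCHIP + 2 * KVM_DEV_RISCV_IMSIC_SIZE
 */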
#define AIA_IMSIC_BASE			RISCV_IRQCHIP
#define AIA_IMSIC_ADDR(__hart)		\
	(AIA_IMSIC_BASE + (__hart) * KVM_DEV_RISCV_IMSIC_SIZE)
#define AIA_IMSIC_SIZE			\
	(aia_nr_harts * KVM_DEV_RISCV_IMSIC_SIZE)
#define AIA_APLIC_ADDR			\
	(AIA_IMSIC_BASE + AIA_IMSIC_SIZE)

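/*
 * Generate the "imsics" node and, if there are wired interrupt sources,
 * the "aplic" node for the guest device tree. Each interrupts-extended
 * entry binds an IMSIC interrupt file to a hart's cpu-intc phandle with
 * interrupt 9, the RISC-V supervisor external interrupt. A rough sketch
 * of the output for two harts (labels such as &cpu0_intc are
 * illustrative, and the addresses and counts depend on RISCV_IRQCHIP
 * and on the values queried from KVM):
 *
 *   imsics@<AIA_IMSIC_BASE> {
 *           compatible = "riscv,imsics";
 *           reg = <0x0 AIA_IMSIC_BASE 0x0 AIA_IMSIC_SIZE>;
 *           #interrupt-cells = <0>;
 *           interrupt-controller;
 *           msi-controller;
 *           riscv,num-ids = <aia_nr_ids>;
 *           interrupts-extended = <&cpu0_intc 9>, <&cpu1_intc 9>;
 *   };
 *
 *   aplic@<AIA_APLIC_ADDR> {
 *           compatible = "riscv,aplic";
 *           reg = <0x0 AIA_APLIC_ADDR 0x0 KVM_DEV_RISCV_APLIC_SIZE>;
 *           #interrupt-cells = <2>;
 *           interrupt-controller;
 *           riscv,num-sources = <aia_nr_sources>;
 *           msi-parent = <&imsics>;
 *   };
 */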
static void aia__generate_fdt_node(void *fdt, struct kvm *kvm)
{
	u32 i;
	char name[64];
	u32 reg_cells[4], *irq_cells;

	irq_cells = calloc(aia_nr_harts * 2, sizeof(u32));
	if (!irq_cells)
		die("Failed to alloc irq_cells");

	sprintf(name, "imsics@%08x", (u32)AIA_IMSIC_BASE);
	_FDT(fdt_begin_node(fdt, name));
	_FDT(fdt_property_string(fdt, "compatible", "riscv,imsics"));
	reg_cells[0] = 0;
	reg_cells[1] = cpu_to_fdt32(AIA_IMSIC_BASE);
	reg_cells[2] = 0;
	reg_cells[3] = cpu_to_fdt32(AIA_IMSIC_SIZE);
	_FDT(fdt_property(fdt, "reg", reg_cells, sizeof(reg_cells)));
	_FDT(fdt_property_cell(fdt, "#interrupt-cells", 0));
	_FDT(fdt_property(fdt, "interrupt-controller", NULL, 0));
	_FDT(fdt_property(fdt, "msi-controller", NULL, 0));
	_FDT(fdt_property_cell(fdt, "riscv,num-ids", aia_nr_ids));
	_FDT(fdt_property_cell(fdt, "phandle", PHANDLE_AIA_IMSIC));
	for (i = 0; i < aia_nr_harts; i++) {
		irq_cells[2*i + 0] = cpu_to_fdt32(PHANDLE_CPU_INTC_BASE + i);
		irq_cells[2*i + 1] = cpu_to_fdt32(9);
	}
	_FDT(fdt_property(fdt, "interrupts-extended", irq_cells,
			  sizeof(u32) * aia_nr_harts * 2));
	_FDT(fdt_end_node(fdt));

	free(irq_cells);

	/* Skip APLIC node if we have no interrupt sources */
	if (!aia_nr_sources)
		return;

	sprintf(name, "aplic@%08x", (u32)AIA_APLIC_ADDR);
	_FDT(fdt_begin_node(fdt, name));
	_FDT(fdt_property_string(fdt, "compatible", "riscv,aplic"));
	reg_cells[0] = 0;
	reg_cells[1] = cpu_to_fdt32(AIA_APLIC_ADDR);
	reg_cells[2] = 0;
	reg_cells[3] = cpu_to_fdt32(KVM_DEV_RISCV_APLIC_SIZE);
	_FDT(fdt_property(fdt, "reg", reg_cells, sizeof(reg_cells)));
	_FDT(fdt_property_cell(fdt, "#interrupt-cells", 2));
	_FDT(fdt_property(fdt, "interrupt-controller", NULL, 0));
	_FDT(fdt_property_cell(fdt, "riscv,num-sources", aia_nr_sources));
	_FDT(fdt_property_cell(fdt, "phandle", PHANDLE_AIA_APLIC));
	_FDT(fdt_property_cell(fdt, "msi-parent", PHANDLE_AIA_IMSIC));
	_FDT(fdt_end_node(fdt));
}

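/*
 * Install an identity GSI-to-pin routing for the APLIC: GSI n is routed
 * to wired input pin n of irqchip IRQCHIP_AIA_NR. One entry beyond
 * aia_nr_sources is reserved, presumably because APLIC sources are
 * numbered starting from 1, so pin 0 is never actually used.
 */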
static int aia__irq_routing_init(struct kvm *kvm)
{
	int r;
	int irqlines = aia_nr_sources + 1;

	/* Skip this if we have no interrupt sources */
	if (!aia_nr_sources)
		return 0;

	/*
	 * This describes the default routing that the kernel uses without
	 * any routing explicitly set up via KVM_SET_GSI_ROUTING. So we
	 * don't need to commit these settings right now. The first actual
	 * user (MSI routing) will engage these mappings then.
	 */
	for (next_gsi = 0; next_gsi < irqlines; next_gsi++) {
		r = irq__allocate_routing_entry();
		if (r)
			return r;

		irq_routing->entries[irq_routing->nr++] =
			(struct kvm_irq_routing_entry) {
				.gsi = next_gsi,
				.type = KVM_IRQ_ROUTING_IRQCHIP,
				.u.irqchip.irqchip = IRQCHIP_AIA_NR,
				.u.irqchip.pin = next_gsi,
		};
	}

	return 0;
}

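/*
 * Finish configuring the AIA device. This runs as a late_init call,
 * after the rest of device setup has had a chance to allocate wired
 * interrupt lines, so irq__get_nr_allocated_lines() reflects the final
 * number of APLIC sources needed.
 */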
static int aia__init(struct kvm *kvm)
{
	int i, ret;
	u64 aia_addr = 0;
	struct kvm_device_attr aia_addr_attr = {
		.group	= KVM_DEV_RISCV_AIA_GRP_ADDR,
		.addr	= (u64)(unsigned long)&aia_addr,
	};
	struct kvm_device_attr aia_init_attr = {
		.group	= KVM_DEV_RISCV_AIA_GRP_CTRL,
		.attr	= KVM_DEV_RISCV_AIA_CTRL_INIT,
	};

	/* Setup global device attribute variables */
	aia_mode_attr.addr = (u64)(unsigned long)&aia_mode;
	aia_nr_ids_attr.addr = (u64)(unsigned long)&aia_nr_ids;
	aia_nr_sources_attr.addr = (u64)(unsigned long)&aia_nr_sources;
	aia_hart_bits_attr.addr = (u64)(unsigned long)&aia_hart_bits;

	/* Do nothing if AIA device not created */
	if (aia_fd < 0)
		return 0;

	/* Set/Get AIA device config parameters */
	ret = ioctl(aia_fd, KVM_GET_DEVICE_ATTR, &aia_mode_attr);
	if (ret)
		return ret;
	ret = ioctl(aia_fd, KVM_GET_DEVICE_ATTR, &aia_nr_ids_attr);
	if (ret)
		return ret;
	aia_nr_sources = irq__get_nr_allocated_lines();
	ret = ioctl(aia_fd, KVM_SET_DEVICE_ATTR, &aia_nr_sources_attr);
	if (ret)
		return ret;
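	/* Enough hart index bits to cover all nrcpus harts */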
	aia_hart_bits = fls_long(kvm->nrcpus);
	ret = ioctl(aia_fd, KVM_SET_DEVICE_ATTR, &aia_hart_bits_attr);
	if (ret)
		return ret;

	/* Save number of HARTs for FDT generation */
	aia_nr_harts = kvm->nrcpus;

	/* Set AIA device addresses */
	aia_addr = AIA_APLIC_ADDR;
	aia_addr_attr.attr = KVM_DEV_RISCV_AIA_ADDR_APLIC;
	ret = ioctl(aia_fd, KVM_SET_DEVICE_ATTR, &aia_addr_attr);
	if (ret)
		return ret;
	for (i = 0; i < kvm->nrcpus; i++) {
		aia_addr = AIA_IMSIC_ADDR(i);
		aia_addr_attr.attr = KVM_DEV_RISCV_AIA_ADDR_IMSIC(i);
		ret = ioctl(aia_fd, KVM_SET_DEVICE_ATTR, &aia_addr_attr);
		if (ret)
			return ret;
	}

	/* Setup default IRQ routing */
	aia__irq_routing_init(kvm);

	/* Initialize the AIA device */
	ret = ioctl(aia_fd, KVM_SET_DEVICE_ATTR, &aia_init_attr);
	if (ret)
		return ret;

	/* Mark IRQFD as ready */
	riscv_irqchip_irqfd_ready = true;

	return 0;
}
late_init(aia__init);

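/*
 * Create the in-kernel AIA device and register it as the irqchip for
 * this VM. If the Ssaia extension is disabled on the command line, or
 * KVM cannot create the device, return silently so that irqchip
 * selection can fall back to another in-kernel irqchip (e.g. the PLIC).
 */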
void aia__create(struct kvm *kvm)
{
	int err;
	struct kvm_create_device aia_device = {
		.type = KVM_DEV_TYPE_RISCV_AIA,
		.flags = 0,
	};

	if (kvm->cfg.arch.ext_disabled[KVM_RISCV_ISA_EXT_SSAIA])
		return;

	err = ioctl(kvm->vm_fd, KVM_CREATE_DEVICE, &aia_device);
	if (err)
		return;
	aia_fd = aia_device.fd;

	riscv_irqchip = IRQCHIP_AIA;
	riscv_irqchip_inkernel = true;
	riscv_irqchip_trigger = NULL;
	riscv_irqchip_generate_fdt_node = aia__generate_fdt_node;
	riscv_irqchip_phandle = PHANDLE_AIA_APLIC;
	riscv_irqchip_msi_phandle = PHANDLE_AIA_IMSIC;
	riscv_irqchip_line_sensing = true;
}