xref: /kvmtool/pci.c (revision 46e04130d264261fde1e016c83694b10e62c651f)
121ff329dSWill Deacon #include "kvm/devices.h"
260742802SPekka Enberg #include "kvm/pci.h"
360742802SPekka Enberg #include "kvm/ioport.h"
4b5981636SWill Deacon #include "kvm/irq.h"
576f9c841SCyrill Gorcunov #include "kvm/util.h"
69575e724SSasha Levin #include "kvm/kvm.h"
760742802SPekka Enberg 
86d987703SSasha Levin #include <linux/err.h>
96d987703SSasha Levin #include <assert.h>
106d987703SSasha Levin 
/* Shadow of the CONFIG_ADDRESS register (0xCF8), written by the guest. */
static u32 pci_config_address_bits;
1260742802SPekka Enberg 
/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address*, is used to assign BARs etc.!
 * (That's why it can still 32bit even with 64bit guests-- 64bit
 * PCI isn't currently supported.)
 */
static u32 mmio_blocks			= KVM_PCI_MMIO_AREA;	/* next free MMIO bus address */
static u16 io_port_blocks		= PCI_IOPORT_START;	/* next free I/O port */
20854aa2efSJulien Thierry 
21854aa2efSJulien Thierry u16 pci_get_io_port_block(u32 size)
22854aa2efSJulien Thierry {
2348843d10SJulien Thierry 	u16 port = ALIGN(io_port_blocks, PCI_IO_SIZE);
24854aa2efSJulien Thierry 
25854aa2efSJulien Thierry 	io_port_blocks = port + size;
26854aa2efSJulien Thierry 	return port;
27854aa2efSJulien Thierry }
289575e724SSasha Levin 
29c7575d17SWill Deacon /*
30c7575d17SWill Deacon  * BARs must be naturally aligned, so enforce this in the allocator.
31c7575d17SWill Deacon  */
32854aa2efSJulien Thierry u32 pci_get_mmio_block(u32 size)
339575e724SSasha Levin {
34854aa2efSJulien Thierry 	u32 block = ALIGN(mmio_blocks, size);
35854aa2efSJulien Thierry 	mmio_blocks = block + size;
369575e724SSasha Levin 	return block;
379575e724SSasha Levin }
389575e724SSasha Levin 
391a51c93dSJean-Philippe Brucker void *pci_find_cap(struct pci_device_header *hdr, u8 cap_type)
401a51c93dSJean-Philippe Brucker {
411a51c93dSJean-Philippe Brucker 	u8 pos;
421a51c93dSJean-Philippe Brucker 	struct pci_cap_hdr *cap;
431a51c93dSJean-Philippe Brucker 
441a51c93dSJean-Philippe Brucker 	pci_for_each_cap(pos, cap, hdr) {
451a51c93dSJean-Philippe Brucker 		if (cap->type == cap_type)
461a51c93dSJean-Philippe Brucker 			return cap;
471a51c93dSJean-Philippe Brucker 	}
481a51c93dSJean-Philippe Brucker 
491a51c93dSJean-Philippe Brucker 	return NULL;
501a51c93dSJean-Philippe Brucker }
511a51c93dSJean-Philippe Brucker 
52c0c45eedSAndre Przywara int pci__assign_irq(struct pci_device_header *pci_hdr)
53b5981636SWill Deacon {
54b5981636SWill Deacon 	/*
55b5981636SWill Deacon 	 * PCI supports only INTA#,B#,C#,D# per device.
56b5981636SWill Deacon 	 *
57b5981636SWill Deacon 	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
58b5981636SWill Deacon 	 * with A# for our single function devices.
59b5981636SWill Deacon 	 */
60b5981636SWill Deacon 	pci_hdr->irq_pin	= 1;
61b5981636SWill Deacon 	pci_hdr->irq_line	= irq__alloc_line();
62ff01b5dbSJean-Philippe Brucker 
63ff01b5dbSJean-Philippe Brucker 	if (!pci_hdr->irq_type)
64ff01b5dbSJean-Philippe Brucker 		pci_hdr->irq_type = IRQ_TYPE_EDGE_RISING;
65c0c45eedSAndre Przywara 
66c0c45eedSAndre Przywara 	return pci_hdr->irq_line;
67b5981636SWill Deacon }
68b5981636SWill Deacon 
695a8e4f25SAlexandru Elisei static bool pci_bar_is_implemented(struct pci_device_header *pci_hdr, int bar_num)
705a8e4f25SAlexandru Elisei {
715a8e4f25SAlexandru Elisei 	return pci__bar_size(pci_hdr, bar_num);
725a8e4f25SAlexandru Elisei }
735a8e4f25SAlexandru Elisei 
743fdf659dSSasha Levin static void *pci_config_address_ptr(u16 port)
75ba824677SPekka Enberg {
76ba824677SPekka Enberg 	unsigned long offset;
77ba824677SPekka Enberg 	void *base;
78ba824677SPekka Enberg 
79ba824677SPekka Enberg 	offset	= port - PCI_CONFIG_ADDRESS;
80a0a7d66fSDavid Daney 	base	= &pci_config_address_bits;
81ba824677SPekka Enberg 
82ba824677SPekka Enberg 	return base + offset;
83ba824677SPekka Enberg }
84ba824677SPekka Enberg 
854123ca55SMarc Zyngier static bool pci_config_address_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
8660742802SPekka Enberg {
87ba824677SPekka Enberg 	void *p = pci_config_address_ptr(port);
8860742802SPekka Enberg 
89ba824677SPekka Enberg 	memcpy(p, data, size);
9060742802SPekka Enberg 
9160742802SPekka Enberg 	return true;
9260742802SPekka Enberg }
9360742802SPekka Enberg 
944123ca55SMarc Zyngier static bool pci_config_address_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
9560742802SPekka Enberg {
96ba824677SPekka Enberg 	void *p = pci_config_address_ptr(port);
9760742802SPekka Enberg 
98ba824677SPekka Enberg 	memcpy(data, p, size);
9960742802SPekka Enberg 
10060742802SPekka Enberg 	return true;
10160742802SPekka Enberg }
10260742802SPekka Enberg 
103305b72ceSCyrill Gorcunov static struct ioport_operations pci_config_address_ops = {
104305b72ceSCyrill Gorcunov 	.io_in	= pci_config_address_in,
105305b72ceSCyrill Gorcunov 	.io_out	= pci_config_address_out,
10660742802SPekka Enberg };
10760742802SPekka Enberg 
1083fdf659dSSasha Levin static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
10976f9c841SCyrill Gorcunov {
110a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
111a0a7d66fSDavid Daney 
112a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
113a0a7d66fSDavid Daney 
11476f9c841SCyrill Gorcunov 	if (pci_config_address.bus_number != bus_number)
11576f9c841SCyrill Gorcunov 		return false;
11676f9c841SCyrill Gorcunov 
117b30d05adSPekka Enberg 	if (pci_config_address.function_number != function_number)
11876f9c841SCyrill Gorcunov 		return false;
11976f9c841SCyrill Gorcunov 
12021ff329dSWill Deacon 	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
12176f9c841SCyrill Gorcunov }
12276f9c841SCyrill Gorcunov 
1234123ca55SMarc Zyngier static bool pci_config_data_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
1249575e724SSasha Levin {
125a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
126a0a7d66fSDavid Daney 
1276ea32ebdSAlexandru Elisei 	if (size > 4)
1286ea32ebdSAlexandru Elisei 		size = 4;
1296ea32ebdSAlexandru Elisei 
130a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
1319575e724SSasha Levin 	/*
1329575e724SSasha Levin 	 * If someone accesses PCI configuration space offsets that are not
1339575e724SSasha Levin 	 * aligned to 4 bytes, it uses ioports to signify that.
1349575e724SSasha Levin 	 */
135d0297a59SMatt Evans 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
1369575e724SSasha Levin 
1374123ca55SMarc Zyngier 	pci__config_wr(vcpu->kvm, pci_config_address, data, size);
138d0297a59SMatt Evans 
139d0297a59SMatt Evans 	return true;
140d0297a59SMatt Evans }
141d0297a59SMatt Evans 
1424123ca55SMarc Zyngier static bool pci_config_data_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
143d0297a59SMatt Evans {
144a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
145a0a7d66fSDavid Daney 
1466ea32ebdSAlexandru Elisei 	if (size > 4)
1476ea32ebdSAlexandru Elisei 		size = 4;
1486ea32ebdSAlexandru Elisei 
149a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
150d0297a59SMatt Evans 	/*
151d0297a59SMatt Evans 	 * If someone accesses PCI configuration space offsets that are not
152d0297a59SMatt Evans 	 * aligned to 4 bytes, it uses ioports to signify that.
153d0297a59SMatt Evans 	 */
154d0297a59SMatt Evans 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
155d0297a59SMatt Evans 
1564123ca55SMarc Zyngier 	pci__config_rd(vcpu->kvm, pci_config_address, data, size);
157d0297a59SMatt Evans 
158d0297a59SMatt Evans 	return true;
159d0297a59SMatt Evans }
160d0297a59SMatt Evans 
161d0297a59SMatt Evans static struct ioport_operations pci_config_data_ops = {
162d0297a59SMatt Evans 	.io_in	= pci_config_data_in,
163d0297a59SMatt Evans 	.io_out	= pci_config_data_out,
164d0297a59SMatt Evans };
165d0297a59SMatt Evans 
166*46e04130SAlexandru Elisei static void pci_config_command_wr(struct kvm *kvm,
167*46e04130SAlexandru Elisei 				  struct pci_device_header *pci_hdr,
168*46e04130SAlexandru Elisei 				  u16 new_command)
169*46e04130SAlexandru Elisei {
170*46e04130SAlexandru Elisei 	int i;
171*46e04130SAlexandru Elisei 	bool toggle_io, toggle_mem;
172*46e04130SAlexandru Elisei 
173*46e04130SAlexandru Elisei 	toggle_io = (pci_hdr->command ^ new_command) & PCI_COMMAND_IO;
174*46e04130SAlexandru Elisei 	toggle_mem = (pci_hdr->command ^ new_command) & PCI_COMMAND_MEMORY;
175*46e04130SAlexandru Elisei 
176*46e04130SAlexandru Elisei 	for (i = 0; i < 6; i++) {
177*46e04130SAlexandru Elisei 		if (!pci_bar_is_implemented(pci_hdr, i))
178*46e04130SAlexandru Elisei 			continue;
179*46e04130SAlexandru Elisei 
180*46e04130SAlexandru Elisei 		if (toggle_io && pci__bar_is_io(pci_hdr, i)) {
181*46e04130SAlexandru Elisei 			if (__pci__io_space_enabled(new_command))
182*46e04130SAlexandru Elisei 				pci_hdr->bar_activate_fn(kvm, pci_hdr, i,
183*46e04130SAlexandru Elisei 							 pci_hdr->data);
184*46e04130SAlexandru Elisei 			else
185*46e04130SAlexandru Elisei 				pci_hdr->bar_deactivate_fn(kvm, pci_hdr, i,
186*46e04130SAlexandru Elisei 							   pci_hdr->data);
187*46e04130SAlexandru Elisei 		}
188*46e04130SAlexandru Elisei 
189*46e04130SAlexandru Elisei 		if (toggle_mem && pci__bar_is_memory(pci_hdr, i)) {
190*46e04130SAlexandru Elisei 			if (__pci__memory_space_enabled(new_command))
191*46e04130SAlexandru Elisei 				pci_hdr->bar_activate_fn(kvm, pci_hdr, i,
192*46e04130SAlexandru Elisei 							 pci_hdr->data);
193*46e04130SAlexandru Elisei 			else
194*46e04130SAlexandru Elisei 				pci_hdr->bar_deactivate_fn(kvm, pci_hdr, i,
195*46e04130SAlexandru Elisei 							   pci_hdr->data);
196*46e04130SAlexandru Elisei 		}
197*46e04130SAlexandru Elisei 	}
198*46e04130SAlexandru Elisei 
199*46e04130SAlexandru Elisei 	pci_hdr->command = new_command;
200*46e04130SAlexandru Elisei }
201*46e04130SAlexandru Elisei 
202d0297a59SMatt Evans void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
203d0297a59SMatt Evans {
204023fdaaeSJean-Philippe Brucker 	void *base;
205023fdaaeSJean-Philippe Brucker 	u8 bar, offset;
206023fdaaeSJean-Philippe Brucker 	struct pci_device_header *pci_hdr;
207023fdaaeSJean-Philippe Brucker 	u8 dev_num = addr.device_number;
208bb0d509bSSami Mujawar 	u32 value = 0;
209bb0d509bSSami Mujawar 	u32 mask;
210d0297a59SMatt Evans 
211023fdaaeSJean-Philippe Brucker 	if (!pci_device_exists(addr.bus_number, dev_num, 0))
212023fdaaeSJean-Philippe Brucker 		return;
2139575e724SSasha Levin 
214023fdaaeSJean-Philippe Brucker 	offset = addr.w & PCI_DEV_CFG_MASK;
215023fdaaeSJean-Philippe Brucker 	base = pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
2169575e724SSasha Levin 
217023fdaaeSJean-Philippe Brucker 	if (pci_hdr->cfg_ops.write)
218023fdaaeSJean-Philippe Brucker 		pci_hdr->cfg_ops.write(kvm, pci_hdr, offset, data, size);
219c64f7ff0SSasha Levin 
2209575e724SSasha Levin 	/*
221023fdaaeSJean-Philippe Brucker 	 * legacy hack: ignore writes to uninitialized regions (e.g. ROM BAR).
222023fdaaeSJean-Philippe Brucker 	 * Not very nice but has been working so far.
2239575e724SSasha Levin 	 */
224023fdaaeSJean-Philippe Brucker 	if (*(u32 *)(base + offset) == 0)
225023fdaaeSJean-Philippe Brucker 		return;
226023fdaaeSJean-Philippe Brucker 
227*46e04130SAlexandru Elisei 	if (offset == PCI_COMMAND) {
228*46e04130SAlexandru Elisei 		memcpy(&value, data, size);
229*46e04130SAlexandru Elisei 		pci_config_command_wr(kvm, pci_hdr, (u16)value);
230*46e04130SAlexandru Elisei 		return;
231*46e04130SAlexandru Elisei 	}
232*46e04130SAlexandru Elisei 
233023fdaaeSJean-Philippe Brucker 	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);
234023fdaaeSJean-Philippe Brucker 
235023fdaaeSJean-Philippe Brucker 	/*
236bb0d509bSSami Mujawar 	 * If the kernel masks the BAR, it will expect to find the size of the
237bb0d509bSSami Mujawar 	 * BAR there next time it reads from it. After the kernel reads the
238bb0d509bSSami Mujawar 	 * size, it will write the address back.
239023fdaaeSJean-Philippe Brucker 	 */
240bb0d509bSSami Mujawar 	if (bar < 6) {
2412f6384f9SAlexandru Elisei 		if (pci__bar_is_io(pci_hdr, bar))
242bb0d509bSSami Mujawar 			mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
243bb0d509bSSami Mujawar 		else
244bb0d509bSSami Mujawar 			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
245bb0d509bSSami Mujawar 		/*
246bb0d509bSSami Mujawar 		 * According to the PCI local bus specification REV 3.0:
247bb0d509bSSami Mujawar 		 * The number of upper bits that a device actually implements
248bb0d509bSSami Mujawar 		 * depends on how much of the address space the device will
249bb0d509bSSami Mujawar 		 * respond to. A device that wants a 1 MB memory address space
250bb0d509bSSami Mujawar 		 * (using a 32-bit base address register) would build the top
251bb0d509bSSami Mujawar 		 * 12 bits of the address register, hardwiring the other bits
252bb0d509bSSami Mujawar 		 * to 0.
253bb0d509bSSami Mujawar 		 *
254bb0d509bSSami Mujawar 		 * Furthermore, software can determine how much address space
255bb0d509bSSami Mujawar 		 * the device requires by writing a value of all 1's to the
256bb0d509bSSami Mujawar 		 * register and then reading the value back. The device will
257bb0d509bSSami Mujawar 		 * return 0's in all don't-care address bits, effectively
258bb0d509bSSami Mujawar 		 * specifying the address space required.
259bb0d509bSSami Mujawar 		 *
260bb0d509bSSami Mujawar 		 * Software computes the size of the address space with the
261bb0d509bSSami Mujawar 		 * formula S = ~B + 1, where S is the memory size and B is the
262bb0d509bSSami Mujawar 		 * value read from the BAR. This means that the BAR value that
263bb0d509bSSami Mujawar 		 * kvmtool should return is B = ~(S - 1).
264bb0d509bSSami Mujawar 		 */
265bb0d509bSSami Mujawar 		memcpy(&value, data, size);
266bb0d509bSSami Mujawar 		if (value == 0xffffffff)
2672f6384f9SAlexandru Elisei 			value = ~(pci__bar_size(pci_hdr, bar) - 1);
268bb0d509bSSami Mujawar 		/* Preserve the special bits. */
269bb0d509bSSami Mujawar 		value = (value & mask) | (pci_hdr->bar[bar] & ~mask);
270bb0d509bSSami Mujawar 		memcpy(base + offset, &value, size);
271023fdaaeSJean-Philippe Brucker 	} else {
272023fdaaeSJean-Philippe Brucker 		memcpy(base + offset, data, size);
2739575e724SSasha Levin 	}
2749575e724SSasha Levin }
2759575e724SSasha Levin 
276d0297a59SMatt Evans void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
27760742802SPekka Enberg {
278023fdaaeSJean-Philippe Brucker 	u8 offset;
279023fdaaeSJean-Philippe Brucker 	struct pci_device_header *pci_hdr;
280023fdaaeSJean-Philippe Brucker 	u8 dev_num = addr.device_number;
281e4d2cea2SPekka Enberg 
282023fdaaeSJean-Philippe Brucker 	if (pci_device_exists(addr.bus_number, dev_num, 0)) {
283023fdaaeSJean-Philippe Brucker 		pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
284023fdaaeSJean-Philippe Brucker 		offset = addr.w & PCI_DEV_CFG_MASK;
285b30d05adSPekka Enberg 
286023fdaaeSJean-Philippe Brucker 		if (pci_hdr->cfg_ops.read)
287023fdaaeSJean-Philippe Brucker 			pci_hdr->cfg_ops.read(kvm, pci_hdr, offset, data, size);
288598419d5SPekka Enberg 
289023fdaaeSJean-Philippe Brucker 		memcpy(data, (void *)pci_hdr + offset, size);
2903a60be06SSasha Levin 	} else {
291e498ea08SPekka Enberg 		memset(data, 0xff, size);
29260742802SPekka Enberg 	}
2933a60be06SSasha Levin }
29460742802SPekka Enberg 
2959b735910SMarc Zyngier static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
2969b735910SMarc Zyngier 				   u32 len, u8 is_write, void *kvm)
297b403f2f7SWill Deacon {
298b403f2f7SWill Deacon 	union pci_config_address cfg_addr;
299b403f2f7SWill Deacon 
300b403f2f7SWill Deacon 	addr			-= KVM_PCI_CFG_AREA;
301b403f2f7SWill Deacon 	cfg_addr.w		= (u32)addr;
302b403f2f7SWill Deacon 	cfg_addr.enable_bit	= 1;
303b403f2f7SWill Deacon 
3046ea32ebdSAlexandru Elisei 	if (len > 4)
3056ea32ebdSAlexandru Elisei 		len = 4;
3066ea32ebdSAlexandru Elisei 
307b403f2f7SWill Deacon 	if (is_write)
308b403f2f7SWill Deacon 		pci__config_wr(kvm, cfg_addr, data, len);
309b403f2f7SWill Deacon 	else
310b403f2f7SWill Deacon 		pci__config_rd(kvm, cfg_addr, data, len);
311b403f2f7SWill Deacon }
312b403f2f7SWill Deacon 
313d0297a59SMatt Evans struct pci_device_header *pci__find_dev(u8 dev_num)
314d0297a59SMatt Evans {
31521ff329dSWill Deacon 	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);
3166d987703SSasha Levin 
31721ff329dSWill Deacon 	if (IS_ERR_OR_NULL(hdr))
31821ff329dSWill Deacon 		return NULL;
31921ff329dSWill Deacon 
32021ff329dSWill Deacon 	return hdr->data;
321d0297a59SMatt Evans }
322d0297a59SMatt Evans 
3235a8e4f25SAlexandru Elisei int pci__register_bar_regions(struct kvm *kvm, struct pci_device_header *pci_hdr,
3245a8e4f25SAlexandru Elisei 			      bar_activate_fn_t bar_activate_fn,
3255a8e4f25SAlexandru Elisei 			      bar_deactivate_fn_t bar_deactivate_fn, void *data)
3265a8e4f25SAlexandru Elisei {
3275a8e4f25SAlexandru Elisei 	int i, r;
3285a8e4f25SAlexandru Elisei 
3295a8e4f25SAlexandru Elisei 	assert(bar_activate_fn && bar_deactivate_fn);
3305a8e4f25SAlexandru Elisei 
3315a8e4f25SAlexandru Elisei 	pci_hdr->bar_activate_fn = bar_activate_fn;
3325a8e4f25SAlexandru Elisei 	pci_hdr->bar_deactivate_fn = bar_deactivate_fn;
3335a8e4f25SAlexandru Elisei 	pci_hdr->data = data;
3345a8e4f25SAlexandru Elisei 
3355a8e4f25SAlexandru Elisei 	for (i = 0; i < 6; i++) {
3365a8e4f25SAlexandru Elisei 		if (!pci_bar_is_implemented(pci_hdr, i))
3375a8e4f25SAlexandru Elisei 			continue;
3385a8e4f25SAlexandru Elisei 
3395a8e4f25SAlexandru Elisei 		if (pci__bar_is_io(pci_hdr, i) &&
3405a8e4f25SAlexandru Elisei 		    pci__io_space_enabled(pci_hdr)) {
3415a8e4f25SAlexandru Elisei 			r = bar_activate_fn(kvm, pci_hdr, i, data);
3425a8e4f25SAlexandru Elisei 			if (r < 0)
3435a8e4f25SAlexandru Elisei 				return r;
3445a8e4f25SAlexandru Elisei 		}
3455a8e4f25SAlexandru Elisei 
3465a8e4f25SAlexandru Elisei 		if (pci__bar_is_memory(pci_hdr, i) &&
3475a8e4f25SAlexandru Elisei 		    pci__memory_space_enabled(pci_hdr)) {
3485a8e4f25SAlexandru Elisei 			r = bar_activate_fn(kvm, pci_hdr, i, data);
3495a8e4f25SAlexandru Elisei 			if (r < 0)
3505a8e4f25SAlexandru Elisei 				return r;
3515a8e4f25SAlexandru Elisei 		}
3525a8e4f25SAlexandru Elisei 	}
3535a8e4f25SAlexandru Elisei 
3545a8e4f25SAlexandru Elisei 	return 0;
3555a8e4f25SAlexandru Elisei }
3565a8e4f25SAlexandru Elisei 
3576d987703SSasha Levin int pci__init(struct kvm *kvm)
35860742802SPekka Enberg {
3596d987703SSasha Levin 	int r;
3606d987703SSasha Levin 
3614346fd8fSSasha Levin 	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
3626d987703SSasha Levin 	if (r < 0)
3636d987703SSasha Levin 		return r;
3646d987703SSasha Levin 
3654346fd8fSSasha Levin 	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
366b403f2f7SWill Deacon 	if (r < 0)
367b403f2f7SWill Deacon 		goto err_unregister_data;
368b403f2f7SWill Deacon 
369b403f2f7SWill Deacon 	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
370b403f2f7SWill Deacon 			       pci_config_mmio_access, kvm);
371b403f2f7SWill Deacon 	if (r < 0)
372b403f2f7SWill Deacon 		goto err_unregister_addr;
3736d987703SSasha Levin 
3746d987703SSasha Levin 	return 0;
375b403f2f7SWill Deacon 
376b403f2f7SWill Deacon err_unregister_addr:
377b403f2f7SWill Deacon 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
378b403f2f7SWill Deacon err_unregister_data:
379b403f2f7SWill Deacon 	ioport__unregister(kvm, PCI_CONFIG_DATA);
380b403f2f7SWill Deacon 	return r;
3816d987703SSasha Levin }
382bca12bf6SSasha Levin dev_base_init(pci__init);
3836d987703SSasha Levin 
3846d987703SSasha Levin int pci__exit(struct kvm *kvm)
3856d987703SSasha Levin {
3864346fd8fSSasha Levin 	ioport__unregister(kvm, PCI_CONFIG_DATA);
3874346fd8fSSasha Levin 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
3886d987703SSasha Levin 
3896d987703SSasha Levin 	return 0;
39060742802SPekka Enberg }
391bca12bf6SSasha Levin dev_base_exit(pci__exit);
392