xref: /kvmtool/pci.c (revision 854aa2eff9402d4a11edf6beb5073434cf3b1e8d)
121ff329dSWill Deacon #include "kvm/devices.h"
260742802SPekka Enberg #include "kvm/pci.h"
360742802SPekka Enberg #include "kvm/ioport.h"
4b5981636SWill Deacon #include "kvm/irq.h"
576f9c841SCyrill Gorcunov #include "kvm/util.h"
69575e724SSasha Levin #include "kvm/kvm.h"
760742802SPekka Enberg 
86d987703SSasha Levin #include <linux/err.h>
96d987703SSasha Levin #include <assert.h>
106d987703SSasha Levin 
/*
 * Latched value of the PCI CONFIG_ADDRESS register; guest writes via the
 * PCI_CONFIG_ADDRESS ioport select the bus/device/function/register that
 * subsequent CONFIG_DATA accesses will hit.
 */
static u32 pci_config_address_bits;
1260742802SPekka Enberg 
1340f2fd06SMatt Evans /* This is within our PCI gap - in an unused area.
1440f2fd06SMatt Evans  * Note this is a PCI *bus address*, is used to assign BARs etc.!
1540f2fd06SMatt Evans  * (That's why it can still 32bit even with 64bit guests-- 64bit
1640f2fd06SMatt Evans  * PCI isn't currently supported.)
1740f2fd06SMatt Evans  */
/* Next free guest-physical MMIO address for BAR allocation. */
static u32 mmio_blocks			= KVM_PCI_MMIO_AREA;
/* Next free guest I/O port for PCI I/O BAR allocation. */
static u16 io_port_blocks		= PCI_IOPORT_START;
20*854aa2efSJulien Thierry 
21*854aa2efSJulien Thierry u16 pci_get_io_port_block(u32 size)
22*854aa2efSJulien Thierry {
23*854aa2efSJulien Thierry 	u16 port = ALIGN(io_port_blocks, IOPORT_SIZE);
24*854aa2efSJulien Thierry 
25*854aa2efSJulien Thierry 	io_port_blocks = port + size;
26*854aa2efSJulien Thierry 	return port;
27*854aa2efSJulien Thierry }
289575e724SSasha Levin 
29c7575d17SWill Deacon /*
30c7575d17SWill Deacon  * BARs must be naturally aligned, so enforce this in the allocator.
31c7575d17SWill Deacon  */
32*854aa2efSJulien Thierry u32 pci_get_mmio_block(u32 size)
339575e724SSasha Levin {
34*854aa2efSJulien Thierry 	u32 block = ALIGN(mmio_blocks, size);
35*854aa2efSJulien Thierry 	mmio_blocks = block + size;
369575e724SSasha Levin 	return block;
379575e724SSasha Levin }
389575e724SSasha Levin 
/*
 * Walk the device's PCI capability list and return a pointer to the first
 * capability whose type field matches @cap_type, or NULL if the device
 * does not advertise such a capability.
 */
void *pci_find_cap(struct pci_device_header *hdr, u8 cap_type)
{
	u8 pos;
	struct pci_cap_hdr *cap;

	pci_for_each_cap(pos, cap, hdr) {
		if (cap->type == cap_type)
			return cap;
	}

	return NULL;
}
511a51c93dSJean-Philippe Brucker 
52b5981636SWill Deacon void pci__assign_irq(struct device_header *dev_hdr)
53b5981636SWill Deacon {
54b5981636SWill Deacon 	struct pci_device_header *pci_hdr = dev_hdr->data;
55b5981636SWill Deacon 
56b5981636SWill Deacon 	/*
57b5981636SWill Deacon 	 * PCI supports only INTA#,B#,C#,D# per device.
58b5981636SWill Deacon 	 *
59b5981636SWill Deacon 	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
60b5981636SWill Deacon 	 * with A# for our single function devices.
61b5981636SWill Deacon 	 */
62b5981636SWill Deacon 	pci_hdr->irq_pin	= 1;
63b5981636SWill Deacon 	pci_hdr->irq_line	= irq__alloc_line();
64ff01b5dbSJean-Philippe Brucker 
65ff01b5dbSJean-Philippe Brucker 	if (!pci_hdr->irq_type)
66ff01b5dbSJean-Philippe Brucker 		pci_hdr->irq_type = IRQ_TYPE_EDGE_RISING;
67b5981636SWill Deacon }
68b5981636SWill Deacon 
693fdf659dSSasha Levin static void *pci_config_address_ptr(u16 port)
70ba824677SPekka Enberg {
71ba824677SPekka Enberg 	unsigned long offset;
72ba824677SPekka Enberg 	void *base;
73ba824677SPekka Enberg 
74ba824677SPekka Enberg 	offset	= port - PCI_CONFIG_ADDRESS;
75a0a7d66fSDavid Daney 	base	= &pci_config_address_bits;
76ba824677SPekka Enberg 
77ba824677SPekka Enberg 	return base + offset;
78ba824677SPekka Enberg }
79ba824677SPekka Enberg 
804123ca55SMarc Zyngier static bool pci_config_address_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
8160742802SPekka Enberg {
82ba824677SPekka Enberg 	void *p = pci_config_address_ptr(port);
8360742802SPekka Enberg 
84ba824677SPekka Enberg 	memcpy(p, data, size);
8560742802SPekka Enberg 
8660742802SPekka Enberg 	return true;
8760742802SPekka Enberg }
8860742802SPekka Enberg 
894123ca55SMarc Zyngier static bool pci_config_address_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
9060742802SPekka Enberg {
91ba824677SPekka Enberg 	void *p = pci_config_address_ptr(port);
9260742802SPekka Enberg 
93ba824677SPekka Enberg 	memcpy(data, p, size);
9460742802SPekka Enberg 
9560742802SPekka Enberg 	return true;
9660742802SPekka Enberg }
9760742802SPekka Enberg 
/* ioport handlers for the PCI CONFIG_ADDRESS register. */
static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};
10260742802SPekka Enberg 
1033fdf659dSSasha Levin static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
10476f9c841SCyrill Gorcunov {
105a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
106a0a7d66fSDavid Daney 
107a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
108a0a7d66fSDavid Daney 
10976f9c841SCyrill Gorcunov 	if (pci_config_address.bus_number != bus_number)
11076f9c841SCyrill Gorcunov 		return false;
11176f9c841SCyrill Gorcunov 
112b30d05adSPekka Enberg 	if (pci_config_address.function_number != function_number)
11376f9c841SCyrill Gorcunov 		return false;
11476f9c841SCyrill Gorcunov 
11521ff329dSWill Deacon 	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
11676f9c841SCyrill Gorcunov }
11776f9c841SCyrill Gorcunov 
1184123ca55SMarc Zyngier static bool pci_config_data_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
1199575e724SSasha Levin {
120a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
121a0a7d66fSDavid Daney 
122a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
1239575e724SSasha Levin 	/*
1249575e724SSasha Levin 	 * If someone accesses PCI configuration space offsets that are not
1259575e724SSasha Levin 	 * aligned to 4 bytes, it uses ioports to signify that.
1269575e724SSasha Levin 	 */
127d0297a59SMatt Evans 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
1289575e724SSasha Levin 
1294123ca55SMarc Zyngier 	pci__config_wr(vcpu->kvm, pci_config_address, data, size);
130d0297a59SMatt Evans 
131d0297a59SMatt Evans 	return true;
132d0297a59SMatt Evans }
133d0297a59SMatt Evans 
1344123ca55SMarc Zyngier static bool pci_config_data_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
135d0297a59SMatt Evans {
136a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
137a0a7d66fSDavid Daney 
138a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
139d0297a59SMatt Evans 	/*
140d0297a59SMatt Evans 	 * If someone accesses PCI configuration space offsets that are not
141d0297a59SMatt Evans 	 * aligned to 4 bytes, it uses ioports to signify that.
142d0297a59SMatt Evans 	 */
143d0297a59SMatt Evans 	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;
144d0297a59SMatt Evans 
1454123ca55SMarc Zyngier 	pci__config_rd(vcpu->kvm, pci_config_address, data, size);
146d0297a59SMatt Evans 
147d0297a59SMatt Evans 	return true;
148d0297a59SMatt Evans }
149d0297a59SMatt Evans 
/* ioport handlers for the PCI CONFIG_DATA register. */
static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};
154d0297a59SMatt Evans 
/*
 * Handle a guest write to the configuration space of the device selected
 * by @addr. Writes aimed at devices that don't exist are silently dropped.
 * BAR writes emulate the standard size-probing protocol (guest writes
 * all-ones, then reads back the size mask); other offsets are stored
 * directly into the cached config header.
 */
void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	void *base;
	u8 bar, offset;
	struct pci_device_header *pci_hdr;
	u8 dev_num = addr.device_number;
	u32 value = 0;
	u32 mask;

	/* Only function 0 of each device is emulated here. */
	if (!pci_device_exists(addr.bus_number, dev_num, 0))
		return;

	offset = addr.w & PCI_DEV_CFG_MASK;
	base = pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

	/* Let the device observe the write before we touch the header. */
	if (pci_hdr->cfg_ops.write)
		pci_hdr->cfg_ops.write(kvm, pci_hdr, offset, data, size);

	/*
	 * legacy hack: ignore writes to uninitialized regions (e.g. ROM BAR).
	 * Not very nice but has been working so far.
	 */
	if (*(u32 *)(base + offset) == 0)
		return;

	/* BAR index for this offset; values >= 6 mean "not a BAR". */
	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);

	/*
	 * If the kernel masks the BAR, it will expect to find the size of the
	 * BAR there next time it reads from it. After the kernel reads the
	 * size, it will write the address back.
	 */
	if (bar < 6) {
		if (pci_hdr->bar[bar] & PCI_BASE_ADDRESS_SPACE_IO)
			mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
		else
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		/*
		 * According to the PCI local bus specification REV 3.0:
		 * The number of upper bits that a device actually implements
		 * depends on how much of the address space the device will
		 * respond to. A device that wants a 1 MB memory address space
		 * (using a 32-bit base address register) would build the top
		 * 12 bits of the address register, hardwiring the other bits
		 * to 0.
		 *
		 * Furthermore, software can determine how much address space
		 * the device requires by writing a value of all 1's to the
		 * register and then reading the value back. The device will
		 * return 0's in all don't-care address bits, effectively
		 * specifying the address space required.
		 *
		 * Software computes the size of the address space with the
		 * formula S = ~B + 1, where S is the memory size and B is the
		 * value read from the BAR. This means that the BAR value that
		 * kvmtool should return is B = ~(S - 1).
		 */
		memcpy(&value, data, size);
		if (value == 0xffffffff)
			value = ~(pci_hdr->bar_size[bar] - 1);
		/* Preserve the special bits. */
		value = (value & mask) | (pci_hdr->bar[bar] & ~mask);
		memcpy(base + offset, &value, size);
	} else {
		memcpy(base + offset, data, size);
	}
}
2229575e724SSasha Levin 
223d0297a59SMatt Evans void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
22460742802SPekka Enberg {
225023fdaaeSJean-Philippe Brucker 	u8 offset;
226023fdaaeSJean-Philippe Brucker 	struct pci_device_header *pci_hdr;
227023fdaaeSJean-Philippe Brucker 	u8 dev_num = addr.device_number;
228e4d2cea2SPekka Enberg 
229023fdaaeSJean-Philippe Brucker 	if (pci_device_exists(addr.bus_number, dev_num, 0)) {
230023fdaaeSJean-Philippe Brucker 		pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
231023fdaaeSJean-Philippe Brucker 		offset = addr.w & PCI_DEV_CFG_MASK;
232b30d05adSPekka Enberg 
233023fdaaeSJean-Philippe Brucker 		if (pci_hdr->cfg_ops.read)
234023fdaaeSJean-Philippe Brucker 			pci_hdr->cfg_ops.read(kvm, pci_hdr, offset, data, size);
235598419d5SPekka Enberg 
236023fdaaeSJean-Philippe Brucker 		memcpy(data, (void *)pci_hdr + offset, size);
2373a60be06SSasha Levin 	} else {
238e498ea08SPekka Enberg 		memset(data, 0xff, size);
23960742802SPekka Enberg 	}
2403a60be06SSasha Levin }
24160742802SPekka Enberg 
2429b735910SMarc Zyngier static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
2439b735910SMarc Zyngier 				   u32 len, u8 is_write, void *kvm)
244b403f2f7SWill Deacon {
245b403f2f7SWill Deacon 	union pci_config_address cfg_addr;
246b403f2f7SWill Deacon 
247b403f2f7SWill Deacon 	addr			-= KVM_PCI_CFG_AREA;
248b403f2f7SWill Deacon 	cfg_addr.w		= (u32)addr;
249b403f2f7SWill Deacon 	cfg_addr.enable_bit	= 1;
250b403f2f7SWill Deacon 
251b403f2f7SWill Deacon 	if (is_write)
252b403f2f7SWill Deacon 		pci__config_wr(kvm, cfg_addr, data, len);
253b403f2f7SWill Deacon 	else
254b403f2f7SWill Deacon 		pci__config_rd(kvm, cfg_addr, data, len);
255b403f2f7SWill Deacon }
256b403f2f7SWill Deacon 
257d0297a59SMatt Evans struct pci_device_header *pci__find_dev(u8 dev_num)
258d0297a59SMatt Evans {
25921ff329dSWill Deacon 	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);
2606d987703SSasha Levin 
26121ff329dSWill Deacon 	if (IS_ERR_OR_NULL(hdr))
26221ff329dSWill Deacon 		return NULL;
26321ff329dSWill Deacon 
26421ff329dSWill Deacon 	return hdr->data;
265d0297a59SMatt Evans }
266d0297a59SMatt Evans 
2676d987703SSasha Levin int pci__init(struct kvm *kvm)
26860742802SPekka Enberg {
2696d987703SSasha Levin 	int r;
2706d987703SSasha Levin 
2714346fd8fSSasha Levin 	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
2726d987703SSasha Levin 	if (r < 0)
2736d987703SSasha Levin 		return r;
2746d987703SSasha Levin 
2754346fd8fSSasha Levin 	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
276b403f2f7SWill Deacon 	if (r < 0)
277b403f2f7SWill Deacon 		goto err_unregister_data;
278b403f2f7SWill Deacon 
279b403f2f7SWill Deacon 	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
280b403f2f7SWill Deacon 			       pci_config_mmio_access, kvm);
281b403f2f7SWill Deacon 	if (r < 0)
282b403f2f7SWill Deacon 		goto err_unregister_addr;
2836d987703SSasha Levin 
2846d987703SSasha Levin 	return 0;
285b403f2f7SWill Deacon 
286b403f2f7SWill Deacon err_unregister_addr:
287b403f2f7SWill Deacon 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
288b403f2f7SWill Deacon err_unregister_data:
289b403f2f7SWill Deacon 	ioport__unregister(kvm, PCI_CONFIG_DATA);
290b403f2f7SWill Deacon 	return r;
2916d987703SSasha Levin }
292bca12bf6SSasha Levin dev_base_init(pci__init);
2936d987703SSasha Levin 
2946d987703SSasha Levin int pci__exit(struct kvm *kvm)
2956d987703SSasha Levin {
2964346fd8fSSasha Levin 	ioport__unregister(kvm, PCI_CONFIG_DATA);
2974346fd8fSSasha Levin 	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
2986d987703SSasha Levin 
2996d987703SSasha Levin 	return 0;
30060742802SPekka Enberg }
301bca12bf6SSasha Levin dev_base_exit(pci__exit);
302