xref: /kvmtool/pci.c (revision e69b7663b06e8af9cc2dae16e6ec906a64c3c63d)
121ff329dSWill Deacon #include "kvm/devices.h"
260742802SPekka Enberg #include "kvm/pci.h"
360742802SPekka Enberg #include "kvm/ioport.h"
4b5981636SWill Deacon #include "kvm/irq.h"
576f9c841SCyrill Gorcunov #include "kvm/util.h"
69575e724SSasha Levin #include "kvm/kvm.h"
760742802SPekka Enberg 
86d987703SSasha Levin #include <linux/err.h>
96d987703SSasha Levin #include <assert.h>
106d987703SSasha Levin 
/* Latched value of the PCI CONFIG_ADDRESS register written by the guest. */
static u32 pci_config_address_bits;

/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address*, is used to assign BARs etc.!
 * (That's why it can still 32bit even with 64bit guests-- 64bit
 * PCI isn't currently supported.)
 */
static u32 mmio_blocks			= KVM_PCI_MMIO_AREA;
/* Next free I/O port; handed out in PCI_IO_SIZE-aligned blocks below. */
static u16 io_port_blocks		= PCI_IOPORT_START;
20854aa2efSJulien Thierry 
21854aa2efSJulien Thierry u16 pci_get_io_port_block(u32 size)
22854aa2efSJulien Thierry {
2348843d10SJulien Thierry 	u16 port = ALIGN(io_port_blocks, PCI_IO_SIZE);
24854aa2efSJulien Thierry 
25854aa2efSJulien Thierry 	io_port_blocks = port + size;
26854aa2efSJulien Thierry 	return port;
27854aa2efSJulien Thierry }
289575e724SSasha Levin 
29c7575d17SWill Deacon /*
30c7575d17SWill Deacon  * BARs must be naturally aligned, so enforce this in the allocator.
31c7575d17SWill Deacon  */
32854aa2efSJulien Thierry u32 pci_get_mmio_block(u32 size)
339575e724SSasha Levin {
34854aa2efSJulien Thierry 	u32 block = ALIGN(mmio_blocks, size);
35854aa2efSJulien Thierry 	mmio_blocks = block + size;
369575e724SSasha Levin 	return block;
379575e724SSasha Levin }
389575e724SSasha Levin 
/*
 * Walk the device's PCI capability list and return a pointer to the first
 * capability whose type field matches @cap_type, or NULL if none is found.
 */
void *pci_find_cap(struct pci_device_header *hdr, u8 cap_type)
{
	u8 pos;
	struct pci_cap_hdr *cap;

	pci_for_each_cap(pos, cap, hdr) {
		if (cap->type == cap_type)
			return cap;
	}

	return NULL;
}
511a51c93dSJean-Philippe Brucker 
52c0c45eedSAndre Przywara int pci__assign_irq(struct pci_device_header *pci_hdr)
53b5981636SWill Deacon {
54b5981636SWill Deacon 	/*
55b5981636SWill Deacon 	 * PCI supports only INTA#,B#,C#,D# per device.
56b5981636SWill Deacon 	 *
57b5981636SWill Deacon 	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
58b5981636SWill Deacon 	 * with A# for our single function devices.
59b5981636SWill Deacon 	 */
60b5981636SWill Deacon 	pci_hdr->irq_pin	= 1;
61b5981636SWill Deacon 	pci_hdr->irq_line	= irq__alloc_line();
62ff01b5dbSJean-Philippe Brucker 
63ff01b5dbSJean-Philippe Brucker 	if (!pci_hdr->irq_type)
64ff01b5dbSJean-Philippe Brucker 		pci_hdr->irq_type = IRQ_TYPE_EDGE_RISING;
65c0c45eedSAndre Przywara 
66c0c45eedSAndre Przywara 	return pci_hdr->irq_line;
67b5981636SWill Deacon }
68b5981636SWill Deacon 
695a8e4f25SAlexandru Elisei static bool pci_bar_is_implemented(struct pci_device_header *pci_hdr, int bar_num)
705a8e4f25SAlexandru Elisei {
715a8e4f25SAlexandru Elisei 	return pci__bar_size(pci_hdr, bar_num);
725a8e4f25SAlexandru Elisei }
735a8e4f25SAlexandru Elisei 
74465edc9dSAlexandru Elisei static bool pci_bar_is_active(struct pci_device_header *pci_hdr, int bar_num)
75465edc9dSAlexandru Elisei {
76465edc9dSAlexandru Elisei 	return  pci_hdr->bar_active[bar_num];
77465edc9dSAlexandru Elisei }
78465edc9dSAlexandru Elisei 
793fdf659dSSasha Levin static void *pci_config_address_ptr(u16 port)
80ba824677SPekka Enberg {
81ba824677SPekka Enberg 	unsigned long offset;
82ba824677SPekka Enberg 	void *base;
83ba824677SPekka Enberg 
84ba824677SPekka Enberg 	offset	= port - PCI_CONFIG_ADDRESS;
85a0a7d66fSDavid Daney 	base	= &pci_config_address_bits;
86ba824677SPekka Enberg 
87ba824677SPekka Enberg 	return base + offset;
88ba824677SPekka Enberg }
89ba824677SPekka Enberg 
901f56b9d1SAndre Przywara static void pci_config_address_mmio(struct kvm_cpu *vcpu, u64 addr, u8 *data,
911f56b9d1SAndre Przywara 				    u32 len, u8 is_write, void *ptr)
9260742802SPekka Enberg {
931f56b9d1SAndre Przywara 	void *p = pci_config_address_ptr(addr);
9460742802SPekka Enberg 
951f56b9d1SAndre Przywara 	if (is_write)
961f56b9d1SAndre Przywara 		memcpy(p, data, len);
971f56b9d1SAndre Przywara 	else
981f56b9d1SAndre Przywara 		memcpy(data, p, len);
9960742802SPekka Enberg }
1003fdf659dSSasha Levin static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
10176f9c841SCyrill Gorcunov {
102a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
103a0a7d66fSDavid Daney 
104a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
105a0a7d66fSDavid Daney 
10676f9c841SCyrill Gorcunov 	if (pci_config_address.bus_number != bus_number)
10776f9c841SCyrill Gorcunov 		return false;
10876f9c841SCyrill Gorcunov 
109b30d05adSPekka Enberg 	if (pci_config_address.function_number != function_number)
11076f9c841SCyrill Gorcunov 		return false;
11176f9c841SCyrill Gorcunov 
11221ff329dSWill Deacon 	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
11376f9c841SCyrill Gorcunov }
11476f9c841SCyrill Gorcunov 
1151f56b9d1SAndre Przywara static void pci_config_data_mmio(struct kvm_cpu *vcpu, u64 addr, u8 *data,
1161f56b9d1SAndre Przywara 				 u32 len, u8 is_write, void *kvm)
1179575e724SSasha Levin {
118a0a7d66fSDavid Daney 	union pci_config_address pci_config_address;
119a0a7d66fSDavid Daney 
1201f56b9d1SAndre Przywara 	if (len > 4)
1211f56b9d1SAndre Przywara 		len = 4;
1226ea32ebdSAlexandru Elisei 
123a0a7d66fSDavid Daney 	pci_config_address.w = ioport__read32(&pci_config_address_bits);
1249575e724SSasha Levin 	/*
1259575e724SSasha Levin 	 * If someone accesses PCI configuration space offsets that are not
1269575e724SSasha Levin 	 * aligned to 4 bytes, it uses ioports to signify that.
1279575e724SSasha Levin 	 */
1281f56b9d1SAndre Przywara 	pci_config_address.reg_offset = addr - PCI_CONFIG_DATA;
1299575e724SSasha Levin 
1301f56b9d1SAndre Przywara 	if (is_write)
1311f56b9d1SAndre Przywara 		pci__config_wr(vcpu->kvm, pci_config_address, data, len);
1321f56b9d1SAndre Przywara 	else
1331f56b9d1SAndre Przywara 		pci__config_rd(vcpu->kvm, pci_config_address, data, len);
134d0297a59SMatt Evans }
135d0297a59SMatt Evans 
136465edc9dSAlexandru Elisei static int pci_activate_bar(struct kvm *kvm, struct pci_device_header *pci_hdr,
137465edc9dSAlexandru Elisei 			    int bar_num)
138465edc9dSAlexandru Elisei {
139465edc9dSAlexandru Elisei 	int r = 0;
140465edc9dSAlexandru Elisei 
141465edc9dSAlexandru Elisei 	if (pci_bar_is_active(pci_hdr, bar_num))
142465edc9dSAlexandru Elisei 		goto out;
143465edc9dSAlexandru Elisei 
144465edc9dSAlexandru Elisei 	r = pci_hdr->bar_activate_fn(kvm, pci_hdr, bar_num, pci_hdr->data);
145465edc9dSAlexandru Elisei 	if (r < 0) {
146465edc9dSAlexandru Elisei 		pci_dev_warn(pci_hdr, "Error activating emulation for BAR %d",
147465edc9dSAlexandru Elisei 			     bar_num);
148465edc9dSAlexandru Elisei 		goto out;
149465edc9dSAlexandru Elisei 	}
150465edc9dSAlexandru Elisei 	pci_hdr->bar_active[bar_num] = true;
151465edc9dSAlexandru Elisei 
152465edc9dSAlexandru Elisei out:
153465edc9dSAlexandru Elisei 	return r;
154465edc9dSAlexandru Elisei }
155465edc9dSAlexandru Elisei 
156465edc9dSAlexandru Elisei static int pci_deactivate_bar(struct kvm *kvm, struct pci_device_header *pci_hdr,
157465edc9dSAlexandru Elisei 			      int bar_num)
158465edc9dSAlexandru Elisei {
159465edc9dSAlexandru Elisei 	int r = 0;
160465edc9dSAlexandru Elisei 
161465edc9dSAlexandru Elisei 	if (!pci_bar_is_active(pci_hdr, bar_num))
162465edc9dSAlexandru Elisei 		goto out;
163465edc9dSAlexandru Elisei 
164465edc9dSAlexandru Elisei 	r = pci_hdr->bar_deactivate_fn(kvm, pci_hdr, bar_num, pci_hdr->data);
165465edc9dSAlexandru Elisei 	if (r < 0) {
166465edc9dSAlexandru Elisei 		pci_dev_warn(pci_hdr, "Error deactivating emulation for BAR %d",
167465edc9dSAlexandru Elisei 			     bar_num);
168465edc9dSAlexandru Elisei 		goto out;
169465edc9dSAlexandru Elisei 	}
170465edc9dSAlexandru Elisei 	pci_hdr->bar_active[bar_num] = false;
171465edc9dSAlexandru Elisei 
172465edc9dSAlexandru Elisei out:
173465edc9dSAlexandru Elisei 	return r;
174465edc9dSAlexandru Elisei }
175465edc9dSAlexandru Elisei 
17646e04130SAlexandru Elisei static void pci_config_command_wr(struct kvm *kvm,
17746e04130SAlexandru Elisei 				  struct pci_device_header *pci_hdr,
17846e04130SAlexandru Elisei 				  u16 new_command)
17946e04130SAlexandru Elisei {
18046e04130SAlexandru Elisei 	int i;
18146e04130SAlexandru Elisei 	bool toggle_io, toggle_mem;
18246e04130SAlexandru Elisei 
18346e04130SAlexandru Elisei 	toggle_io = (pci_hdr->command ^ new_command) & PCI_COMMAND_IO;
18446e04130SAlexandru Elisei 	toggle_mem = (pci_hdr->command ^ new_command) & PCI_COMMAND_MEMORY;
18546e04130SAlexandru Elisei 
18646e04130SAlexandru Elisei 	for (i = 0; i < 6; i++) {
18746e04130SAlexandru Elisei 		if (!pci_bar_is_implemented(pci_hdr, i))
18846e04130SAlexandru Elisei 			continue;
18946e04130SAlexandru Elisei 
19046e04130SAlexandru Elisei 		if (toggle_io && pci__bar_is_io(pci_hdr, i)) {
19146e04130SAlexandru Elisei 			if (__pci__io_space_enabled(new_command))
192465edc9dSAlexandru Elisei 				pci_activate_bar(kvm, pci_hdr, i);
19346e04130SAlexandru Elisei 			else
194465edc9dSAlexandru Elisei 				pci_deactivate_bar(kvm, pci_hdr, i);
19546e04130SAlexandru Elisei 		}
19646e04130SAlexandru Elisei 
19746e04130SAlexandru Elisei 		if (toggle_mem && pci__bar_is_memory(pci_hdr, i)) {
19846e04130SAlexandru Elisei 			if (__pci__memory_space_enabled(new_command))
199465edc9dSAlexandru Elisei 				pci_activate_bar(kvm, pci_hdr, i);
20046e04130SAlexandru Elisei 			else
201465edc9dSAlexandru Elisei 				pci_deactivate_bar(kvm, pci_hdr, i);
20246e04130SAlexandru Elisei 		}
20346e04130SAlexandru Elisei 	}
20446e04130SAlexandru Elisei 
20546e04130SAlexandru Elisei 	pci_hdr->command = new_command;
20646e04130SAlexandru Elisei }
20746e04130SAlexandru Elisei 
/*
 * Activate or deactivate BAR emulation for every implemented BAR, of every
 * PCI device, whose region overlaps the interval [start, start + size).
 *
 * Returns 0 on success, or the first negative error code returned while
 * toggling a BAR (remaining BARs are then left untouched).
 */
static int pci_toggle_bar_regions(bool activate, struct kvm *kvm, u32 start, u32 size)
{
	struct device_header *dev_hdr;
	struct pci_device_header *tmp_hdr;
	u32 tmp_start, tmp_size;
	int i, r;

	dev_hdr = device__first_dev(DEVICE_BUS_PCI);
	while (dev_hdr) {
		tmp_hdr = dev_hdr->data;
		for (i = 0; i < 6; i++) {
			if (!pci_bar_is_implemented(tmp_hdr, i))
				continue;

			tmp_start = pci__bar_address(tmp_hdr, i);
			tmp_size = pci__bar_size(tmp_hdr, i);
			/* Skip BARs that do not overlap the interval. */
			if (tmp_start + tmp_size <= start ||
			    tmp_start >= start + size)
				continue;

			if (activate)
				r = pci_activate_bar(kvm, tmp_hdr, i);
			else
				r = pci_deactivate_bar(kvm, tmp_hdr, i);
			if (r < 0)
				return r;
		}
		dev_hdr = device__next_dev(dev_hdr);
	}

	return 0;
}
240465edc9dSAlexandru Elisei 
/* Enable emulation for all BAR regions overlapping [start, start + size). */
static inline int pci_activate_bar_regions(struct kvm *kvm, u32 start, u32 size)
{
	return pci_toggle_bar_regions(true, kvm, start, size);
}
245465edc9dSAlexandru Elisei 
/* Disable emulation for all BAR regions overlapping [start, start + size). */
static inline int pci_deactivate_bar_regions(struct kvm *kvm, u32 start, u32 size)
{
	return pci_toggle_bar_regions(false, kvm, start, size);
}
250465edc9dSAlexandru Elisei 
/*
 * Handle a guest write of @value to BAR @bar_num: answer the all-ones size
 * probe, or reassign the BAR address while keeping the emulated regions of
 * all devices consistent (rolling back on failure).
 */
static void pci_config_bar_wr(struct kvm *kvm,
			      struct pci_device_header *pci_hdr, int bar_num,
			      u32 value)
{
	u32 old_addr, new_addr, bar_size;
	u32 mask;
	int r;

	/* The writable address bits differ between I/O and memory BARs. */
	if (pci__bar_is_io(pci_hdr, bar_num))
		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
	else
		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * If the kernel masks the BAR, it will expect to find the size of the
	 * BAR there next time it reads from it. After the kernel reads the
	 * size, it will write the address back.
	 *
	 * According to the PCI local bus specification REV 3.0: The number of
	 * upper bits that a device actually implements depends on how much of
	 * the address space the device will respond to. A device that wants a 1
	 * MB memory address space (using a 32-bit base address register) would
	 * build the top 12 bits of the address register, hardwiring the other
	 * bits to 0.
	 *
	 * Furthermore, software can determine how much address space the device
	 * requires by writing a value of all 1's to the register and then
	 * reading the value back. The device will return 0's in all don't-care
	 * address bits, effectively specifying the address space required.
	 *
	 * Software computes the size of the address space with the formula
	 * S =  ~B + 1, where S is the memory size and B is the value read from
	 * the BAR. This means that the BAR value that kvmtool should return is
	 * B = ~(S - 1).
	 */
	if (value == 0xffffffff) {
		value = ~(pci__bar_size(pci_hdr, bar_num) - 1);
		/* Preserve the special bits. */
		value = (value & mask) | (pci_hdr->bar[bar_num] & ~mask);
		pci_hdr->bar[bar_num] = value;
		return;
	}

	value = (value & mask) | (pci_hdr->bar[bar_num] & ~mask);

	/* Don't toggle emulation when region type access is disabled. */
	if (pci__bar_is_io(pci_hdr, bar_num) &&
	    !pci__io_space_enabled(pci_hdr)) {
		pci_hdr->bar[bar_num] = value;
		return;
	}

	if (pci__bar_is_memory(pci_hdr, bar_num) &&
	    !pci__memory_space_enabled(pci_hdr)) {
		pci_hdr->bar[bar_num] = value;
		return;
	}

	/*
	 * BAR reassignment can be done while device access is enabled and
	 * memory regions for different devices can overlap as long as no access
	 * is made to the overlapping memory regions. To implement BAR
	 * reassignment, we deactivate emulation for the region described by the
	 * BAR value that the guest is changing, we disable emulation for the
	 * regions that overlap with the new one (by scanning through all PCI
	 * devices), we enable emulation for the new BAR value and finally we
	 * enable emulation for all device regions that were overlapping with
	 * the old value.
	 */
	old_addr = pci__bar_address(pci_hdr, bar_num);
	new_addr = __pci__bar_address(value);
	bar_size = pci__bar_size(pci_hdr, bar_num);

	r = pci_deactivate_bar(kvm, pci_hdr, bar_num);
	if (r < 0)
		return;

	r = pci_deactivate_bar_regions(kvm, new_addr, bar_size);
	if (r < 0) {
		/*
		 * We cannot update the BAR because of an overlapping region
		 * that failed to deactivate emulation, so keep the old BAR
		 * value and re-activate emulation for it.
		 */
		pci_activate_bar(kvm, pci_hdr, bar_num);
		return;
	}

	pci_hdr->bar[bar_num] = value;
	r = pci_activate_bar(kvm, pci_hdr, bar_num);
	if (r < 0) {
		/*
		 * New region cannot be emulated, re-enable the regions that
		 * were overlapping.
		 */
		pci_activate_bar_regions(kvm, new_addr, bar_size);
		return;
	}

	pci_activate_bar_regions(kvm, old_addr, bar_size);
}
352465edc9dSAlexandru Elisei 
/*
 * Write @size bytes of configuration space at @addr. The device's cfg_ops
 * write hook (if any) runs first; writes to the command register and to the
 * BARs are then intercepted so that BAR emulation can be kept in sync.
 */
void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	void *base;
	u8 bar;
	u16 offset;
	struct pci_device_header *pci_hdr;
	u8 dev_num = addr.device_number;
	u32 value = 0;

	/* Writes to non-existent devices are silently dropped. */
	if (!pci_device_exists(addr.bus_number, dev_num, 0))
		return;

	offset = addr.w & PCI_DEV_CFG_MASK;
	base = pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

	if (pci_hdr->cfg_ops.write)
		pci_hdr->cfg_ops.write(kvm, pci_hdr, offset, data, size);

	/*
	 * legacy hack: ignore writes to uninitialized regions (e.g. ROM BAR).
	 * Not very nice but has been working so far.
	 */
	if (*(u32 *)(base + offset) == 0)
		return;

	if (offset == PCI_COMMAND) {
		memcpy(&value, data, size);
		pci_config_command_wr(kvm, pci_hdr, (u16)value);
		return;
	}

	/* Intercept BAR writes so emulation regions follow the new address. */
	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);
	if (bar < 6) {
		memcpy(&value, data, size);
		pci_config_bar_wr(kvm, pci_hdr, bar, value);
		return;
	}

	memcpy(base + offset, data, size);
}
3939575e724SSasha Levin 
394d0297a59SMatt Evans void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
39560742802SPekka Enberg {
396*e69b7663SAlexandru Elisei 	u16 offset;
397023fdaaeSJean-Philippe Brucker 	struct pci_device_header *pci_hdr;
398023fdaaeSJean-Philippe Brucker 	u8 dev_num = addr.device_number;
399e4d2cea2SPekka Enberg 
400023fdaaeSJean-Philippe Brucker 	if (pci_device_exists(addr.bus_number, dev_num, 0)) {
401023fdaaeSJean-Philippe Brucker 		pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
402023fdaaeSJean-Philippe Brucker 		offset = addr.w & PCI_DEV_CFG_MASK;
403b30d05adSPekka Enberg 
404023fdaaeSJean-Philippe Brucker 		if (pci_hdr->cfg_ops.read)
405023fdaaeSJean-Philippe Brucker 			pci_hdr->cfg_ops.read(kvm, pci_hdr, offset, data, size);
406598419d5SPekka Enberg 
407023fdaaeSJean-Philippe Brucker 		memcpy(data, (void *)pci_hdr + offset, size);
4083a60be06SSasha Levin 	} else {
409e498ea08SPekka Enberg 		memset(data, 0xff, size);
41060742802SPekka Enberg 	}
4113a60be06SSasha Levin }
41260742802SPekka Enberg 
4139b735910SMarc Zyngier static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
4149b735910SMarc Zyngier 				   u32 len, u8 is_write, void *kvm)
415b403f2f7SWill Deacon {
416b403f2f7SWill Deacon 	union pci_config_address cfg_addr;
417b403f2f7SWill Deacon 
418b403f2f7SWill Deacon 	addr			-= KVM_PCI_CFG_AREA;
419b403f2f7SWill Deacon 	cfg_addr.w		= (u32)addr;
420b403f2f7SWill Deacon 	cfg_addr.enable_bit	= 1;
421b403f2f7SWill Deacon 
4226ea32ebdSAlexandru Elisei 	if (len > 4)
4236ea32ebdSAlexandru Elisei 		len = 4;
4246ea32ebdSAlexandru Elisei 
425b403f2f7SWill Deacon 	if (is_write)
426b403f2f7SWill Deacon 		pci__config_wr(kvm, cfg_addr, data, len);
427b403f2f7SWill Deacon 	else
428b403f2f7SWill Deacon 		pci__config_rd(kvm, cfg_addr, data, len);
429b403f2f7SWill Deacon }
430b403f2f7SWill Deacon 
431d0297a59SMatt Evans struct pci_device_header *pci__find_dev(u8 dev_num)
432d0297a59SMatt Evans {
43321ff329dSWill Deacon 	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);
4346d987703SSasha Levin 
43521ff329dSWill Deacon 	if (IS_ERR_OR_NULL(hdr))
43621ff329dSWill Deacon 		return NULL;
43721ff329dSWill Deacon 
43821ff329dSWill Deacon 	return hdr->data;
439d0297a59SMatt Evans }
440d0297a59SMatt Evans 
/*
 * Install activate/deactivate callbacks for all implemented BARs of a device
 * and enable emulation for those BARs whose address space type (I/O or
 * memory) is currently enabled in the command register.
 *
 * Returns 0 on success or a negative error code if activating a BAR fails.
 */
int pci__register_bar_regions(struct kvm *kvm, struct pci_device_header *pci_hdr,
			      bar_activate_fn_t bar_activate_fn,
			      bar_deactivate_fn_t bar_deactivate_fn, void *data)
{
	int i, r;

	/* Both callbacks are mandatory. */
	assert(bar_activate_fn && bar_deactivate_fn);

	pci_hdr->bar_activate_fn = bar_activate_fn;
	pci_hdr->bar_deactivate_fn = bar_deactivate_fn;
	pci_hdr->data = data;

	for (i = 0; i < 6; i++) {
		if (!pci_bar_is_implemented(pci_hdr, i))
			continue;

		/* No BAR may be active before its callbacks are registered. */
		assert(!pci_bar_is_active(pci_hdr, i));

		if (pci__bar_is_io(pci_hdr, i) &&
		    pci__io_space_enabled(pci_hdr)) {
			r = pci_activate_bar(kvm, pci_hdr, i);
			if (r < 0)
				return r;
		}

		if (pci__bar_is_memory(pci_hdr, i) &&
		    pci__memory_space_enabled(pci_hdr)) {
			r = pci_activate_bar(kvm, pci_hdr, i);
			if (r < 0)
				return r;
		}
	}

	return 0;
}
4765a8e4f25SAlexandru Elisei 
/*
 * Register the CONFIG_DATA/CONFIG_ADDRESS ioport handlers and the MMIO
 * configuration space region. On failure, handlers registered so far are
 * torn down in reverse order before returning the error.
 */
int pci__init(struct kvm *kvm)
{
	int r;

	r = kvm__register_pio(kvm, PCI_CONFIG_DATA, 4,
				 pci_config_data_mmio, NULL);
	if (r < 0)
		return r;
	r = kvm__register_pio(kvm, PCI_CONFIG_ADDRESS, 4,
				 pci_config_address_mmio, NULL);
	if (r < 0)
		goto err_unregister_data;

	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
			       pci_config_mmio_access, kvm);
	if (r < 0)
		goto err_unregister_addr;

	return 0;

err_unregister_addr:
	kvm__deregister_pio(kvm, PCI_CONFIG_ADDRESS);
err_unregister_data:
	kvm__deregister_pio(kvm, PCI_CONFIG_DATA);
	return r;
}
dev_base_init(pci__init);
5046d987703SSasha Levin 
/* Unregister the PCI configuration space ioport handlers. */
int pci__exit(struct kvm *kvm)
{
	kvm__deregister_pio(kvm, PCI_CONFIG_DATA);
	kvm__deregister_pio(kvm, PCI_CONFIG_ADDRESS);

	return 0;
}
dev_base_exit(pci__exit);
513