121ff329dSWill Deacon #include "kvm/devices.h"
260742802SPekka Enberg #include "kvm/pci.h"
360742802SPekka Enberg #include "kvm/ioport.h"
4b5981636SWill Deacon #include "kvm/irq.h"
576f9c841SCyrill Gorcunov #include "kvm/util.h"
69575e724SSasha Levin #include "kvm/kvm.h"
760742802SPekka Enberg
86d987703SSasha Levin #include <linux/err.h>
96d987703SSasha Levin #include <assert.h>
106d987703SSasha Levin
11a0a7d66fSDavid Daney static u32 pci_config_address_bits;
1260742802SPekka Enberg
1340f2fd06SMatt Evans /* This is within our PCI gap - in an unused area.
1440f2fd06SMatt Evans * Note this is a PCI *bus address*, is used to assign BARs etc.!
1540f2fd06SMatt Evans * (That's why it can still 32bit even with 64bit guests-- 64bit
1640f2fd06SMatt Evans * PCI isn't currently supported.)
1740f2fd06SMatt Evans */
18854aa2efSJulien Thierry static u32 mmio_blocks = KVM_PCI_MMIO_AREA;
19854aa2efSJulien Thierry static u16 io_port_blocks = PCI_IOPORT_START;
20854aa2efSJulien Thierry
pci_get_io_port_block(u32 size)21854aa2efSJulien Thierry u16 pci_get_io_port_block(u32 size)
22854aa2efSJulien Thierry {
2348843d10SJulien Thierry u16 port = ALIGN(io_port_blocks, PCI_IO_SIZE);
24854aa2efSJulien Thierry
25854aa2efSJulien Thierry io_port_blocks = port + size;
26854aa2efSJulien Thierry return port;
27854aa2efSJulien Thierry }
289575e724SSasha Levin
29c7575d17SWill Deacon /*
30c7575d17SWill Deacon * BARs must be naturally aligned, so enforce this in the allocator.
31c7575d17SWill Deacon */
pci_get_mmio_block(u32 size)32854aa2efSJulien Thierry u32 pci_get_mmio_block(u32 size)
339575e724SSasha Levin {
34854aa2efSJulien Thierry u32 block = ALIGN(mmio_blocks, size);
35854aa2efSJulien Thierry mmio_blocks = block + size;
369575e724SSasha Levin return block;
379575e724SSasha Levin }
389575e724SSasha Levin
/*
 * Walk the device's PCI capability list and return a pointer to the
 * first capability whose type field matches @cap_type, or NULL if the
 * device does not advertise such a capability.
 */
void *pci_find_cap(struct pci_device_header *hdr, u8 cap_type)
{
	u8 pos;
	struct pci_cap_hdr *cap;

	pci_for_each_cap(pos, cap, hdr) {
		if (cap->type == cap_type)
			return cap;
	}

	return NULL;
}
511a51c93dSJean-Philippe Brucker
pci__assign_irq(struct pci_device_header * pci_hdr)52c0c45eedSAndre Przywara int pci__assign_irq(struct pci_device_header *pci_hdr)
53b5981636SWill Deacon {
54b5981636SWill Deacon /*
55b5981636SWill Deacon * PCI supports only INTA#,B#,C#,D# per device.
56b5981636SWill Deacon *
57b5981636SWill Deacon * A#,B#,C#,D# are allowed for multifunctional devices so stick
58b5981636SWill Deacon * with A# for our single function devices.
59b5981636SWill Deacon */
60b5981636SWill Deacon pci_hdr->irq_pin = 1;
61b5981636SWill Deacon pci_hdr->irq_line = irq__alloc_line();
62ff01b5dbSJean-Philippe Brucker
63ff01b5dbSJean-Philippe Brucker if (!pci_hdr->irq_type)
642108c86dSMarc Zyngier pci_hdr->irq_type = IRQ_TYPE_LEVEL_HIGH;
65c0c45eedSAndre Przywara
66c0c45eedSAndre Przywara return pci_hdr->irq_line;
67b5981636SWill Deacon }
68b5981636SWill Deacon
pci_bar_is_implemented(struct pci_device_header * pci_hdr,int bar_num)695a8e4f25SAlexandru Elisei static bool pci_bar_is_implemented(struct pci_device_header *pci_hdr, int bar_num)
705a8e4f25SAlexandru Elisei {
715a8e4f25SAlexandru Elisei return pci__bar_size(pci_hdr, bar_num);
725a8e4f25SAlexandru Elisei }
735a8e4f25SAlexandru Elisei
pci_bar_is_active(struct pci_device_header * pci_hdr,int bar_num)74465edc9dSAlexandru Elisei static bool pci_bar_is_active(struct pci_device_header *pci_hdr, int bar_num)
75465edc9dSAlexandru Elisei {
76465edc9dSAlexandru Elisei return pci_hdr->bar_active[bar_num];
77465edc9dSAlexandru Elisei }
78465edc9dSAlexandru Elisei
/*
 * Translate a port in the CONFIG_ADDRESS range into a byte pointer into
 * the latched CONFIG_ADDRESS register, so that sub-word accesses touch
 * the correct bytes of the register.
 */
static void *pci_config_address_ptr(u16 port)
{
	void *reg = &pci_config_address_bits;

	return reg + (port - PCI_CONFIG_ADDRESS);
}
89ba824677SPekka Enberg
/* Emulate guest accesses to the CONFIG_ADDRESS register (port 0xCF8). */
static void pci_config_address_mmio(struct kvm_cpu *vcpu, u64 addr, u8 *data,
				    u32 len, u8 is_write, void *ptr)
{
	void *reg = pci_config_address_ptr(addr);

	if (is_write)
		memcpy(reg, data, len);
	else
		memcpy(data, reg, len);
}
pci_device_exists(u8 bus_number,u8 device_number,u8 function_number)1003fdf659dSSasha Levin static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
10176f9c841SCyrill Gorcunov {
102a0a7d66fSDavid Daney union pci_config_address pci_config_address;
103a0a7d66fSDavid Daney
104a0a7d66fSDavid Daney pci_config_address.w = ioport__read32(&pci_config_address_bits);
105a0a7d66fSDavid Daney
10676f9c841SCyrill Gorcunov if (pci_config_address.bus_number != bus_number)
10776f9c841SCyrill Gorcunov return false;
10876f9c841SCyrill Gorcunov
109b30d05adSPekka Enberg if (pci_config_address.function_number != function_number)
11076f9c841SCyrill Gorcunov return false;
11176f9c841SCyrill Gorcunov
11221ff329dSWill Deacon return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
11376f9c841SCyrill Gorcunov }
11476f9c841SCyrill Gorcunov
/* Emulate guest accesses to the CONFIG_DATA window (ports 0xCFC-0xCFF). */
static void pci_config_data_mmio(struct kvm_cpu *vcpu, u64 addr, u8 *data,
				 u32 len, u8 is_write, void *kvm)
{
	union pci_config_address cfg;

	cfg.w = ioport__read32(&pci_config_address_bits);

	/*
	 * If someone accesses PCI configuration space offsets that are not
	 * aligned to 4 bytes, it uses ioports to signify that.
	 */
	cfg.reg_offset = addr - PCI_CONFIG_DATA;

	/* Ensure the access does not cross a 4-byte boundary */
	len = min(len, 4U - cfg.reg_offset);

	if (is_write)
		pci__config_wr(vcpu->kvm, cfg, data, len);
	else
		pci__config_rd(vcpu->kvm, cfg, data, len);
}
135d0297a59SMatt Evans
pci_activate_bar(struct kvm * kvm,struct pci_device_header * pci_hdr,int bar_num)136465edc9dSAlexandru Elisei static int pci_activate_bar(struct kvm *kvm, struct pci_device_header *pci_hdr,
137465edc9dSAlexandru Elisei int bar_num)
138465edc9dSAlexandru Elisei {
139465edc9dSAlexandru Elisei int r = 0;
140465edc9dSAlexandru Elisei
141465edc9dSAlexandru Elisei if (pci_bar_is_active(pci_hdr, bar_num))
142465edc9dSAlexandru Elisei goto out;
143465edc9dSAlexandru Elisei
144465edc9dSAlexandru Elisei r = pci_hdr->bar_activate_fn(kvm, pci_hdr, bar_num, pci_hdr->data);
145465edc9dSAlexandru Elisei if (r < 0) {
146465edc9dSAlexandru Elisei pci_dev_warn(pci_hdr, "Error activating emulation for BAR %d",
147465edc9dSAlexandru Elisei bar_num);
148465edc9dSAlexandru Elisei goto out;
149465edc9dSAlexandru Elisei }
150465edc9dSAlexandru Elisei pci_hdr->bar_active[bar_num] = true;
151465edc9dSAlexandru Elisei
152465edc9dSAlexandru Elisei out:
153465edc9dSAlexandru Elisei return r;
154465edc9dSAlexandru Elisei }
155465edc9dSAlexandru Elisei
pci_deactivate_bar(struct kvm * kvm,struct pci_device_header * pci_hdr,int bar_num)156465edc9dSAlexandru Elisei static int pci_deactivate_bar(struct kvm *kvm, struct pci_device_header *pci_hdr,
157465edc9dSAlexandru Elisei int bar_num)
158465edc9dSAlexandru Elisei {
159465edc9dSAlexandru Elisei int r = 0;
160465edc9dSAlexandru Elisei
161465edc9dSAlexandru Elisei if (!pci_bar_is_active(pci_hdr, bar_num))
162465edc9dSAlexandru Elisei goto out;
163465edc9dSAlexandru Elisei
164465edc9dSAlexandru Elisei r = pci_hdr->bar_deactivate_fn(kvm, pci_hdr, bar_num, pci_hdr->data);
165465edc9dSAlexandru Elisei if (r < 0) {
166465edc9dSAlexandru Elisei pci_dev_warn(pci_hdr, "Error deactivating emulation for BAR %d",
167465edc9dSAlexandru Elisei bar_num);
168465edc9dSAlexandru Elisei goto out;
169465edc9dSAlexandru Elisei }
170465edc9dSAlexandru Elisei pci_hdr->bar_active[bar_num] = false;
171465edc9dSAlexandru Elisei
172465edc9dSAlexandru Elisei out:
173465edc9dSAlexandru Elisei return r;
174465edc9dSAlexandru Elisei }
175465edc9dSAlexandru Elisei
/*
 * Handle a guest write to the config space COMMAND register: when the
 * I/O or memory space enable bit flips, (de)activate emulation for the
 * BARs of the corresponding type.
 */
static void pci_config_command_wr(struct kvm *kvm,
				  struct pci_device_header *pci_hdr,
				  u16 new_command)
{
	u16 changed = pci_hdr->command ^ new_command;
	int bar;

	for (bar = 0; bar < 6; bar++) {
		if (!pci_bar_is_implemented(pci_hdr, bar))
			continue;

		if ((changed & PCI_COMMAND_IO) &&
		    pci__bar_is_io(pci_hdr, bar)) {
			if (__pci__io_space_enabled(new_command))
				pci_activate_bar(kvm, pci_hdr, bar);
			else
				pci_deactivate_bar(kvm, pci_hdr, bar);
		}

		if ((changed & PCI_COMMAND_MEMORY) &&
		    pci__bar_is_memory(pci_hdr, bar)) {
			if (__pci__memory_space_enabled(new_command))
				pci_activate_bar(kvm, pci_hdr, bar);
			else
				pci_deactivate_bar(kvm, pci_hdr, bar);
		}
	}

	pci_hdr->command = new_command;
}
20746e04130SAlexandru Elisei
/*
 * (De)activate emulation for every implemented BAR, on every PCI
 * device, whose region overlaps [start, start + size). Stops and
 * returns the error on the first callback failure.
 */
static int pci_toggle_bar_regions(bool activate, struct kvm *kvm, u32 start, u32 size)
{
	struct device_header *dev_hdr;
	struct pci_device_header *pci_hdr;
	u32 bar_start, bar_size;
	int i, r;

	for (dev_hdr = device__first_dev(DEVICE_BUS_PCI); dev_hdr;
	     dev_hdr = device__next_dev(dev_hdr)) {
		pci_hdr = dev_hdr->data;
		for (i = 0; i < 6; i++) {
			if (!pci_bar_is_implemented(pci_hdr, i))
				continue;

			bar_start = pci__bar_address(pci_hdr, i);
			bar_size = pci__bar_size(pci_hdr, i);
			/* Skip BARs entirely outside the region. */
			if (bar_start + bar_size <= start ||
			    bar_start >= start + size)
				continue;

			if (activate)
				r = pci_activate_bar(kvm, pci_hdr, i);
			else
				r = pci_deactivate_bar(kvm, pci_hdr, i);
			if (r < 0)
				return r;
		}
	}

	return 0;
}
240465edc9dSAlexandru Elisei
/* Activate emulation for all BARs overlapping [start, start + size). */
static inline int pci_activate_bar_regions(struct kvm *kvm, u32 start, u32 size)
{
	return pci_toggle_bar_regions(true, kvm, start, size);
}
245465edc9dSAlexandru Elisei
/* Deactivate emulation for all BARs overlapping [start, start + size). */
static inline int pci_deactivate_bar_regions(struct kvm *kvm, u32 start, u32 size)
{
	return pci_toggle_bar_regions(false, kvm, start, size);
}
250465edc9dSAlexandru Elisei
/*
 * Handle a guest write to one of the device's BAR registers: answer BAR
 * sizing probes (all-ones writes) and perform BAR reassignment by
 * moving the emulated region to the new address.
 */
static void pci_config_bar_wr(struct kvm *kvm,
			      struct pci_device_header *pci_hdr, int bar_num,
			      u32 value)
{
	u32 old_addr, new_addr, bar_size;
	u32 mask;
	int r;

	/* The low "special" bits (type/prefetch flags) are read-only. */
	if (pci__bar_is_io(pci_hdr, bar_num))
		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
	else
		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * If the kernel masks the BAR, it will expect to find the size of the
	 * BAR there next time it reads from it. After the kernel reads the
	 * size, it will write the address back.
	 *
	 * According to the PCI local bus specification REV 3.0: The number of
	 * upper bits that a device actually implements depends on how much of
	 * the address space the device will respond to. A device that wants a 1
	 * MB memory address space (using a 32-bit base address register) would
	 * build the top 12 bits of the address register, hardwiring the other
	 * bits to 0.
	 *
	 * Furthermore, software can determine how much address space the device
	 * requires by writing a value of all 1's to the register and then
	 * reading the value back. The device will return 0's in all don't-care
	 * address bits, effectively specifying the address space required.
	 *
	 * Software computes the size of the address space with the formula
	 * S = ~B + 1, where S is the memory size and B is the value read from
	 * the BAR. This means that the BAR value that kvmtool should return is
	 * B = ~(S - 1).
	 */
	if (value == 0xffffffff) {
		value = ~(pci__bar_size(pci_hdr, bar_num) - 1);
		/* Preserve the special bits. */
		value = (value & mask) | (pci_hdr->bar[bar_num] & ~mask);
		pci_hdr->bar[bar_num] = value;
		return;
	}

	value = (value & mask) | (pci_hdr->bar[bar_num] & ~mask);

	/* Don't toggle emulation when region type access is disabled. */
	if (pci__bar_is_io(pci_hdr, bar_num) &&
	    !pci__io_space_enabled(pci_hdr)) {
		pci_hdr->bar[bar_num] = value;
		return;
	}

	if (pci__bar_is_memory(pci_hdr, bar_num) &&
	    !pci__memory_space_enabled(pci_hdr)) {
		pci_hdr->bar[bar_num] = value;
		return;
	}

	/*
	 * BAR reassignment can be done while device access is enabled and
	 * memory regions for different devices can overlap as long as no access
	 * is made to the overlapping memory regions. To implement BAR
	 * reassignment, we deactivate emulation for the region described by the
	 * BAR value that the guest is changing, we disable emulation for the
	 * regions that overlap with the new one (by scanning through all PCI
	 * devices), we enable emulation for the new BAR value and finally we
	 * enable emulation for all device regions that were overlapping with
	 * the old value.
	 */
	old_addr = pci__bar_address(pci_hdr, bar_num);
	new_addr = __pci__bar_address(value);
	bar_size = pci__bar_size(pci_hdr, bar_num);

	r = pci_deactivate_bar(kvm, pci_hdr, bar_num);
	if (r < 0)
		return;

	r = pci_deactivate_bar_regions(kvm, new_addr, bar_size);
	if (r < 0) {
		/*
		 * We cannot update the BAR because of an overlapping region
		 * that failed to deactivate emulation, so keep the old BAR
		 * value and re-activate emulation for it.
		 */
		pci_activate_bar(kvm, pci_hdr, bar_num);
		return;
	}

	pci_hdr->bar[bar_num] = value;
	r = pci_activate_bar(kvm, pci_hdr, bar_num);
	if (r < 0) {
		/*
		 * New region cannot be emulated, re-enable the regions that
		 * were overlapping.
		 */
		pci_activate_bar_regions(kvm, new_addr, bar_size);
		return;
	}

	pci_activate_bar_regions(kvm, old_addr, bar_size);
}
352465edc9dSAlexandru Elisei
/*
 * Bits that are writable in the config space header, indexed by config
 * space offset; a zero byte means the whole byte is read-only.
 * Write-1-to-clear Status bits are missing since we never set them.
 */
static const u8 pci_config_writable[PCI_STD_HEADER_SIZEOF] = {
	[PCI_COMMAND] =
		PCI_COMMAND_IO |
		PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER |
		PCI_COMMAND_PARITY,
	[PCI_COMMAND + 1] =
		(PCI_COMMAND_SERR |
		PCI_COMMAND_INTX_DISABLE) >> 8,
	[PCI_INTERRUPT_LINE] = 0xff,
	[PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5 + 3] = 0xff,
	[PCI_CACHE_LINE_SIZE] = 0xff,
};
37078771e77SJean-Philippe Brucker
/*
 * Emulate a guest write to PCI configuration space. Writes to
 * non-existent devices and to read-only header bytes are silently
 * dropped; COMMAND and BAR writes get special handling.
 */
void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	void *base;
	u8 bar;
	u16 offset;
	struct pci_device_header *pci_hdr;
	u8 dev_num = addr.device_number;
	u32 value = 0, mask = 0;

	if (!pci_device_exists(addr.bus_number, dev_num, 0))
		return;

	offset = addr.w & PCI_DEV_CFG_MASK;
	base = pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

	/* We don't sanity-check capabilities for the moment */
	if (offset < PCI_STD_HEADER_SIZEOF) {
		memcpy(&mask, pci_config_writable + offset, size);
		if (!mask)
			return;
	}

	/* Give the device a chance to observe the raw write first. */
	if (pci_hdr->cfg_ops.write)
		pci_hdr->cfg_ops.write(kvm, pci_hdr, offset, data, size);

	/* COMMAND writes may toggle BAR emulation; only writable bits apply. */
	if (offset == PCI_COMMAND) {
		memcpy(&value, data, size);
		pci_config_command_wr(kvm, pci_hdr, (u16)value & mask);
		return;
	}

	/* BAR writes handle sizing probes and region reassignment. */
	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);
	if (bar < 6) {
		memcpy(&value, data, size);
		pci_config_bar_wr(kvm, pci_hdr, bar, value);
		return;
	}

	/* Everything else is stored directly into the header/capabilities. */
	memcpy(base + offset, data, size);
}
4119575e724SSasha Levin
pci__config_rd(struct kvm * kvm,union pci_config_address addr,void * data,int size)412d0297a59SMatt Evans void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
41360742802SPekka Enberg {
414e69b7663SAlexandru Elisei u16 offset;
415023fdaaeSJean-Philippe Brucker struct pci_device_header *pci_hdr;
416023fdaaeSJean-Philippe Brucker u8 dev_num = addr.device_number;
417e4d2cea2SPekka Enberg
418023fdaaeSJean-Philippe Brucker if (pci_device_exists(addr.bus_number, dev_num, 0)) {
419023fdaaeSJean-Philippe Brucker pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
420023fdaaeSJean-Philippe Brucker offset = addr.w & PCI_DEV_CFG_MASK;
421b30d05adSPekka Enberg
422023fdaaeSJean-Philippe Brucker if (pci_hdr->cfg_ops.read)
423023fdaaeSJean-Philippe Brucker pci_hdr->cfg_ops.read(kvm, pci_hdr, offset, data, size);
424598419d5SPekka Enberg
425023fdaaeSJean-Philippe Brucker memcpy(data, (void *)pci_hdr + offset, size);
4263a60be06SSasha Levin } else {
427e498ea08SPekka Enberg memset(data, 0xff, size);
42860742802SPekka Enberg }
4293a60be06SSasha Levin }
43060742802SPekka Enberg
/* Dispatch ECAM-style MMIO config accesses to the config read/write paths. */
static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
				   u32 len, u8 is_write, void *kvm)
{
	union pci_config_address cfg_addr;

	addr -= KVM_PCI_CFG_AREA;
	cfg_addr.w = (u32)addr;
	cfg_addr.enable_bit = 1;

	/*
	 * To prevent some overflows, reject accesses that cross a 4-byte
	 * boundary. The PCIe specification says:
	 *
	 * "Root Complex implementations are not required to support the
	 * generation of Configuration Requests from accesses that cross DW
	 * [4 bytes] boundaries."
	 */
	if ((addr & 3) + len > 4)
		return;

	if (is_write)
		pci__config_wr(kvm, cfg_addr, data, len);
	else
		pci__config_rd(kvm, cfg_addr, data, len);
}
456b403f2f7SWill Deacon
/* Look up a PCI device by device number; NULL if none is registered. */
struct pci_device_header *pci__find_dev(u8 dev_num)
{
	struct device_header *hdr;

	hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);
	if (IS_ERR_OR_NULL(hdr))
		return NULL;

	return hdr->data;
}
466d0297a59SMatt Evans
/*
 * Install the BAR (de)activation callbacks for a device and activate
 * every implemented BAR whose decode type (I/O or memory) is already
 * enabled in the COMMAND register.
 */
int pci__register_bar_regions(struct kvm *kvm, struct pci_device_header *pci_hdr,
			      bar_activate_fn_t bar_activate_fn,
			      bar_deactivate_fn_t bar_deactivate_fn, void *data)
{
	int i, r;

	assert(bar_activate_fn && bar_deactivate_fn);

	pci_hdr->bar_activate_fn = bar_activate_fn;
	pci_hdr->bar_deactivate_fn = bar_deactivate_fn;
	pci_hdr->data = data;

	for (i = 0; i < 6; i++) {
		if (!pci_bar_is_implemented(pci_hdr, i))
			continue;

		/* Callbacks must be registered before any BAR is active. */
		assert(!pci_bar_is_active(pci_hdr, i));

		if ((pci__bar_is_io(pci_hdr, i) &&
		     pci__io_space_enabled(pci_hdr)) ||
		    (pci__bar_is_memory(pci_hdr, i) &&
		     pci__memory_space_enabled(pci_hdr))) {
			r = pci_activate_bar(kvm, pci_hdr, i);
			if (r < 0)
				return r;
		}
	}

	return 0;
}
5025a8e4f25SAlexandru Elisei
/*
 * Register the legacy CONFIG_ADDRESS/CONFIG_DATA I/O ports and the MMIO
 * config window. On failure, previously registered handlers are torn
 * down in reverse order before returning the error.
 */
int pci__init(struct kvm *kvm)
{
	int r;

	r = kvm__register_pio(kvm, PCI_CONFIG_DATA, 4,
			      pci_config_data_mmio, NULL);
	if (r < 0)
		return r;
	r = kvm__register_pio(kvm, PCI_CONFIG_ADDRESS, 4,
			      pci_config_address_mmio, NULL);
	if (r < 0)
		goto err_unregister_data;

	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
			       pci_config_mmio_access, kvm);
	if (r < 0)
		goto err_unregister_addr;

	return 0;

err_unregister_addr:
	kvm__deregister_pio(kvm, PCI_CONFIG_ADDRESS);
err_unregister_data:
	kvm__deregister_pio(kvm, PCI_CONFIG_DATA);
	return r;
}
529bca12bf6SSasha Levin dev_base_init(pci__init);
5306d987703SSasha Levin
pci__exit(struct kvm * kvm)5316d987703SSasha Levin int pci__exit(struct kvm *kvm)
5326d987703SSasha Levin {
5331f56b9d1SAndre Przywara kvm__deregister_pio(kvm, PCI_CONFIG_DATA);
5341f56b9d1SAndre Przywara kvm__deregister_pio(kvm, PCI_CONFIG_ADDRESS);
535*9cb1b46cSTan En De kvm__deregister_mmio(kvm, KVM_PCI_CFG_AREA);
5366d987703SSasha Levin
5376d987703SSasha Levin return 0;
53860742802SPekka Enberg }
539bca12bf6SSasha Levin dev_base_exit(pci__exit);
540