#include "kvm/devices.h"
#include "kvm/pci.h"
#include "kvm/ioport.h"
#include "kvm/irq.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <linux/err.h>
#include <assert.h>

static u32 pci_config_address_bits;

/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address* and is used to assign BARs etc.
 * (That's why it can still be 32-bit even with 64-bit guests; 64-bit
 * PCI isn't currently supported.)
 */
static u32 mmio_blocks = KVM_PCI_MMIO_AREA;
static u16 io_port_blocks = PCI_IOPORT_START;

u16 pci_get_io_port_block(u32 size)
{
	u16 port = ALIGN(io_port_blocks, PCI_IO_SIZE);

	io_port_blocks = port + size;
	return port;
}

/*
 * BARs must be naturally aligned, so enforce this in the allocator.
 */
u32 pci_get_mmio_block(u32 size)
{
	u32 block = ALIGN(mmio_blocks, size);
	mmio_blocks = block + size;
	return block;
}
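/*
 * Illustrative allocation sequence (hypothetical starting value): with
 * mmio_blocks at 0x41000800,
 *
 *	pci_get_mmio_block(0x1000);	returns 0x41001000
 *	pci_get_mmio_block(0x4000);	returns 0x41004000, not 0x41002000
 *
 * because the bump pointer is first rounded up to the requested size. Every
 * BAR therefore comes back aligned to its own size, at the cost of leaving
 * holes in the allocation area.
 */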
void *pci_find_cap(struct pci_device_header *hdr, u8 cap_type)
{
	u8 pos;
	struct pci_cap_hdr *cap;

	pci_for_each_cap(pos, cap, hdr) {
		if (cap->type == cap_type)
			return cap;
	}

	return NULL;
}

int pci__assign_irq(struct pci_device_header *pci_hdr)
{
	/*
	 * PCI supports only INTA#,B#,C#,D# per device.
	 *
	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
	 * with A# for our single function devices.
	 */
	pci_hdr->irq_pin = 1;
	pci_hdr->irq_line = irq__alloc_line();

	if (!pci_hdr->irq_type)
		pci_hdr->irq_type = IRQ_TYPE_EDGE_RISING;

	return pci_hdr->irq_line;
}
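/*
 * In the config space Interrupt Pin register, 1 selects INTA#, 2 INTB#,
 * 3 INTC#, 4 INTD#, and 0 means the function uses no legacy interrupt;
 * irq_pin = 1 above therefore advertises INTA# for every emulated device.
 */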
static bool pci_bar_is_implemented(struct pci_device_header *pci_hdr, int bar_num)
{
	return pci__bar_size(pci_hdr, bar_num);
}

static bool pci_bar_is_active(struct pci_device_header *pci_hdr, int bar_num)
{
	return pci_hdr->bar_active[bar_num];
}

static void *pci_config_address_ptr(u16 port)
{
	unsigned long offset;
	void *base;

	offset = port - PCI_CONFIG_ADDRESS;
	base = &pci_config_address_bits;

	return base + offset;
}

static bool pci_config_address_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(p, data, size);

	return true;
}

static bool pci_config_address_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(data, p, size);

	return true;
}

static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};

static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
{
	union pci_config_address pci_config_address;

	pci_config_address.w = ioport__read32(&pci_config_address_bits);

	if (pci_config_address.bus_number != bus_number)
		return false;

	if (pci_config_address.function_number != function_number)
		return false;

	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
}

static bool pci_config_data_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	union pci_config_address pci_config_address;

	if (size > 4)
		size = 4;

	pci_config_address.w = ioport__read32(&pci_config_address_bits);
	/*
	 * If someone accesses PCI configuration space offsets that are not
	 * aligned to 4 bytes, it uses ioports to signify that.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_wr(vcpu->kvm, pci_config_address, data, size);

	return true;
}

static bool pci_config_data_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	union pci_config_address pci_config_address;

	if (size > 4)
		size = 4;

	pci_config_address.w = ioport__read32(&pci_config_address_bits);
	/*
	 * If someone accesses PCI configuration space offsets that are not
	 * aligned to 4 bytes, it uses ioports to signify that.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_rd(vcpu->kvm, pci_config_address, data, size);

	return true;
}

static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};
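/*
 * Worked example of the legacy CAM mechanism modelled above (illustrative
 * device number): to read the class/revision dword of bus 0, device 2,
 * function 0, a guest first writes 0x80001008 to PCI_CONFIG_ADDRESS:
 *
 *	enable_bit = 1, bus = 0, device = 2, function = 0, register = 0x08
 *
 * then reads 4 bytes from PCI_CONFIG_DATA, which lands in
 * pci_config_data_in() and is forwarded to pci__config_rd(). A one-byte
 * read from PCI_CONFIG_DATA + 2 instead sets reg_offset to 2, addressing
 * byte 0x0a within the same dword.
 */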
static int pci_activate_bar(struct kvm *kvm, struct pci_device_header *pci_hdr,
			    int bar_num)
{
	int r = 0;

	if (pci_bar_is_active(pci_hdr, bar_num))
		goto out;

	r = pci_hdr->bar_activate_fn(kvm, pci_hdr, bar_num, pci_hdr->data);
	if (r < 0) {
		pci_dev_warn(pci_hdr, "Error activating emulation for BAR %d",
			     bar_num);
		goto out;
	}
	pci_hdr->bar_active[bar_num] = true;

out:
	return r;
}

static int pci_deactivate_bar(struct kvm *kvm, struct pci_device_header *pci_hdr,
			      int bar_num)
{
	int r = 0;

	if (!pci_bar_is_active(pci_hdr, bar_num))
		goto out;

	r = pci_hdr->bar_deactivate_fn(kvm, pci_hdr, bar_num, pci_hdr->data);
	if (r < 0) {
		pci_dev_warn(pci_hdr, "Error deactivating emulation for BAR %d",
			     bar_num);
		goto out;
	}
	pci_hdr->bar_active[bar_num] = false;

out:
	return r;
}

static void pci_config_command_wr(struct kvm *kvm,
				  struct pci_device_header *pci_hdr,
				  u16 new_command)
{
	int i;
	bool toggle_io, toggle_mem;

	toggle_io = (pci_hdr->command ^ new_command) & PCI_COMMAND_IO;
	toggle_mem = (pci_hdr->command ^ new_command) & PCI_COMMAND_MEMORY;

	for (i = 0; i < 6; i++) {
		if (!pci_bar_is_implemented(pci_hdr, i))
			continue;

		if (toggle_io && pci__bar_is_io(pci_hdr, i)) {
			if (__pci__io_space_enabled(new_command))
				pci_activate_bar(kvm, pci_hdr, i);
			else
				pci_deactivate_bar(kvm, pci_hdr, i);
		}

		if (toggle_mem && pci__bar_is_memory(pci_hdr, i)) {
			if (__pci__memory_space_enabled(new_command))
				pci_activate_bar(kvm, pci_hdr, i);
			else
				pci_deactivate_bar(kvm, pci_hdr, i);
		}
	}

	pci_hdr->command = new_command;
}
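/*
 * For example, when the guest clears PCI_COMMAND_MEMORY while leaving
 * PCI_COMMAND_IO set, toggle_mem above is true and toggle_io is false, so
 * only the memory BARs have their emulation deactivated; setting the bit
 * again re-activates them at whatever addresses the BARs currently hold.
 */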
static int pci_toggle_bar_regions(bool activate, struct kvm *kvm, u32 start, u32 size)
{
	struct device_header *dev_hdr;
	struct pci_device_header *tmp_hdr;
	u32 tmp_start, tmp_size;
	int i, r;

	dev_hdr = device__first_dev(DEVICE_BUS_PCI);
	while (dev_hdr) {
		tmp_hdr = dev_hdr->data;
		for (i = 0; i < 6; i++) {
			if (!pci_bar_is_implemented(tmp_hdr, i))
				continue;

			tmp_start = pci__bar_address(tmp_hdr, i);
			tmp_size = pci__bar_size(tmp_hdr, i);
			if (tmp_start + tmp_size <= start ||
			    tmp_start >= start + size)
				continue;

			if (activate)
				r = pci_activate_bar(kvm, tmp_hdr, i);
			else
				r = pci_deactivate_bar(kvm, tmp_hdr, i);
			if (r < 0)
				return r;
		}
		dev_hdr = device__next_dev(dev_hdr);
	}

	return 0;
}

static inline int pci_activate_bar_regions(struct kvm *kvm, u32 start, u32 size)
{
	return pci_toggle_bar_regions(true, kvm, start, size);
}

static inline int pci_deactivate_bar_regions(struct kvm *kvm, u32 start, u32 size)
{
	return pci_toggle_bar_regions(false, kvm, start, size);
}

static void pci_config_bar_wr(struct kvm *kvm,
			      struct pci_device_header *pci_hdr, int bar_num,
			      u32 value)
{
	u32 old_addr, new_addr, bar_size;
	u32 mask;
	int r;

	if (pci__bar_is_io(pci_hdr, bar_num))
		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
	else
		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * If the kernel masks the BAR, it will expect to find the size of the
	 * BAR there next time it reads from it. After the kernel reads the
	 * size, it will write the address back.
	 *
	 * According to the PCI local bus specification REV 3.0: The number of
	 * upper bits that a device actually implements depends on how much of
	 * the address space the device will respond to. A device that wants a
	 * 1 MB memory address space (using a 32-bit base address register)
	 * would build the top 12 bits of the address register, hardwiring the
	 * other bits to 0.
	 *
	 * Furthermore, software can determine how much address space the
	 * device requires by writing a value of all 1's to the register and
	 * then reading the value back. The device will return 0's in all
	 * don't-care address bits, effectively specifying the address space
	 * required.
	 *
	 * Software computes the size of the address space with the formula
	 * S = ~B + 1, where S is the memory size and B is the value read from
	 * the BAR. This means that the BAR value that kvmtool should return is
	 * B = ~(S - 1).
	 */
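	/*
	 * Worked example: for a 1 MB memory BAR, S = 0x100000, so kvmtool
	 * returns B = ~(S - 1) = 0xfff00000 (plus the preserved low bits);
	 * the guest then recovers the size as ~0xfff00000 + 1 = 0x100000.
	 */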
	if (value == 0xffffffff) {
		value = ~(pci__bar_size(pci_hdr, bar_num) - 1);
		/* Preserve the special bits. */
		value = (value & mask) | (pci_hdr->bar[bar_num] & ~mask);
		pci_hdr->bar[bar_num] = value;
		return;
	}

	value = (value & mask) | (pci_hdr->bar[bar_num] & ~mask);

	/* Don't toggle emulation when region type access is disabled. */
	if (pci__bar_is_io(pci_hdr, bar_num) &&
	    !pci__io_space_enabled(pci_hdr)) {
		pci_hdr->bar[bar_num] = value;
		return;
	}

	if (pci__bar_is_memory(pci_hdr, bar_num) &&
	    !pci__memory_space_enabled(pci_hdr)) {
		pci_hdr->bar[bar_num] = value;
		return;
	}

	/*
	 * BAR reassignment can be done while device access is enabled and
	 * memory regions for different devices can overlap as long as no
	 * access is made to the overlapping memory regions. To implement BAR
	 * reassignment, we deactivate emulation for the region described by
	 * the BAR value that the guest is changing, we disable emulation for
	 * the regions that overlap with the new one (by scanning through all
	 * PCI devices), we enable emulation for the new BAR value and finally
	 * we enable emulation for all device regions that were overlapping
	 * with the old value.
	 */
	old_addr = pci__bar_address(pci_hdr, bar_num);
	new_addr = __pci__bar_address(value);
	bar_size = pci__bar_size(pci_hdr, bar_num);

	r = pci_deactivate_bar(kvm, pci_hdr, bar_num);
	if (r < 0)
		return;

	r = pci_deactivate_bar_regions(kvm, new_addr, bar_size);
	if (r < 0) {
		/*
		 * We cannot update the BAR because of an overlapping region
		 * that failed to deactivate emulation, so keep the old BAR
		 * value and re-activate emulation for it.
		 */
		pci_activate_bar(kvm, pci_hdr, bar_num);
		return;
	}

	pci_hdr->bar[bar_num] = value;
	r = pci_activate_bar(kvm, pci_hdr, bar_num);
	if (r < 0) {
		/*
		 * New region cannot be emulated, re-enable the regions that
		 * were overlapping.
		 */
		pci_activate_bar_regions(kvm, new_addr, bar_size);
		return;
	}

	pci_activate_bar_regions(kvm, old_addr, bar_size);
}
void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	void *base;
	u8 bar, offset;
	struct pci_device_header *pci_hdr;
	u8 dev_num = addr.device_number;
	u32 value = 0;

	if (!pci_device_exists(addr.bus_number, dev_num, 0))
		return;

	offset = addr.w & PCI_DEV_CFG_MASK;
	base = pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

	if (pci_hdr->cfg_ops.write)
		pci_hdr->cfg_ops.write(kvm, pci_hdr, offset, data, size);

	/*
	 * legacy hack: ignore writes to uninitialized regions (e.g. ROM BAR).
	 * Not very nice but has been working so far.
	 */
	if (*(u32 *)(base + offset) == 0)
		return;

	if (offset == PCI_COMMAND) {
		memcpy(&value, data, size);
		pci_config_command_wr(kvm, pci_hdr, (u16)value);
		return;
	}

	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);
	if (bar < 6) {
		memcpy(&value, data, size);
		pci_config_bar_wr(kvm, pci_hdr, bar, value);
		return;
	}

	memcpy(base + offset, data, size);
}

void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 offset;
	struct pci_device_header *pci_hdr;
	u8 dev_num = addr.device_number;

	if (pci_device_exists(addr.bus_number, dev_num, 0)) {
		pci_hdr = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
		offset = addr.w & PCI_DEV_CFG_MASK;

		if (pci_hdr->cfg_ops.read)
			pci_hdr->cfg_ops.read(kvm, pci_hdr, offset, data, size);

		memcpy(data, (void *)pci_hdr + offset, size);
	} else {
		memset(data, 0xff, size);
	}
}
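/*
 * Config space accesses can also arrive through the MMIO window at
 * KVM_PCI_CFG_AREA. The window offset is reinterpreted as a CF8-style
 * address (with the enable bit forced on), so, with illustrative values,
 * a 4-byte read at KVM_PCI_CFG_AREA + (2 << 11) + 0x08 reaches bus 0,
 * device 2, function 0, register 0x08, exactly like the ioport path above.
 */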
static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
				   u32 len, u8 is_write, void *kvm)
{
	union pci_config_address cfg_addr;

	addr -= KVM_PCI_CFG_AREA;
	cfg_addr.w = (u32)addr;
	cfg_addr.enable_bit = 1;

	if (len > 4)
		len = 4;

	if (is_write)
		pci__config_wr(kvm, cfg_addr, data, len);
	else
		pci__config_rd(kvm, cfg_addr, data, len);
}

struct pci_device_header *pci__find_dev(u8 dev_num)
{
	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);

	if (IS_ERR_OR_NULL(hdr))
		return NULL;

	return hdr->data;
}

int pci__register_bar_regions(struct kvm *kvm, struct pci_device_header *pci_hdr,
			      bar_activate_fn_t bar_activate_fn,
			      bar_deactivate_fn_t bar_deactivate_fn, void *data)
{
	int i, r;

	assert(bar_activate_fn && bar_deactivate_fn);

	pci_hdr->bar_activate_fn = bar_activate_fn;
	pci_hdr->bar_deactivate_fn = bar_deactivate_fn;
	pci_hdr->data = data;

	for (i = 0; i < 6; i++) {
		if (!pci_bar_is_implemented(pci_hdr, i))
			continue;

		assert(!pci_bar_is_active(pci_hdr, i));

		if (pci__bar_is_io(pci_hdr, i) &&
		    pci__io_space_enabled(pci_hdr)) {
			r = pci_activate_bar(kvm, pci_hdr, i);
			if (r < 0)
				return r;
		}

		if (pci__bar_is_memory(pci_hdr, i) &&
		    pci__memory_space_enabled(pci_hdr)) {
			r = pci_activate_bar(kvm, pci_hdr, i);
			if (r < 0)
				return r;
		}
	}

	return 0;
}
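/*
 * A minimal usage sketch (hypothetical device and callback names): a device
 * emulation registers its BAR callbacks once, after setting up its header:
 *
 *	r = pci__register_bar_regions(kvm, &vdev->pci_hdr,
 *				      my_dev__bar_activate,
 *				      my_dev__bar_deactivate, vdev);
 *
 * From then on kvmtool toggles the emulation behind each implemented BAR
 * automatically, as the guest enables decoding or moves the BARs around.
 */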
int pci__init(struct kvm *kvm)
{
	int r;

	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
	if (r < 0)
		return r;

	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
	if (r < 0)
		goto err_unregister_data;

	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
			       pci_config_mmio_access, kvm);
	if (r < 0)
		goto err_unregister_addr;

	return 0;

err_unregister_addr:
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
err_unregister_data:
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	return r;
}
dev_base_init(pci__init);

int pci__exit(struct kvm *kvm)
{
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);

	return 0;
}
dev_base_exit(pci__exit);