#include "kvm/devices.h"
#include "kvm/pci.h"
#include "kvm/ioport.h"
#include "kvm/irq.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <linux/err.h>
#include <assert.h>

#define PCI_BAR_OFFSET(b) (offsetof(struct pci_device_header, bar[b]))

static u32 pci_config_address_bits;

/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address*; it is used to assign BARs etc.!
 * (That's why it can still be 32 bit even with 64 bit guests -- 64 bit
 * PCI isn't currently supported.)
 */
static u32 io_space_blocks = KVM_PCI_MMIO_AREA;

/*
 * BARs must be naturally aligned, so enforce this in the allocator.
 */
u32 pci_get_io_space_block(u32 size)
{
	u32 block = ALIGN(io_space_blocks, size);
	io_space_blocks = block + size;
	return block;
}

void pci__assign_irq(struct device_header *dev_hdr)
{
	struct pci_device_header *pci_hdr = dev_hdr->data;

	/*
	 * PCI supports only INTA#,B#,C#,D# per device.
	 *
	 * A#,B#,C#,D# are allowed for multifunctional devices so stick
	 * with A# for our single function devices.
	 */
	pci_hdr->irq_pin = 1;
	pci_hdr->irq_line = irq__alloc_line();
}
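
/*
 * Configuration accesses below follow PCI configuration mechanism #1:
 * the guest first latches a 32-bit config address (enable bit, bus,
 * device, function and register offset) into the PCI_CONFIG_ADDRESS
 * port (conventionally 0xCF8 on x86) and then reads or writes the
 * selected register through the PCI_CONFIG_DATA window (conventionally
 * 0xCFC). pci_config_address_bits caches the value last written to
 * PCI_CONFIG_ADDRESS so that the data-port handlers can decode it.
 */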
static void *pci_config_address_ptr(u16 port)
{
	unsigned long offset;
	void *base;

	offset = port - PCI_CONFIG_ADDRESS;
	base = &pci_config_address_bits;

	return base + offset;
}

static bool pci_config_address_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(p, data, size);

	return true;
}

static bool pci_config_address_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(data, p, size);

	return true;
}

static struct ioport_operations pci_config_address_ops = {
	.io_in = pci_config_address_in,
	.io_out = pci_config_address_out,
};

static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
{
	union pci_config_address pci_config_address;

	pci_config_address.w = ioport__read32(&pci_config_address_bits);

	if (pci_config_address.bus_number != bus_number)
		return false;

	if (pci_config_address.function_number != function_number)
		return false;

	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
}
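
/*
 * Worked example (hypothetical guest access, assuming the standard
 * mechanism #1 encoding): to read the 16-bit Device ID at config offset
 * 0x02 of bus 0, device 3, function 0, the guest writes
 *
 *	(1 << 31) | (0 << 16) | (3 << 11) | (0 << 8) | 0x00 = 0x80001800
 *
 * to PCI_CONFIG_ADDRESS and then performs a 2-byte read from
 * PCI_CONFIG_DATA + 2. The data handlers below pick up that sub-word
 * access: "port - PCI_CONFIG_DATA" is 2 here, and storing it in the low
 * reg_offset bits of the config address makes pci__config_rd() read
 * from offset 0x02.
 */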
static bool pci_config_data_out(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	union pci_config_address pci_config_address;

	pci_config_address.w = ioport__read32(&pci_config_address_bits);
	/*
	 * If someone accesses PCI configuration space offsets that are not
	 * aligned to 4 bytes, the exact ioport used signifies that.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_wr(vcpu->kvm, pci_config_address, data, size);

	return true;
}

static bool pci_config_data_in(struct ioport *ioport, struct kvm_cpu *vcpu, u16 port, void *data, int size)
{
	union pci_config_address pci_config_address;

	pci_config_address.w = ioport__read32(&pci_config_address_bits);
	/*
	 * If someone accesses PCI configuration space offsets that are not
	 * aligned to 4 bytes, the exact ioport used signifies that.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_rd(vcpu->kvm, pci_config_address, data, size);

	return true;
}

static struct ioport_operations pci_config_data_ops = {
	.io_in = pci_config_data_in,
	.io_out = pci_config_data_out,
};
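
/*
 * Write side of the config space emulation. Writes are only accepted
 * into the emulated header of an existing device, and only when the
 * dword currently stored at the target offset is non-zero. The one
 * special case is BAR sizing: when the guest writes 0xFFFFFFFF to a
 * BAR, the recorded BAR size (falling back to PCI_IO_SIZE) is stored
 * instead of the all-ones value, so the guest's next read returns the
 * size information before it writes the original address back.
 */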
void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num = addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
			struct pci_device_header *hdr = p;
			u8 bar = (offset - PCI_BAR_OFFSET(0)) / (sizeof(u32));
			u32 sz = cpu_to_le32(PCI_IO_SIZE);

			if (bar < 6 && hdr->bar_size[bar])
				sz = hdr->bar_size[bar];

			/*
			 * If the kernel masks the BAR it would expect to find the
			 * size of the BAR there next time it reads from it.
			 * When the kernel got the size it would write the address
			 * back.
			 */
			if (*(u32 *)(p + offset)) {
				/* See if kernel tries to mask one of the BARs */
				if ((offset >= PCI_BAR_OFFSET(0)) &&
				    (offset <= PCI_BAR_OFFSET(6)) &&
				    (ioport__read32(data) == 0xFFFFFFFF))
					memcpy(p + offset, &sz, sizeof(sz));
				else
					memcpy(p + offset, data, size);
			}
		}
	}
}

void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num = addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

			memcpy(data, p + offset, size);
		} else {
			memset(data, 0x00, size);
		}
	} else {
		memset(data, 0xff, size);
	}
}

static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
				   u32 len, u8 is_write, void *kvm)
{
	union pci_config_address cfg_addr;

	addr -= KVM_PCI_CFG_AREA;
	cfg_addr.w = (u32)addr;
	cfg_addr.enable_bit = 1;

	if (is_write)
		pci__config_wr(kvm, cfg_addr, data, len);
	else
		pci__config_rd(kvm, cfg_addr, data, len);
}

struct pci_device_header *pci__find_dev(u8 dev_num)
{
	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);

	if (IS_ERR_OR_NULL(hdr))
		return NULL;

	return hdr->data;
}
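
/*
 * pci__init() wires up both access paths: the legacy PCI_CONFIG_ADDRESS/
 * PCI_CONFIG_DATA ioport pair handled above, and a flat MMIO window at
 * KVM_PCI_CFG_AREA whose offsets pci_config_mmio_access() turns directly
 * into config addresses (with the enable bit forced on). Presumably the
 * MMIO window serves guests that probe config space without ioports,
 * e.g. non-x86 architectures.
 */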
int pci__init(struct kvm *kvm)
{
	int r;

	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
	if (r < 0)
		return r;

	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
	if (r < 0)
		goto err_unregister_data;

	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
			       pci_config_mmio_access, kvm);
	if (r < 0)
		goto err_unregister_addr;

	return 0;

err_unregister_addr:
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
err_unregister_data:
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	return r;
}
dev_base_init(pci__init);

int pci__exit(struct kvm *kvm)
{
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);

	return 0;
}
dev_base_exit(pci__exit);