#include "kvm/devices.h"
#include "kvm/pci.h"
#include "kvm/ioport.h"
#include "kvm/irq.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <linux/err.h>
#include <assert.h>

#define PCI_BAR_OFFSET(b)	(offsetof(struct pci_device_header, bar[b]))

static union pci_config_address pci_config_address;

/*
 * This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address*; it is used to assign BARs etc.!
 * (That's why it can still be 32-bit even with 64-bit guests -- 64-bit
 * PCI isn't currently supported.)
 */
static u32 io_space_blocks = KVM_PCI_MMIO_AREA;

/*
 * BARs must be naturally aligned, so enforce this in the allocator.
 */
u32 pci_get_io_space_block(u32 size)
{
	u32 block = ALIGN(io_space_blocks, size);
	io_space_blocks = block + size;
	return block;
}

void pci__assign_irq(struct device_header *dev_hdr)
{
	struct pci_device_header *pci_hdr = dev_hdr->data;

	/*
	 * PCI supports only INTA#,B#,C#,D# per device.
	 *
	 * A#,B#,C#,D# are allowed for multifunctional devices, so stick
	 * with A# for our single-function devices.
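	 *
	 * (An Interrupt Pin register value of 1 means INTA# in the PCI
	 * configuration header, which is why irq_pin is hard-wired to 1
	 * below.)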
	 */
	pci_hdr->irq_pin	= 1;
	pci_hdr->irq_line	= irq__alloc_line();
}

static void *pci_config_address_ptr(u16 port)
{
	unsigned long offset;
	void *base;

	offset	= port - PCI_CONFIG_ADDRESS;
	base	= &pci_config_address;

	return base + offset;
}

static bool pci_config_address_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(p, data, size);

	return true;
}

static bool pci_config_address_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(data, p, size);

	return true;
}

static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};

static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
{
	if (pci_config_address.bus_number != bus_number)
		return false;

	if (pci_config_address.function_number != function_number)
		return false;

	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
}

static bool pci_config_data_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	/*
	 * If someone accesses PCI configuration space offsets that are not
	 * aligned to 4 bytes, the offset within the dword is conveyed
	 * through the data port that was used.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_wr(kvm, pci_config_address, data, size);

	return true;
}

static bool pci_config_data_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	/*
	 * If someone accesses PCI configuration space offsets that are not
	 * aligned to 4 bytes, the offset within the dword is conveyed
	 * through the data port that was used.
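	 * For example, a two-byte read of config space offset 0x06 arrives
	 * on port PCI_CONFIG_DATA + 2, so reg_offset below recovers the
	 * offset within the dword selected via PCI_CONFIG_ADDRESS.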
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_rd(kvm, pci_config_address, data, size);

	return true;
}

static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};

void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num	= addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
			struct pci_device_header *hdr = p;
			u8 bar = (offset - PCI_BAR_OFFSET(0)) / (sizeof(u32));
			u32 sz = PCI_IO_SIZE;

			if (bar < 6 && hdr->bar_size[bar])
				sz = hdr->bar_size[bar];

			/*
			 * If the kernel masks the BAR, it expects to find the
			 * size of the BAR there the next time it reads from it.
			 * Once the kernel has the size, it writes the address
			 * back.
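			 * (Such a probe shows up below as a write of
			 * 0xFFFFFFFF to a BAR offset; in that case the BAR's
			 * size is stored in the header instead of the guest's
			 * value, so the following read returns it.)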
			 */
			if (*(u32 *)(p + offset)) {
				/* See if kernel tries to mask one of the BARs */
				if ((offset >= PCI_BAR_OFFSET(0)) &&
				    (offset <= PCI_BAR_OFFSET(6)) &&
				    (ioport__read32(data) == 0xFFFFFFFF))
					memcpy(p + offset, &sz, sizeof(sz));
				else
					memcpy(p + offset, data, size);
			}
		}
	}
}

void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num	= addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

			memcpy(data, p + offset, size);
		} else {
			memset(data, 0x00, size);
		}
	} else {
		memset(data, 0xff, size);
	}
}

static void pci_config_mmio_access(struct kvm_cpu *vcpu, u64 addr, u8 *data,
				   u32 len, u8 is_write, void *kvm)
{
	union pci_config_address cfg_addr;

	addr			-= KVM_PCI_CFG_AREA;
	cfg_addr.w		= (u32)addr;
	cfg_addr.enable_bit	= 1;

	if (is_write)
		pci__config_wr(kvm, cfg_addr, data, len);
	else
		pci__config_rd(kvm, cfg_addr, data, len);
}

struct pci_device_header *pci__find_dev(u8 dev_num)
{
	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);

	if (IS_ERR_OR_NULL(hdr))
		return NULL;

	return hdr->data;
}

int pci__init(struct kvm *kvm)
{
	int r;

	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
	if (r < 0)
		return r;

	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
	if (r < 0)
		goto err_unregister_data;

	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
			       pci_config_mmio_access, kvm);
	if (r < 0)
		goto err_unregister_addr;

	return 0;

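/* Unwind the registrations above in reverse order on failure. */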
err_unregister_addr:
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
err_unregister_data:
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	return r;
}
dev_base_init(pci__init);

int pci__exit(struct kvm *kvm)
{
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);

	return 0;
}
dev_base_exit(pci__exit);