#include "kvm/devices.h"
#include "kvm/pci.h"
#include "kvm/ioport.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <linux/err.h>
#include <assert.h>

#define PCI_BAR_OFFSET(b) (offsetof(struct pci_device_header, bar[b]))

static union pci_config_address pci_config_address;

/* This is within our PCI gap - in an unused area.
 * Note this is a PCI *bus address*: it is used to assign BARs etc.
 * (That's why it can still be 32 bits even with 64-bit guests;
 * 64-bit PCI isn't currently supported.)
 */
static u32 io_space_blocks = KVM_PCI_MMIO_AREA;

/*
 * BARs must be naturally aligned, so enforce this in the allocator.
 */
u32 pci_get_io_space_block(u32 size)
{
	u32 block = ALIGN(io_space_blocks, size);
	io_space_blocks = block + size;
	return block;
}

static void *pci_config_address_ptr(u16 port)
{
	unsigned long offset;
	void *base;

	offset = port - PCI_CONFIG_ADDRESS;
	base = &pci_config_address;

	return base + offset;
}

static bool pci_config_address_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(p, data, size);

	return true;
}

static bool pci_config_address_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	void *p = pci_config_address_ptr(port);

	memcpy(data, p, size);

	return true;
}

static struct ioport_operations pci_config_address_ops = {
	.io_in	= pci_config_address_in,
	.io_out	= pci_config_address_out,
};

static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_number)
{
	if (pci_config_address.bus_number != bus_number)
		return false;

	if (pci_config_address.function_number != function_number)
		return false;

	return !IS_ERR_OR_NULL(device__find_dev(DEVICE_BUS_PCI, device_number));
}

static bool pci_config_data_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	/*
	 * Accesses to PCI configuration space offsets that are not aligned
	 * to 4 bytes are signalled through the exact I/O port used
	 * (PCI_CONFIG_DATA + 0..3), so fold the port offset into the
	 * register offset.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_wr(kvm, pci_config_address, data, size);

	return true;
}

static bool pci_config_data_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	/*
	 * Accesses to PCI configuration space offsets that are not aligned
	 * to 4 bytes are signalled through the exact I/O port used
	 * (PCI_CONFIG_DATA + 0..3), so fold the port offset into the
	 * register offset.
	 */
	pci_config_address.reg_offset = port - PCI_CONFIG_DATA;

	pci__config_rd(kvm, pci_config_address, data, size);

	return true;
}

static struct ioport_operations pci_config_data_ops = {
	.io_in	= pci_config_data_in,
	.io_out	= pci_config_data_out,
};

void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num = addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;
			struct pci_device_header *hdr = p;
			u8 bar = (offset - PCI_BAR_OFFSET(0)) / (sizeof(u32));
			u32 sz = PCI_IO_SIZE;

			if (bar < 6 && hdr->bar_size[bar])
				sz = hdr->bar_size[bar];

			/*
			 * If the kernel masks the BAR (writes 0xFFFFFFFF to
			 * it), it expects to find the size of the BAR there
			 * the next time it reads from it. Once the kernel
			 * has the size, it writes the address back.
			 */
			if (*(u32 *)(p + offset)) {
				/* See if the kernel tries to mask one of the BARs */
				if ((offset >= PCI_BAR_OFFSET(0)) &&
				    (offset <= PCI_BAR_OFFSET(6)) &&
				    (ioport__read32(data) == 0xFFFFFFFF))
					memcpy(p + offset, &sz, sizeof(sz));
				else
					memcpy(p + offset, data, size);
			}
		}
	}
}

void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data, int size)
{
	u8 dev_num;

	dev_num = addr.device_number;

	if (pci_device_exists(0, dev_num, 0)) {
		unsigned long offset;

		offset = addr.w & 0xff;
		if (offset < sizeof(struct pci_device_header)) {
			void *p = device__find_dev(DEVICE_BUS_PCI, dev_num)->data;

			memcpy(data, p + offset, size);
		} else {
			memset(data, 0x00, size);
		}
	} else {
		memset(data, 0xff, size);
	}
}

static void pci_config_mmio_access(u64 addr, u8 *data, u32 len, u8 is_write, void *kvm)
{
	union pci_config_address cfg_addr;

	addr			-= KVM_PCI_CFG_AREA;
	cfg_addr.w		= (u32)addr;
	cfg_addr.enable_bit	= 1;

	if (is_write)
		pci__config_wr(kvm, cfg_addr, data, len);
	else
		pci__config_rd(kvm, cfg_addr, data, len);
}

struct pci_device_header *pci__find_dev(u8 dev_num)
{
	struct device_header *hdr = device__find_dev(DEVICE_BUS_PCI, dev_num);

	if (IS_ERR_OR_NULL(hdr))
		return NULL;

	return hdr->data;
}

int pci__init(struct kvm *kvm)
{
	int r;

	r = ioport__register(kvm, PCI_CONFIG_DATA + 0, &pci_config_data_ops, 4, NULL);
	if (r < 0)
		return r;

	r = ioport__register(kvm, PCI_CONFIG_ADDRESS + 0, &pci_config_address_ops, 4, NULL);
	if (r < 0)
		goto err_unregister_data;

	r = kvm__register_mmio(kvm, KVM_PCI_CFG_AREA, PCI_CFG_SIZE, false,
			       pci_config_mmio_access, kvm);
	if (r < 0)
		goto err_unregister_addr;

	return 0;

err_unregister_addr:
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);
err_unregister_data:
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	return r;
}
dev_base_init(pci__init);

int pci__exit(struct kvm *kvm)
{
	ioport__unregister(kvm, PCI_CONFIG_DATA);
	ioport__unregister(kvm, PCI_CONFIG_ADDRESS);

	return 0;
}
dev_base_exit(pci__exit);
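/*
 * Usage sketch (kept inside a comment, not compiled): a device model would
 * typically allocate a block of PCI bus address space for a BAR with
 * pci_get_io_space_block(), describe itself in a struct pci_device_header,
 * and attach that header to a struct device_header registered on
 * DEVICE_BUS_PCI (device__register() from kvm/devices.h), at which point
 * pci__config_rd()/pci__config_wr() above serve the guest's config cycles
 * from that header. The field values below are purely illustrative; see
 * kvm/pci.h for the real pci_device_header layout.
 *
 *	static struct pci_device_header pci_hdr;
 *	static struct device_header dev_hdr = {
 *		.bus_type	= DEVICE_BUS_PCI,
 *		.data		= &pci_hdr,
 *	};
 *
 *	u32 mmio_addr = pci_get_io_space_block(PCI_IO_SIZE);
 *
 *	pci_hdr.vendor_id	= 0x1234;	// illustrative IDs
 *	pci_hdr.device_id	= 0x5678;
 *	pci_hdr.bar[0]		= mmio_addr;	// memory BAR base
 *	pci_hdr.bar_size[0]	= PCI_IO_SIZE;	// reported when the BAR is masked
 *
 *	device__register(&dev_hdr);	// config reads/writes now hit pci_hdr
 */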