// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/slab.h>
#include <xen/pci.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>

static int xen_mcfg_late(void);
#endif

static bool __read_mostly pci_seg_supported = true;

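/*
 * Register a PCI device with the hypervisor.  The preferred
 * PHYSDEVOP_pci_device_add interface carries a 16-bit segment (domain)
 * number and, where ACPI provides one, a _PXM proximity hint; if the
 * hypervisor does not implement it, fall back to the legacy segment-less
 * PHYSDEVOP_manage_pci_add{,_ext} calls.
 */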
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation, as this may be
	 * called from inside acpi_init() immediately after the MCFG table
	 * has been parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif

	if (pci_domain_nr(pci_dev->bus) >> 16) {
		/*
		 * The hypercall interface is limited to 16-bit PCI segment
		 * values; do not attempt to register devices with Xen in
		 * segments greater than or equal to 0x10000.
		 */
		dev_info(dev,
			 "not registering with Xen: invalid PCI segment\n");
		return 0;
	}

	if (pci_seg_supported) {
		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);

		add->seg = pci_domain_nr(pci_dev->bus);
		add->bus = pci_dev->bus->number;
		add->devfn = pci_dev->devfn;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space
			 * at all.  Try to get the ACPI handle of a parent
			 * PCI bus instead.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}

/*
 * Mirror of xen_add_device(): tell the hypervisor that a PCI device is
 * going away, using PHYSDEVOP_pci_device_remove where segments are
 * supported and PHYSDEVOP_manage_pci_remove otherwise.
 */
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_domain_nr(pci_dev->bus) >> 16) {
		/*
		 * The hypercall interface is limited to 16-bit PCI segment
		 * values.
		 */
		dev_info(dev,
			 "not unregistering with Xen: invalid PCI segment\n");
		return 0;
	}

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}

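/**
 * xen_reset_device - notify Xen that a PCI device underwent an FLR
 * @dev: the PCI device that was reset
 *
 * Lets the hypervisor refresh any state it tracks for the device.
 * Return: 0 on success (including when the device's segment cannot be
 * represented in the hypercall interface, in which case Xen is not
 * notified), otherwise an error from the PHYSDEVOP_pci_device_reset call.
 */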
int xen_reset_device(const struct pci_dev *dev)
{
	struct pci_device_reset device = {
		.dev.seg = pci_domain_nr(dev->bus),
		.dev.bus = dev->bus->number,
		.dev.devfn = dev->devfn,
		.flags = PCI_DEVICE_RESET_FLR,
	};

	if (pci_domain_nr(dev->bus) >> 16) {
		/*
		 * The hypercall interface is limited to 16-bit PCI segment
		 * values.
		 */
		dev_info(&dev->dev,
			 "unable to notify Xen of device reset: invalid PCI segment\n");
		return 0;
	}

	return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
}
EXPORT_SYMBOL_GPL(xen_reset_device);

static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);

#ifdef CONFIG_PCI_MMCONFIG
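/*
 * Report the reservation state of the kernel's MMCONFIG (MCFG) regions
 * to Xen via PHYSDEVOP_pci_mmcfg_reserved.  Runs in the initial domain
 * only; -ENOSYS from the hypercall is tolerated for hypervisors that do
 * not implement the operation.
 */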
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif

#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);

/* Caller must hold dev_domain_list_spinlock. */
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}

int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);
	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);

int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);

int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif
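
/*
 * Usage sketch (hypothetical, for illustration only): a passthrough
 * backend such as xen-pciback would record which guest domain owns a
 * device before granting access, look the owner up when routing events
 * for that device, and drop the record on release:
 *
 *	if (xen_register_device_domain_owner(pdev, domid) == -EEXIST)
 *		return -EBUSY;	 // some domain already owns this device
 *	...
 *	domid = xen_find_device_domain_owner(pdev);  // -ENODEV if none
 *	...
 *	xen_unregister_device_domain_owner(pdev);
 */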