1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
5 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
6 * Copyright (c) 2000, BSDi
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
14 * disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 #include "opt_acpi.h"
33 #include "opt_iommu.h"
34 #include "opt_bus.h"
35
36 #include <sys/param.h>
37 #include <sys/conf.h>
38 #include <sys/endian.h>
39 #include <sys/eventhandler.h>
40 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/limits.h>
43 #include <sys/linker.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/queue.h>
47 #include <sys/sbuf.h>
48 #include <sys/stdarg.h>
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51 #include <sys/taskqueue.h>
52 #include <sys/tree.h>
53
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56 #include <vm/vm_extern.h>
57
58 #include <sys/bus.h>
59 #include <machine/bus.h>
60 #include <sys/rman.h>
61 #include <machine/resource.h>
62
63 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
64 #include <machine/intr_machdep.h>
65 #endif
66
67 #include <sys/pciio.h>
68 #include <dev/pci/pcireg.h>
69 #include <dev/pci/pcivar.h>
70 #include <dev/pci/pci_private.h>
71
72 #ifdef PCI_IOV
73 #include <sys/nv.h>
74 #include <dev/pci/pci_iov_private.h>
75 #endif
76
77 #include <dev/usb/controller/xhcireg.h>
78 #include <dev/usb/controller/ehcireg.h>
79 #include <dev/usb/controller/ohcireg.h>
80 #include <dev/usb/controller/uhcireg.h>
81
82 #include <dev/iommu/iommu.h>
83
84 #include "pcib_if.h"
85 #include "pci_if.h"
86
87 #define PCIR_IS_BIOS(cfg, reg) \
88 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
89 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
90
91 static device_probe_t pci_probe;
92
93 static bus_reset_post_t pci_reset_post;
94 static bus_reset_prepare_t pci_reset_prepare;
95 static bus_reset_child_t pci_reset_child;
96 static bus_hint_device_unit_t pci_hint_device_unit;
97 static bus_remap_intr_t pci_remap_intr_method;
98
99 static pci_get_id_t pci_get_id_method;
100
101 static int pci_has_quirk(uint32_t devid, int quirk);
102 static pci_addr_t pci_mapbase(uint64_t mapreg);
103 static const char *pci_maptype(uint64_t mapreg);
104 static int pci_maprange(uint64_t mapreg);
105 static pci_addr_t pci_rombase(uint64_t mapreg);
106 static int pci_romsize(uint64_t testval);
107 static void pci_fixancient(pcicfgregs *cfg);
108 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
109
110 static int pci_porten(device_t dev);
111 static int pci_memen(device_t dev);
112 static void pci_assign_interrupt(device_t bus, device_t dev,
113 int force_route);
114 static int pci_add_map(device_t bus, device_t dev, int reg,
115 struct resource_list *rl, int force, int prefetch);
116 static void pci_load_vendor_data(void);
117 static int pci_describe_parse_line(char **ptr, int *vendor,
118 int *device, char **desc);
119 static char *pci_describe_device(device_t dev);
120 static int pci_modevent(module_t mod, int what, void *arg);
121 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
122 pcicfgregs *cfg);
123 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
124 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
125 int reg, uint32_t *data);
126 #if 0
127 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
128 int reg, uint32_t data);
129 #endif
130 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
131 static void pci_mask_msix(device_t dev, u_int index);
132 static void pci_unmask_msix(device_t dev, u_int index);
133 static int pci_msi_blacklisted(void);
134 static int pci_msix_blacklisted(void);
135 static void pci_resume_msi(device_t dev);
136 static void pci_resume_msix(device_t dev);
137 static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d,
138 int b, int s, int f, uint16_t vid, uint16_t did);
139
/*
 * Method table for the pci bus driver.  Kernel bus, device, and PCI
 * interface methods dispatch through this table; entries either point
 * at local implementations or at bus_generic_* defaults.
 */
static device_method_t pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pci_probe),
	DEVMETHOD(device_attach,	pci_attach),
	DEVMETHOD(device_detach,	pci_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	pci_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	pci_print_child),
	DEVMETHOD(bus_probe_nomatch,	pci_probe_nomatch),
	DEVMETHOD(bus_read_ivar,	pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	pci_write_ivar),
	DEVMETHOD(bus_driver_added,	pci_driver_added),
	DEVMETHOD(bus_setup_intr,	pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	pci_teardown_intr),
	DEVMETHOD(bus_reset_prepare,	pci_reset_prepare),
	DEVMETHOD(bus_reset_post,	pci_reset_post),
	DEVMETHOD(bus_reset_child,	pci_reset_child),

	DEVMETHOD(bus_get_dma_tag,	pci_get_dma_tag),
	DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,	pci_delete_resource),
	DEVMETHOD(bus_alloc_resource,	pci_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	pci_adjust_resource),
	DEVMETHOD(bus_release_resource,	pci_release_resource),
	DEVMETHOD(bus_activate_resource, pci_activate_resource),
	DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
	DEVMETHOD(bus_map_resource,	pci_map_resource),
	DEVMETHOD(bus_unmap_resource,	pci_unmap_resource),
	DEVMETHOD(bus_child_deleted,	pci_child_deleted),
	DEVMETHOD(bus_child_detached,	pci_child_detached),
	DEVMETHOD(bus_child_pnpinfo,	pci_child_pnpinfo_method),
	DEVMETHOD(bus_child_location,	pci_child_location_method),
	DEVMETHOD(bus_get_device_path,	pci_get_device_path_method),
	DEVMETHOD(bus_hint_device_unit,	pci_hint_device_unit),
	DEVMETHOD(bus_remap_intr,	pci_remap_intr_method),
	DEVMETHOD(bus_suspend_child,	pci_suspend_child),
	DEVMETHOD(bus_resume_child,	pci_resume_child),
	DEVMETHOD(bus_rescan,		pci_rescan_method),

	/* PCI interface */
	DEVMETHOD(pci_read_config,	pci_read_config_method),
	DEVMETHOD(pci_write_config,	pci_write_config_method),
	DEVMETHOD(pci_enable_busmaster,	pci_enable_busmaster_method),
	DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
	DEVMETHOD(pci_enable_io,	pci_enable_io_method),
	DEVMETHOD(pci_disable_io,	pci_disable_io_method),
	DEVMETHOD(pci_get_vpd_ident,	pci_get_vpd_ident_method),
	DEVMETHOD(pci_get_vpd_readonly,	pci_get_vpd_readonly_method),
	DEVMETHOD(pci_get_powerstate,	pci_get_powerstate_method),
	DEVMETHOD(pci_set_powerstate,	pci_set_powerstate_method),
	DEVMETHOD(pci_assign_interrupt,	pci_assign_interrupt_method),
	DEVMETHOD(pci_find_cap,		pci_find_cap_method),
	DEVMETHOD(pci_find_next_cap,	pci_find_next_cap_method),
	DEVMETHOD(pci_find_extcap,	pci_find_extcap_method),
	DEVMETHOD(pci_find_next_extcap,	pci_find_next_extcap_method),
	DEVMETHOD(pci_find_htcap,	pci_find_htcap_method),
	DEVMETHOD(pci_find_next_htcap,	pci_find_next_htcap_method),
	DEVMETHOD(pci_alloc_msi,	pci_alloc_msi_method),
	DEVMETHOD(pci_alloc_msix,	pci_alloc_msix_method),
	DEVMETHOD(pci_enable_msi,	pci_enable_msi_method),
	DEVMETHOD(pci_enable_msix,	pci_enable_msix_method),
	DEVMETHOD(pci_disable_msi,	pci_disable_msi_method),
	DEVMETHOD(pci_remap_msix,	pci_remap_msix_method),
	DEVMETHOD(pci_release_msi,	pci_release_msi_method),
	DEVMETHOD(pci_msi_count,	pci_msi_count_method),
	DEVMETHOD(pci_msix_count,	pci_msix_count_method),
	DEVMETHOD(pci_msix_pba_bar,	pci_msix_pba_bar_method),
	DEVMETHOD(pci_msix_table_bar,	pci_msix_table_bar_method),
	DEVMETHOD(pci_get_id,		pci_get_id_method),
	DEVMETHOD(pci_alloc_devinfo,	pci_alloc_devinfo_method),
	DEVMETHOD(pci_child_added,	pci_child_added_method),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_attach,	pci_iov_attach_method),
	DEVMETHOD(pci_iov_detach,	pci_iov_detach_method),
	DEVMETHOD(pci_create_iov_child,	pci_create_iov_child_method),
#endif

	DEVMETHOD_END
};
224
DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));

/* Attach the pci driver below every pcib (PCI bridge) during the bus pass. */
EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_modevent, NULL, BUS_PASS_BUS);
MODULE_VERSION(pci, 1);

/* Vendor/device description text; see pci_load_vendor_data(). */
static char	*pci_vendordata;
static size_t	pci_vendordata_size;
232
/*
 * Table-driven quirk handling: each entry matches a vendor/device pair
 * and names a workaround type, with optional per-quirk arguments.
 */
struct pci_quirk {
	uint32_t devid;	/* Vendor/device of the card */
	int	type;
#define	PCI_QUIRK_MAP_REG	1 /* PCI map register in weird place */
#define	PCI_QUIRK_DISABLE_MSI	2 /* Neither MSI nor MSI-X work */
#define	PCI_QUIRK_ENABLE_MSI_VM	3 /* Older chipset in VM where MSI works */
#define	PCI_QUIRK_UNMAP_REG	4 /* Ignore PCI map register */
#define	PCI_QUIRK_DISABLE_MSIX	5 /* MSI-X doesn't work */
#define	PCI_QUIRK_MSI_INTX_BUG	6 /* PCIM_CMD_INTxDIS disables MSI */
#define	PCI_QUIRK_REALLOC_BAR	7 /* Can't allocate memory at the default address */
#define	PCI_QUIRK_DISABLE_FLR	8 /* Function-Level Reset (FLR) not working. */
	int	arg1;	/* Quirk-specific argument (e.g. register offset) */
	int	arg2;	/* Second quirk-specific argument, usually unused */
};
247
static const struct pci_quirk pci_quirks[] = {
	/* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
	{ 0x71138086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	{ 0x719b8086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	/* As does the Serverworks OSB4 (the SMBus mapping register) */
	{ 0x02001166, PCI_QUIRK_MAP_REG,	0x90,	 0 },

	/*
	 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
	 * or the CMIC-SL (AKA ServerWorks GC_LE).
	 */
	{ 0x00141166, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x00171166, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work on earlier Intel chipsets including
	 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
	 */
	{ 0x25408086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x254c8086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25508086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25608086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25708086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25788086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x35808086, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
	 * bridge.
	 */
	{ 0x74501022, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * Some virtualization environments emulate an older chipset
	 * but support MSI just fine.  QEMU uses the Intel 82440.
	 */
	{ 0x12378086, PCI_QUIRK_ENABLE_MSI_VM,	0,	0 },

	/*
	 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
	 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
	 * It prevents us from attaching hpet(4) when the bit is unset.
	 * Note this quirk only affects SB600 revision A13 and earlier.
	 * For SB600 A21 and later, firmware must set the bit to hide it.
	 * For SB700 and later, it is unused and hardcoded to zero.
	 */
	{ 0x43851002, PCI_QUIRK_UNMAP_REG,	0x14,	0 },

	/*
	 * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have
	 * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
	 * of the command register is set.
	 */
	{ 0x10911969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0xE0911969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0x10901969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },

	/*
	 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
	 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
	 */
	{ 0x166814e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5714 */
	{ 0x166914e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5714S */
	{ 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5780 */
	{ 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5780S */
	{ 0x167814e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5715 */
	{ 0x167914e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5715S */

	/*
	 * HPE Gen 10 VGA has a memory range that can't be allocated in the
	 * expected place.
	 */
	{ 0x98741002, PCI_QUIRK_REALLOC_BAR,	0, 	0 },

	/*
	 * With some MediaTek mt76 WiFi FLR does not work despite advertised.
	 */
	{ 0x061614c3, PCI_QUIRK_DISABLE_FLR,	0,	0 }, /* mt76 7922 */

	/* End of table: devid == 0 is the sentinel pci_has_quirk() stops on. */
	{ 0 }
};
332
/* map register information */
#define	PCI_MAPMEM	0x01	/* memory map */
#define	PCI_MAPMEMP	0x02	/* prefetchable memory map */
#define	PCI_MAPPORT	0x04	/* port map */

/* Global list of every enumerated PCI function, in discovery order. */
struct devlist pci_devq;
/* Bumped on every addition/removal so userland can detect stale snapshots. */
uint32_t pci_generation;
uint32_t pci_numdevs = 0;
/* Set once any PCIe/PCI-X capability is seen during capability scan. */
static int pcie_chipset, pcix_chipset;
342
/* sysctl vars: tunables and knobs under hw.pci */
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "PCI bus tuning parameters");

static int pci_enable_io_modes = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
    &pci_enable_io_modes, 1,
    "Enable I/O and memory bits in the config register.  Some BIOSes do not"
    " enable these bits correctly.  We'd like to do this all the time, but"
    " there are some peripherals that this causes problems with.");

static int pci_do_realloc_bars = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
    &pci_do_realloc_bars, 0,
    "Attempt to allocate a new range for any BARs whose original "
    "firmware-assigned ranges fail to allocate during the initial device scan.");

static int pci_do_power_nodriver = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
    &pci_do_power_nodriver, 0,
    "Place a function into D3 state when no driver attaches to it.  0 means"
    " disable.  1 means conservatively place function into D3 state.  2 means"
    " aggressively place function into D3 state.  3 means put absolutely"
    " everything in D3 state.");

int pci_do_power_resume = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
    &pci_do_power_resume, 1,
    "Transition from D3 -> D0 on resume.");

int pci_do_power_suspend = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
    &pci_do_power_suspend, 1,
    "Transition from D0 -> D3 on suspend.");

static int pci_do_msi = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
    "Enable support for MSI interrupts");

static int pci_do_msix = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
    "Enable support for MSI-X interrupts");

static int pci_msix_rewrite_table = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN,
    &pci_msix_rewrite_table, 0,
    "Rewrite entire MSI-X table when updating MSI-X entries");

static int pci_honor_msi_blacklist = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
    &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");

/* USB early takeover defaults on only where BIOS legacy emulation exists. */
#if defined(__i386__) || defined(__amd64__)
static int pci_usb_takeover = 1;
#else
static int pci_usb_takeover = 0;
#endif
SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
    &pci_usb_takeover, 1,
    "Enable early takeover of USB controllers. Disable this if you depend on"
    " BIOS emulation of USB devices, that is you use USB devices (like"
    " keyboard or mouse) but do not load USB drivers");

static int pci_clear_bars;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
    "Ignore firmware-assigned resources for BARs.");

static int pci_clear_buses;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
    "Ignore firmware-assigned bus numbers.");

static int pci_enable_ari = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
    0, "Enable support for PCIe Alternative RID Interpretation");

/*
 * Some x86 firmware only enables PCIe hotplug if we claim to support aspm,
 * however enabling it breaks some arm64 firmware as it powers off devices.
 */
#if defined(__i386__) || defined(__amd64__)
int pci_enable_aspm = 1;
#else
int pci_enable_aspm = 0;
#endif
SYSCTL_INT(_hw_pci, OID_AUTO, enable_aspm, CTLFLAG_RDTUN, &pci_enable_aspm,
    0, "Enable support for PCIe Active State Power Management");

static int pci_clear_aer_on_attach = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN,
    &pci_clear_aer_on_attach, 0,
    "Clear port and device AER state on driver attach");

static bool pci_enable_mps_tune = true;
SYSCTL_BOOL(_hw_pci, OID_AUTO, enable_mps_tune, CTLFLAG_RWTUN,
    &pci_enable_mps_tune, 1,
    "Enable tuning of MPS(maximum payload size)." );

static bool pci_intx_reroute = true;
SYSCTL_BOOL(_hw_pci, OID_AUTO, intx_reroute, CTLFLAG_RWTUN,
    &pci_intx_reroute, 0, "Re-route INTx interrupts when scanning devices");
443
444 static int
pci_has_quirk(uint32_t devid,int quirk)445 pci_has_quirk(uint32_t devid, int quirk)
446 {
447 const struct pci_quirk *q;
448
449 for (q = &pci_quirks[0]; q->devid; q++) {
450 if (q->devid == devid && q->type == quirk)
451 return (1);
452 }
453 return (0);
454 }
455
456 /* Find a device_t by bus/slot/function in domain 0 */
457
device_t
pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
{

	/* Convenience wrapper: assume domain (segment) 0. */
	return (pci_find_dbsf(0, bus, slot, func));
}
464
465 /* Find a device_t by domain/bus/slot/function */
466
467 device_t
pci_find_dbsf(uint32_t domain,uint8_t bus,uint8_t slot,uint8_t func)468 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
469 {
470 struct pci_devinfo *dinfo = NULL;
471
472 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
473 if ((dinfo->cfg.domain == domain) &&
474 (dinfo->cfg.bus == bus) &&
475 (dinfo->cfg.slot == slot) &&
476 (dinfo->cfg.func == func)) {
477 break;
478 }
479 }
480
481 return (dinfo != NULL ? dinfo->cfg.dev : NULL);
482 }
483
484 /* Find a device_t by vendor/device ID */
485
486 device_t
pci_find_device(uint16_t vendor,uint16_t device)487 pci_find_device(uint16_t vendor, uint16_t device)
488 {
489 struct pci_devinfo *dinfo;
490
491 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
492 if ((dinfo->cfg.vendor == vendor) &&
493 (dinfo->cfg.device == device)) {
494 return (dinfo->cfg.dev);
495 }
496 }
497
498 return (NULL);
499 }
500
501 device_t
pci_find_class(uint8_t class,uint8_t subclass)502 pci_find_class(uint8_t class, uint8_t subclass)
503 {
504 struct pci_devinfo *dinfo;
505
506 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
507 if (dinfo->cfg.baseclass == class &&
508 dinfo->cfg.subclass == subclass) {
509 return (dinfo->cfg.dev);
510 }
511 }
512
513 return (NULL);
514 }
515
516 device_t
pci_find_class_from(uint8_t class,uint8_t subclass,device_t from)517 pci_find_class_from(uint8_t class, uint8_t subclass, device_t from)
518 {
519 struct pci_devinfo *dinfo;
520 bool found = false;
521
522 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
523 if (from != NULL && found == false) {
524 if (from != dinfo->cfg.dev)
525 continue;
526 found = true;
527 continue;
528 }
529 if (dinfo->cfg.baseclass == class &&
530 dinfo->cfg.subclass == subclass) {
531 return (dinfo->cfg.dev);
532 }
533 }
534
535 return (NULL);
536 }
537
538 device_t
pci_find_base_class_from(uint8_t class,device_t from)539 pci_find_base_class_from(uint8_t class, device_t from)
540 {
541 struct pci_devinfo *dinfo;
542 bool found = false;
543
544 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
545 if (from != NULL && found == false) {
546 if (from != dinfo->cfg.dev)
547 continue;
548 found = true;
549 continue;
550 }
551 if (dinfo->cfg.baseclass == class) {
552 return (dinfo->cfg.dev);
553 }
554 }
555
556 return (NULL);
557 }
558
/*
 * printf() wrapper that prefixes output with the function's
 * "pciD:B:S:F: " location.  Returns the total character count printed,
 * matching printf()'s convention.
 */
static int
pci_printf(pcicfgregs *cfg, const char *fmt, ...)
{
	va_list ap;
	int retval;

	retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
	    cfg->func);
	va_start(ap, fmt);
	retval += vprintf(fmt, ap);
	va_end(ap);
	return (retval);
}
572
573 /* return base address of memory or port map */
574
575 static pci_addr_t
pci_mapbase(uint64_t mapreg)576 pci_mapbase(uint64_t mapreg)
577 {
578
579 if (PCI_BAR_MEM(mapreg))
580 return (mapreg & PCIM_BAR_MEM_BASE);
581 else
582 return (mapreg & PCIM_BAR_IO_BASE);
583 }
584
585 /* return map type of memory or port map */
586
587 static const char *
pci_maptype(uint64_t mapreg)588 pci_maptype(uint64_t mapreg)
589 {
590
591 if (PCI_BAR_IO(mapreg))
592 return ("I/O Port");
593 if (mapreg & PCIM_BAR_MEM_PREFETCH)
594 return ("Prefetchable Memory");
595 return ("Memory");
596 }
597
598 /* return log2 of map size decoded for memory or port map */
599
600 int
pci_mapsize(uint64_t testval)601 pci_mapsize(uint64_t testval)
602 {
603 int ln2size;
604
605 testval = pci_mapbase(testval);
606 ln2size = 0;
607 if (testval != 0) {
608 while ((testval & 1) == 0)
609 {
610 ln2size++;
611 testval >>= 1;
612 }
613 }
614 return (ln2size);
615 }
616
617 /* return base address of device ROM */
618
static pci_addr_t
pci_rombase(uint64_t mapreg)
{

	/* Keep only the address bits of the expansion ROM BAR. */
	return (mapreg & PCIM_BIOS_ADDR_MASK);
}
625
/* return log2 of map size decoded for device ROM */
627
628 static int
pci_romsize(uint64_t testval)629 pci_romsize(uint64_t testval)
630 {
631 int ln2size;
632
633 testval = pci_rombase(testval);
634 ln2size = 0;
635 if (testval != 0) {
636 while ((testval & 1) == 0)
637 {
638 ln2size++;
639 testval >>= 1;
640 }
641 }
642 return (ln2size);
643 }
644
645 /* return log2 of address range supported by map register */
646
647 static int
pci_maprange(uint64_t mapreg)648 pci_maprange(uint64_t mapreg)
649 {
650 int ln2range = 0;
651
652 if (PCI_BAR_IO(mapreg))
653 ln2range = 32;
654 else
655 switch (mapreg & PCIM_BAR_MEM_TYPE) {
656 case PCIM_BAR_MEM_32:
657 ln2range = 32;
658 break;
659 case PCIM_BAR_MEM_1MB:
660 ln2range = 20;
661 break;
662 case PCIM_BAR_MEM_64:
663 ln2range = 64;
664 break;
665 }
666 return (ln2range);
667 }
668
669 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
670
671 static void
pci_fixancient(pcicfgregs * cfg)672 pci_fixancient(pcicfgregs *cfg)
673 {
674 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
675 return;
676
677 /* PCI to PCI bridges use header type 1 */
678 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
679 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
680 }
681
682 /* extract header type specific config data */
683
/*
 * Read the header-type-specific portion of config space into 'cfg':
 * subsystem IDs and latency values for type 0, secondary bus ranges and
 * bridge control for types 1 (PCI-PCI bridge) and 2 (CardBus).  Also
 * sets the number of BARs each header type provides.
 */
static void
pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		/* Type 0: ordinary function. */
		cfg->subvendor      = REG(PCIR_SUBVEND_0, 2);
		cfg->subdevice      = REG(PCIR_SUBDEV_0, 2);
		cfg->mingnt         = REG(PCIR_MINGNT, 1);
		cfg->maxlat         = REG(PCIR_MAXLAT, 1);
		cfg->nummaps	    = PCI_MAXMAPS_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		/* Type 1: PCI-PCI bridge. */
		cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1);
		cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1);
		cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1);
		cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1);
		cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2);
		cfg->nummaps	    = PCI_MAXMAPS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		/* Type 2: CardBus bridge (also carries subsystem IDs). */
		cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1);
		cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1);
		cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1);
		cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1);
		cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2);
		cfg->subvendor      = REG(PCIR_SUBVEND_2, 2);
		cfg->subdevice      = REG(PCIR_SUBDEV_2, 2);
		cfg->nummaps	    = PCI_MAXMAPS_2;
		break;
	}
#undef REG
}
717
718 /* read configuration header into pcicfgregs structure */
struct pci_devinfo *
pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	uint16_t vid, did;

	/* An invalid vendor ID means no function is present here. */
	vid = REG(PCIR_VENDOR, 2);
	if (vid == PCIV_INVALID)
		return (NULL);

	did = REG(PCIR_DEVICE, 2);

	return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did));
}
733
/*
 * Default PCI_ALLOC_DEVINFO() implementation: allocate a zeroed
 * pci_devinfo.  M_WAITOK means this cannot fail.
 */
struct pci_devinfo *
pci_alloc_devinfo_method(device_t dev)
{

	return (malloc(sizeof(struct pci_devinfo), M_DEVBUF,
	    M_WAITOK | M_ZERO));
}
741
/*
 * Allocate a devinfo for the function at domain/bus/slot/function,
 * populate it from config space (including header-type specific data
 * and the capability list), link it onto the global pci_devq list, and
 * fill in the pciconf snapshot used by the pciio ioctl interface.
 */
static struct pci_devinfo *
pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f,
    uint16_t vid, uint16_t did)
{
	struct pci_devinfo *devlist_entry;
	pcicfgregs *cfg;

	devlist_entry = PCI_ALLOC_DEVINFO(bus);

	cfg = &devlist_entry->cfg;

	cfg->domain		= d;
	cfg->bus		= b;
	cfg->slot		= s;
	cfg->func		= f;
	cfg->vendor		= vid;
	cfg->device		= did;
	cfg->cmdreg		= REG(PCIR_COMMAND, 2);
	cfg->statreg		= REG(PCIR_STATUS, 2);
	cfg->baseclass		= REG(PCIR_CLASS, 1);
	cfg->subclass		= REG(PCIR_SUBCLASS, 1);
	cfg->progif		= REG(PCIR_PROGIF, 1);
	cfg->revid		= REG(PCIR_REVID, 1);
	cfg->hdrtype		= REG(PCIR_HDRTYPE, 1);
	cfg->cachelnsz		= REG(PCIR_CACHELNSZ, 1);
	cfg->lattimer		= REG(PCIR_LATTIMER, 1);
	cfg->intpin		= REG(PCIR_INTPIN, 1);
	cfg->intline		= REG(PCIR_INTLINE, 1);

	/* Record the multi-function bit, then strip it from hdrtype. */
	cfg->mfdev		= (cfg->hdrtype & PCIM_MFDEV) != 0;
	cfg->hdrtype		&= ~PCIM_MFDEV;
	STAILQ_INIT(&cfg->maps);

	cfg->iov		= NULL;

	pci_fixancient(cfg);
	pci_hdrtypedata(pcib, b, s, f, cfg);

	/* Walk the capability list only if the status register says so. */
	if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
		pci_read_cap(pcib, cfg);

	STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links);

	/* Populate the pciconf snapshot exported to userland. */
	devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
	devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
	devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
	devlist_entry->conf.pc_sel.pc_func = cfg->func;
	devlist_entry->conf.pc_hdr = cfg->hdrtype;

	devlist_entry->conf.pc_subvendor = cfg->subvendor;
	devlist_entry->conf.pc_subdevice = cfg->subdevice;
	devlist_entry->conf.pc_vendor = cfg->vendor;
	devlist_entry->conf.pc_device = cfg->device;

	devlist_entry->conf.pc_class = cfg->baseclass;
	devlist_entry->conf.pc_subclass = cfg->subclass;
	devlist_entry->conf.pc_progif = cfg->progif;
	devlist_entry->conf.pc_revid = cfg->revid;

	devlist_entry->conf.pc_secbus = cfg->bridge.br_secbus;
	devlist_entry->conf.pc_subbus = cfg->bridge.br_subbus;

	pci_numdevs++;
	pci_generation++;

	return (devlist_entry);
}
809 #undef REG
810
/*
 * Parse the Enhanced Allocation (EA) capability: walk each variable
 * length entry, decode its base and max offset (optionally 64-bit),
 * and append a pci_ea_entry to cfg->ea.ea_entries.  No-op when the EA
 * capability was not found (ea_location == 0).
 */
static void
pci_ea_fill_info(device_t pcib, pcicfgregs *cfg)
{
#define	REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \
    cfg->ea.ea_location + (n), w)
	int num_ent;
	int ptr;
	int a, b;
	uint32_t val;
	int ent_size;
	uint32_t dw[4];
	uint64_t base, max_offset;
	struct pci_ea_entry *eae;

	if (cfg->ea.ea_location == 0)
		return;

	STAILQ_INIT(&cfg->ea.ea_entries);

	/* Determine the number of entries */
	num_ent = REG(PCIR_EA_NUM_ENT, 2);
	num_ent &= PCIM_EA_NUM_ENT_MASK;

	/* Locate the first entry */
	ptr = PCIR_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE)
		ptr += 4;

	for (a = 0; a < num_ent; a++) {
		eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO);
		eae->eae_cfg_offset = cfg->ea.ea_location + ptr;

		/* Read the entry header; it encodes the entry size in dwords */
		val = REG(ptr, 4);
		ptr += 4;
		ent_size = (val & PCIM_EA_ES);

		for (b = 0; b < ent_size; b++) {
			dw[b] = REG(ptr, 4);
			ptr += 4;
		}

		eae->eae_flags = val;
		eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET;

		/* Low dwords; the non-field bits of max_offset read as ones. */
		base = dw[0] & PCIM_EA_FIELD_MASK;
		max_offset = dw[1] | ~PCIM_EA_FIELD_MASK;
		b = 2;
		/* Consume optional upper-32-bit dwords when flagged 64-bit. */
		if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) {
			base |= (uint64_t)dw[b] << 32UL;
			b++;
		}
		if (((dw[1] & PCIM_EA_IS_64) != 0)
		    && (b < ent_size)) {
			max_offset |= (uint64_t)dw[b] << 32UL;
			b++;
		}

		eae->eae_base = base;
		eae->eae_max_offset = max_offset;

		STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link);

		if (bootverbose) {
			printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n",
			    cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags,
			    (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset);
		}
	}
}
883 #undef REG
884
/*
 * Walk the standard PCI capability list of the device described by
 * 'cfg' and record the location (plus a few key registers) of every
 * capability this driver cares about.  Config space is accessed via
 * the parent bridge 'pcib' because the device is not attached yet.
 * Note: the REG()/WREG() accessor macros intentionally remain defined
 * on return; the VPD routines below reuse them (see final comment).
 */
static void
pci_read_cap(device_t pcib, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define	WREG(n, v, w)	PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
	uint64_t addr;
#endif
	uint32_t val;
	int	ptr, nextptr, ptrptr;

	/* The location of the capability pointer depends on header type. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
	case PCIM_HDRTYPE_BRIDGE:
		ptrptr = PCIR_CAP_PTR;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
		break;
	default:
		return;		/* no extended capabilities support */
	}
	nextptr = REG(ptrptr, 1);	/* sanity check? */

	/*
	 * Read capability entries.
	 */
	while (nextptr != 0) {
		/* Sanity check */
		if (nextptr > 255) {
			printf("illegal PCI extended capability offset %d\n",
			    nextptr);
			return;
		}
		/* Find the next entry */
		ptr = nextptr;
		nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

		/* Process this entry */
		switch (REG(ptr + PCICAP_ID, 1)) {
		case PCIY_PMG:		/* PCI power management */
			cfg->pp.pp_location = ptr;
			cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
			break;
		case PCIY_HT:		/* HyperTransport */
			/* Determine HT-specific capability type. */
			val = REG(ptr + PCIR_HT_COMMAND, 2);

			/* Top three command bits identify host/slave caps. */
			if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
				cfg->ht.ht_slave = ptr;

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
			switch (val & PCIM_HTCMD_CAP_MASK) {
			case PCIM_HTCAP_MSI_MAPPING:
				if (!(val & PCIM_HTCMD_MSI_FIXED)) {
					/* Sanity check the mapping window. */
					addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
					    4);
					addr <<= 32;
					addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
					    4);
					if (addr != MSI_INTEL_ADDR_BASE)
						device_printf(pcib,
	    "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    (long long)addr);
				} else
					addr = MSI_INTEL_ADDR_BASE;

				cfg->ht.ht_msimap = ptr;
				cfg->ht.ht_msictrl = val;
				cfg->ht.ht_msiaddr = addr;
				break;
			}
#endif
			break;
		case PCIY_MSI:		/* PCI MSI */
			cfg->msi.msi_location = ptr;
			cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
			break;
		case PCIY_MSIX:		/* PCI MSI-X */
			cfg->msix.msix_location = ptr;
			cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
			/* Table and PBA registers encode a BAR index plus
			 * an offset within that BAR. */
			val = REG(ptr + PCIR_MSIX_TABLE, 4);
			cfg->msix.msix_table_bar = PCIR_BAR(val &
			    PCIM_MSIX_BIR_MASK);
			cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
			val = REG(ptr + PCIR_MSIX_PBA, 4);
			cfg->msix.msix_pba_bar = PCIR_BAR(val &
			    PCIM_MSIX_BIR_MASK);
			cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
			break;
		case PCIY_VPD:		/* PCI Vital Product Data */
			cfg->vpd.vpd_reg = ptr;
			break;
		case PCIY_SUBVENDOR:
			/* Should always be true. */
			if ((cfg->hdrtype & PCIM_HDRTYPE) ==
			    PCIM_HDRTYPE_BRIDGE) {
				val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
				cfg->subvendor = val & 0xffff;
				cfg->subdevice = val >> 16;
			}
			break;
		case PCIY_PCIX:		/* PCI-X */
			/*
			 * Assume we have a PCI-X chipset if we have
			 * at least one PCI-PCI bridge with a PCI-X
			 * capability.  Note that some systems with
			 * PCI-express or HT chipsets might match on
			 * this check as well.
			 */
			if ((cfg->hdrtype & PCIM_HDRTYPE) ==
			    PCIM_HDRTYPE_BRIDGE)
				pcix_chipset = 1;
			cfg->pcix.pcix_location = ptr;
			break;
		case PCIY_EXPRESS:	/* PCI-express */
			/*
			 * Assume we have a PCI-express chipset if we have
			 * at least one PCI-express device.
			 */
			pcie_chipset = 1;
			cfg->pcie.pcie_location = ptr;
			val = REG(ptr + PCIER_FLAGS, 2);
			cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
			break;
		case PCIY_EA:		/* Enhanced Allocation */
			cfg->ea.ea_location = ptr;
			pci_ea_fill_info(pcib, cfg);
			break;
		default:
			break;
		}
	}

#if defined(__powerpc__)
	/*
	 * Enable the MSI mapping window for all HyperTransport
	 * slaves.  PCI-PCI bridges have their windows enabled via
	 * PCIB_MAP_MSI().
	 */
	if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
	    !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
		device_printf(pcib,
	    "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
		    2);
	}
#endif
	/* REG and WREG use carry through to next functions */
}
1040
1041 /*
1042 * PCI Vital Product Data
1043 */
1044
1045 #define PCI_VPD_TIMEOUT 1000000
1046
1047 static int
pci_read_vpd_reg(device_t pcib,pcicfgregs * cfg,int reg,uint32_t * data)1048 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
1049 {
1050 int count = PCI_VPD_TIMEOUT;
1051
1052 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
1053
1054 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
1055
1056 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
1057 if (--count < 0)
1058 return (ENXIO);
1059 DELAY(1); /* limit looping */
1060 }
1061 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
1062
1063 return (0);
1064 }
1065
#if 0
/*
 * Write one aligned 32-bit word of VPD data at address 'reg'.
 * Mirror image of pci_read_vpd_reg(): the flag bit (0x8000) is set in
 * the address write and polled until the device clears it.  Currently
 * unused; kept for reference.
 */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif
1085
1086 #undef PCI_VPD_TIMEOUT
1087
/*
 * Incremental VPD reader state: VPD is fetched one 32-bit word at a
 * time via pci_read_vpd_reg() and consumed byte-wise by the parsing
 * helpers below.
 */
struct vpd_readstate {
	device_t	pcib;		/* parent bridge used for config access */
	pcicfgregs	*cfg;		/* device whose VPD is being parsed */
	uint32_t	val;		/* buffered word from the VPD data port */
	int	bytesinval;		/* unread bytes remaining in 'val' */
	int	off;			/* VPD address of the next word to fetch */
	uint8_t	cksum;			/* running sum of all consumed bytes */
};
1096
1097 /* return 0 and one byte in *data if no read error, -1 else */
1098 static int
vpd_nextbyte(struct vpd_readstate * vrs,uint8_t * data)1099 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
1100 {
1101 uint32_t reg;
1102 uint8_t byte;
1103
1104 if (vrs->bytesinval == 0) {
1105 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
1106 return (-1);
1107 vrs->val = le32toh(reg);
1108 vrs->off += 4;
1109 byte = vrs->val & 0xff;
1110 vrs->bytesinval = 3;
1111 } else {
1112 vrs->val = vrs->val >> 8;
1113 byte = vrs->val & 0xff;
1114 vrs->bytesinval--;
1115 }
1116
1117 vrs->cksum += byte;
1118 *data = byte;
1119 return (0);
1120 }
1121
1122 /* return 0 on match, -1 and "unget" byte on no match */
1123 static int
vpd_expectbyte(struct vpd_readstate * vrs,uint8_t expected)1124 vpd_expectbyte(struct vpd_readstate *vrs, uint8_t expected)
1125 {
1126 uint8_t data;
1127
1128 if (vpd_nextbyte(vrs, &data) != 0)
1129 return (-1);
1130
1131 if (data == expected)
1132 return (0);
1133
1134 vrs->cksum -= data;
1135 vrs->val = (vrs->val << 8) + data;
1136 vrs->bytesinval++;
1137 return (-1);
1138 }
1139
1140 /* return size if tag matches, -1 on no match, -2 on read error */
1141 static int
vpd_read_tag_size(struct vpd_readstate * vrs,uint8_t vpd_tag)1142 vpd_read_tag_size(struct vpd_readstate *vrs, uint8_t vpd_tag)
1143 {
1144 uint8_t byte1, byte2;
1145
1146 if (vpd_expectbyte(vrs, vpd_tag) != 0)
1147 return (-1);
1148
1149 if ((vpd_tag & 0x80) == 0)
1150 return (vpd_tag & 0x07);
1151
1152 if (vpd_nextbyte(vrs, &byte1) != 0)
1153 return (-2);
1154 if (vpd_nextbyte(vrs, &byte2) != 0)
1155 return (-2);
1156
1157 return ((byte2 << 8) + byte1);
1158 }
1159
1160 /* (re)allocate buffer in multiples of 8 elements */
1161 static void*
alloc_buffer(void * buffer,size_t element_size,int needed)1162 alloc_buffer(void* buffer, size_t element_size, int needed)
1163 {
1164 int alloc, new_alloc;
1165
1166 alloc = roundup2(needed, 8);
1167 new_alloc = roundup2(needed + 1, 8);
1168 if (alloc != new_alloc) {
1169 buffer = reallocf(buffer,
1170 new_alloc * element_size, M_DEVBUF, M_WAITOK | M_ZERO);
1171 }
1172
1173 return (buffer);
1174 }
1175
/*
 * Read a VPD element header: two keyword characters followed by a
 * one-byte data length.  Returns the data length, or -1 on read error.
 * NOTE(review): &keyword[0] is a char * passed where vpd_nextbyte()
 * takes uint8_t * — relies on compatible representation; confirm the
 * build does not warn here.
 */
static int
vpd_read_elem_head(struct vpd_readstate *vrs, char keyword[2])
{
	uint8_t data;

	if (vpd_nextbyte(vrs, &keyword[0]) != 0)
		return (-1);
	if (vpd_nextbyte(vrs, &keyword[1]) != 0)
		return (-1);
	if (vpd_nextbyte(vrs, &data) != 0)
		return (-1);

	return (data);
}
1191
/*
 * Read 'size' bytes of VPD element data into a freshly malloc'd,
 * NUL-terminated buffer.  Returns NULL (and frees the buffer) on read
 * error; otherwise the caller owns the returned string.
 * NOTE(review): &char1 is a char * passed where vpd_nextbyte() takes
 * uint8_t * — same representational assumption as vpd_read_elem_head().
 */
static char *
vpd_read_value(struct vpd_readstate *vrs, int size)
{
	int i;
	char char1;
	char *value;

	value = malloc(size + 1, M_DEVBUF, M_WAITOK);
	for (i = 0; i < size; i++) {
		if (vpd_nextbyte(vrs, &char1) != 0) {
			free(value, M_DEVBUF);
			return (NULL);
		}
		value[i] = char1;
	}
	value[size] = '\0';

	return (value);
}
1212
/*
 * Read one complete VPD element: keyword into *keyword and data into
 * a newly allocated *value.  Returns the data length, or -1 when the
 * header cannot be read or the declared length exceeds 'maxlen'
 * (in which case *value is not touched).
 */
static int
vpd_read_elem_data(struct vpd_readstate *vrs, char keyword[2], char **value, int maxlen)
{
	int elem_len;

	elem_len = vpd_read_elem_head(vrs, keyword);
	if (elem_len < 0 || elem_len > maxlen)
		return (-1);

	*value = vpd_read_value(vrs, elem_len);
	return (elem_len);
}
1226
1227 /* subtract all data following first byte from checksum of RV element */
1228 static void
vpd_fixup_cksum(struct vpd_readstate * vrs,char * rvstring,int len)1229 vpd_fixup_cksum(struct vpd_readstate *vrs, char *rvstring, int len)
1230 {
1231 int i;
1232 uint8_t fixup;
1233
1234 fixup = 0;
1235 for (i = 1; i < len; i++)
1236 fixup += rvstring[i];
1237 vrs->cksum -= fixup;
1238 }
1239
1240 /* fetch one read-only element and return size of heading + data */
1241 static int
next_vpd_ro_elem(struct vpd_readstate * vrs,int maxsize)1242 next_vpd_ro_elem(struct vpd_readstate *vrs, int maxsize)
1243 {
1244 struct pcicfg_vpd *vpd;
1245 pcicfgregs *cfg;
1246 struct vpd_readonly *vpd_ros;
1247 int len;
1248
1249 cfg = vrs->cfg;
1250 vpd = &cfg->vpd;
1251
1252 if (maxsize < 3)
1253 return (-1);
1254 vpd->vpd_ros = alloc_buffer(vpd->vpd_ros, sizeof(*vpd->vpd_ros), vpd->vpd_rocnt);
1255 vpd_ros = &vpd->vpd_ros[vpd->vpd_rocnt];
1256 maxsize -= 3;
1257 len = vpd_read_elem_data(vrs, vpd_ros->keyword, &vpd_ros->value, maxsize);
1258 if (vpd_ros->value == NULL)
1259 return (-1);
1260 vpd_ros->len = len;
1261 if (vpd_ros->keyword[0] == 'R' && vpd_ros->keyword[1] == 'V') {
1262 vpd_fixup_cksum(vrs, vpd_ros->value, len);
1263 if (vrs->cksum != 0) {
1264 pci_printf(cfg,
1265 "invalid VPD checksum %#hhx\n", vrs->cksum);
1266 return (-1);
1267 }
1268 }
1269 vpd->vpd_rocnt++;
1270
1271 return (len + 3);
1272 }
1273
/*
 * Fetch one writable VPD element into cfg->vpd.vpd_w[] and return the
 * number of bytes consumed (3-byte header plus data), or -1 on error.
 */
static int
next_vpd_rw_elem(struct vpd_readstate *vrs, int maxsize)
{
	struct pcicfg_vpd *vpd;
	pcicfgregs *cfg;
	struct vpd_write *vpd_w;
	int len;

	cfg = vrs->cfg;
	vpd = &cfg->vpd;

	/* Need at least the 3-byte element header. */
	if (maxsize < 3)
		return (-1);
	vpd->vpd_w = alloc_buffer(vpd->vpd_w, sizeof(*vpd->vpd_w), vpd->vpd_wcnt);
	/* NOTE(review): with M_WAITOK this cannot return NULL; check kept
	 * for defensiveness but is effectively dead. */
	if (vpd->vpd_w == NULL) {
		pci_printf(cfg, "out of memory");
		return (-1);
	}
	vpd_w = &vpd->vpd_w[vpd->vpd_wcnt];
	maxsize -= 3;
	/*
	 * Record the VPD address where this element's data begins:
	 * the next unread byte sits at (off - bytesinval), and the
	 * data starts 3 bytes later, after the element header.
	 */
	vpd_w->start = vrs->off + 3 - vrs->bytesinval;
	len = vpd_read_elem_data(vrs, vpd_w->keyword, &vpd_w->value, maxsize);
	if (vpd_w->value == NULL)
		return (-1);
	vpd_w->len = len;
	vpd->vpd_wcnt++;

	return (len + 3);
}
1304
1305 /* free all memory allocated for VPD data */
1306 static void
vpd_free(struct pcicfg_vpd * vpd)1307 vpd_free(struct pcicfg_vpd *vpd)
1308 {
1309 int i;
1310
1311 free(vpd->vpd_ident, M_DEVBUF);
1312 for (i = 0; i < vpd->vpd_rocnt; i++)
1313 free(vpd->vpd_ros[i].value, M_DEVBUF);
1314 free(vpd->vpd_ros, M_DEVBUF);
1315 vpd->vpd_rocnt = 0;
1316 for (i = 0; i < vpd->vpd_wcnt; i++)
1317 free(vpd->vpd_w[i].value, M_DEVBUF);
1318 free(vpd->vpd_w, M_DEVBUF);
1319 vpd->vpd_wcnt = 0;
1320 }
1321
1322 #define VPD_TAG_END ((0x0f << 3) | 0) /* small tag, len == 0 */
1323 #define VPD_TAG_IDENT (0x02 | 0x80) /* large tag */
1324 #define VPD_TAG_RO (0x10 | 0x80) /* large tag */
1325 #define VPD_TAG_RW (0x11 | 0x80) /* large tag */
1326
/*
 * Parse the device's complete VPD image: a mandatory ident string,
 * a mandatory read-only element list (checksummed by its RV element),
 * an optional read/write element list, and a terminating END tag.
 * Returns 0 when parsing completed (possibly caching nothing) and -1
 * on errors that require the caller to discard partially-cached data.
 */
static int
pci_parse_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int cksumvalid;
	int size, elem_size;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	/* read VPD ident element - mandatory */
	size = vpd_read_tag_size(&vrs, VPD_TAG_IDENT);
	if (size <= 0) {
		pci_printf(cfg, "no VPD ident found\n");
		return (0);
	}
	cfg->vpd.vpd_ident = vpd_read_value(&vrs, size);
	if (cfg->vpd.vpd_ident == NULL) {
		pci_printf(cfg, "error accessing VPD ident data\n");
		return (0);
	}

	/* read VPD RO elements - mandatory */
	size = vpd_read_tag_size(&vrs, VPD_TAG_RO);
	if (size <= 0) {
		pci_printf(cfg, "no read-only VPD data found\n");
		return (0);
	}
	/* Consume elements until the RO section's byte budget is spent. */
	while (size > 0) {
		elem_size = next_vpd_ro_elem(&vrs, size);
		if (elem_size < 0) {
			pci_printf(cfg, "error accessing read-only VPD data\n");
			return (-1);
		}
		size -= elem_size;
	}
	/* The RV element should have zeroed the running checksum; a
	 * nonzero value here means the image had no (valid) RV element. */
	cksumvalid = (vrs.cksum == 0);
	if (!cksumvalid)
		return (-1);

	/* read VPD RW elements - optional */
	size = vpd_read_tag_size(&vrs, VPD_TAG_RW);
	if (size == -2)		/* -2 is a read error, -1 just "no RW tag" */
		return (-1);
	while (size > 0) {
		elem_size = next_vpd_rw_elem(&vrs, size);
		if (elem_size < 0) {
			pci_printf(cfg, "error accessing writeable VPD data\n");
			return (-1);
		}
		size -= elem_size;
	}

	/* read empty END tag - mandatory */
	size = vpd_read_tag_size(&vrs, VPD_TAG_END);
	if (size != 0) {
		pci_printf(cfg, "No valid VPD end tag found\n");
	}
	return (0);
}
1391
/*
 * Read and cache the device's VPD; on parse failure discard any
 * partially-cached elements.  vpd_cached is set either way so the
 * hardware is probed at most once per device.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	int status;

	status = pci_parse_vpd(pcib, cfg);
	if (status < 0)
		vpd_free(&cfg->vpd);
	cfg->vpd.vpd_cached = 1;
/* The REG/WREG accessors defined in pci_read_cap() end here. */
#undef REG
#undef WREG
}
1404
1405 int
pci_get_vpd_ident_method(device_t dev,device_t child,const char ** identptr)1406 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1407 {
1408 struct pci_devinfo *dinfo = device_get_ivars(child);
1409 pcicfgregs *cfg = &dinfo->cfg;
1410
1411 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1412 pci_read_vpd(device_get_parent(dev), cfg);
1413
1414 *identptr = cfg->vpd.vpd_ident;
1415
1416 if (*identptr == NULL)
1417 return (ENXIO);
1418
1419 return (0);
1420 }
1421
1422 int
pci_get_vpd_readonly_method(device_t dev,device_t child,const char * kw,const char ** vptr)1423 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1424 const char **vptr)
1425 {
1426 struct pci_devinfo *dinfo = device_get_ivars(child);
1427 pcicfgregs *cfg = &dinfo->cfg;
1428 int i;
1429
1430 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1431 pci_read_vpd(device_get_parent(dev), cfg);
1432
1433 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1434 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1435 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1436 *vptr = cfg->vpd.vpd_ros[i].value;
1437 return (0);
1438 }
1439
1440 *vptr = NULL;
1441 return (ENXIO);
1442 }
1443
1444 struct pcicfg_vpd *
pci_fetch_vpd_list(device_t dev)1445 pci_fetch_vpd_list(device_t dev)
1446 {
1447 struct pci_devinfo *dinfo = device_get_ivars(dev);
1448 pcicfgregs *cfg = &dinfo->cfg;
1449
1450 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1451 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1452 return (&cfg->vpd);
1453 }
1454
1455 /*
1456 * Find the requested HyperTransport capability and return the offset
1457 * in configuration space via the pointer provided. The function
1458 * returns 0 on success and an error code otherwise.
1459 */
1460 int
pci_find_htcap_method(device_t dev,device_t child,int capability,int * capreg)1461 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1462 {
1463 int ptr, error;
1464 uint16_t val;
1465
1466 error = pci_find_cap(child, PCIY_HT, &ptr);
1467 if (error)
1468 return (error);
1469
1470 /*
1471 * Traverse the capabilities list checking each HT capability
1472 * to see if it matches the requested HT capability.
1473 */
1474 for (;;) {
1475 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1476 if (capability == PCIM_HTCAP_SLAVE ||
1477 capability == PCIM_HTCAP_HOST)
1478 val &= 0xe000;
1479 else
1480 val &= PCIM_HTCMD_CAP_MASK;
1481 if (val == capability) {
1482 if (capreg != NULL)
1483 *capreg = ptr;
1484 return (0);
1485 }
1486
1487 /* Skip to the next HT capability. */
1488 if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0)
1489 break;
1490 }
1491
1492 return (ENOENT);
1493 }
1494
1495 /*
1496 * Find the next requested HyperTransport capability after start and return
1497 * the offset in configuration space via the pointer provided. The function
1498 * returns 0 on success and an error code otherwise.
1499 */
1500 int
pci_find_next_htcap_method(device_t dev,device_t child,int capability,int start,int * capreg)1501 pci_find_next_htcap_method(device_t dev, device_t child, int capability,
1502 int start, int *capreg)
1503 {
1504 int ptr;
1505 uint16_t val;
1506
1507 KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == PCIY_HT,
1508 ("start capability is not HyperTransport capability"));
1509 ptr = start;
1510
1511 /*
1512 * Traverse the capabilities list checking each HT capability
1513 * to see if it matches the requested HT capability.
1514 */
1515 for (;;) {
1516 /* Skip to the next HT capability. */
1517 if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0)
1518 break;
1519
1520 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1521 if (capability == PCIM_HTCAP_SLAVE ||
1522 capability == PCIM_HTCAP_HOST)
1523 val &= 0xe000;
1524 else
1525 val &= PCIM_HTCMD_CAP_MASK;
1526 if (val == capability) {
1527 if (capreg != NULL)
1528 *capreg = ptr;
1529 return (0);
1530 }
1531 }
1532
1533 return (ENOENT);
1534 }
1535
1536 /*
1537 * Find the requested capability and return the offset in
1538 * configuration space via the pointer provided. The function returns
1539 * 0 on success and an error code otherwise.
1540 */
1541 int
pci_find_cap_method(device_t dev,device_t child,int capability,int * capreg)1542 pci_find_cap_method(device_t dev, device_t child, int capability,
1543 int *capreg)
1544 {
1545 struct pci_devinfo *dinfo = device_get_ivars(child);
1546 pcicfgregs *cfg = &dinfo->cfg;
1547 uint32_t status;
1548 uint8_t ptr;
1549 int cnt;
1550
1551 /*
1552 * Check the CAP_LIST bit of the PCI status register first.
1553 */
1554 status = pci_read_config(child, PCIR_STATUS, 2);
1555 if (!(status & PCIM_STATUS_CAPPRESENT))
1556 return (ENXIO);
1557
1558 /*
1559 * Determine the start pointer of the capabilities list.
1560 */
1561 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1562 case PCIM_HDRTYPE_NORMAL:
1563 case PCIM_HDRTYPE_BRIDGE:
1564 ptr = PCIR_CAP_PTR;
1565 break;
1566 case PCIM_HDRTYPE_CARDBUS:
1567 ptr = PCIR_CAP_PTR_2;
1568 break;
1569 default:
1570 /* XXX: panic? */
1571 return (ENXIO); /* no extended capabilities support */
1572 }
1573 ptr = pci_read_config(child, ptr, 1);
1574
1575 /*
1576 * Traverse the capabilities list. Limit by total theoretical
1577 * maximum number of caps: capability needs at least id and
1578 * next registers, and any type X header cannot contain caps.
1579 */
1580 for (cnt = 0; ptr != 0 && cnt < (PCIE_REGMAX - 0x40) / 2; cnt++) {
1581 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1582 if (capreg != NULL)
1583 *capreg = ptr;
1584 return (0);
1585 }
1586 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1587 }
1588
1589 return (ENOENT);
1590 }
1591
1592 /*
1593 * Find the next requested capability after start and return the offset in
1594 * configuration space via the pointer provided. The function returns
1595 * 0 on success and an error code otherwise.
1596 */
1597 int
pci_find_next_cap_method(device_t dev,device_t child,int capability,int start,int * capreg)1598 pci_find_next_cap_method(device_t dev, device_t child, int capability,
1599 int start, int *capreg)
1600 {
1601 uint8_t ptr;
1602
1603 KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == capability,
1604 ("start capability is not expected capability"));
1605
1606 ptr = pci_read_config(child, start + PCICAP_NEXTPTR, 1);
1607 while (ptr != 0) {
1608 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1609 if (capreg != NULL)
1610 *capreg = ptr;
1611 return (0);
1612 }
1613 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1614 }
1615
1616 return (ENOENT);
1617 }
1618
1619 /*
1620 * Find the requested extended capability and return the offset in
1621 * configuration space via the pointer provided. The function returns
1622 * 0 on success and an error code otherwise.
1623 */
1624 int
pci_find_extcap_method(device_t dev,device_t child,int capability,int * capreg)1625 pci_find_extcap_method(device_t dev, device_t child, int capability,
1626 int *capreg)
1627 {
1628 struct pci_devinfo *dinfo = device_get_ivars(child);
1629 pcicfgregs *cfg = &dinfo->cfg;
1630 uint32_t ecap;
1631 uint16_t ptr;
1632
1633 /* Only supported for PCI-express devices. */
1634 if (cfg->pcie.pcie_location == 0)
1635 return (ENXIO);
1636
1637 ptr = PCIR_EXTCAP;
1638 ecap = pci_read_config(child, ptr, 4);
1639 if (ecap == 0xffffffff || ecap == 0)
1640 return (ENOENT);
1641 for (;;) {
1642 if (PCI_EXTCAP_ID(ecap) == capability) {
1643 if (capreg != NULL)
1644 *capreg = ptr;
1645 return (0);
1646 }
1647 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1648 if (ptr == 0)
1649 break;
1650 ecap = pci_read_config(child, ptr, 4);
1651 }
1652
1653 return (ENOENT);
1654 }
1655
1656 /*
1657 * Find the next requested extended capability after start and return the
1658 * offset in configuration space via the pointer provided. The function
1659 * returns 0 on success and an error code otherwise.
1660 */
1661 int
pci_find_next_extcap_method(device_t dev,device_t child,int capability,int start,int * capreg)1662 pci_find_next_extcap_method(device_t dev, device_t child, int capability,
1663 int start, int *capreg)
1664 {
1665 struct pci_devinfo *dinfo = device_get_ivars(child);
1666 pcicfgregs *cfg = &dinfo->cfg;
1667 uint32_t ecap;
1668 uint16_t ptr;
1669
1670 /* Only supported for PCI-express devices. */
1671 if (cfg->pcie.pcie_location == 0)
1672 return (ENXIO);
1673
1674 ecap = pci_read_config(child, start, 4);
1675 KASSERT(PCI_EXTCAP_ID(ecap) == capability,
1676 ("start extended capability is not expected capability"));
1677 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1678 while (ptr != 0) {
1679 ecap = pci_read_config(child, ptr, 4);
1680 if (PCI_EXTCAP_ID(ecap) == capability) {
1681 if (capreg != NULL)
1682 *capreg = ptr;
1683 return (0);
1684 }
1685 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1686 }
1687
1688 return (ENOENT);
1689 }
1690
1691 /*
1692 * Support for MSI-X message interrupts.
1693 */
/*
 * Program one MSI-X table entry (address low, address high, message
 * data) through the memory-mapped table resource.  The vector control
 * word at entry offset 12 is not touched here; masking/unmasking is
 * handled by pci_mask_msix()/pci_unmask_msix().
 */
static void
pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16;	/* 16 bytes/entry */
	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
	bus_write_4(msix->msix_table_res, offset + 8, data);
}
1707
/*
 * Program and enable an MSI-X vector for 'child'.  Depending on the
 * pci_msix_rewrite_table tunable this either rewrites the single
 * entry directly, or disables MSI-X and rewrites the entire table
 * via pci_resume_msix() (a workaround required by some hypervisors).
 */
void
pci_enable_msix_method(device_t dev, device_t child, u_int index,
    uint64_t address, uint32_t data)
{

	if (pci_msix_rewrite_table) {
		struct pci_devinfo *dinfo = device_get_ivars(child);
		struct pcicfg_msix *msix = &dinfo->cfg.msix;

		/*
		 * Some VM hosts require MSIX to be disabled in the
		 * control register before updating the MSIX table
		 * entries are allowed. It is not enough to only
		 * disable MSIX while updating a single entry. MSIX
		 * must be disabled while updating all entries in the
		 * table.
		 */
		pci_write_config(child,
		    msix->msix_location + PCIR_MSIX_CTRL,
		    msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2);
		/* pci_resume_msix() rewrites the table and re-enables MSI-X. */
		pci_resume_msix(child);
	} else
		pci_write_msix_entry(child, index, address, data);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(child, address);
}
1735
1736 void
pci_mask_msix(device_t dev,u_int index)1737 pci_mask_msix(device_t dev, u_int index)
1738 {
1739 struct pci_devinfo *dinfo = device_get_ivars(dev);
1740 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1741 uint32_t offset, val;
1742
1743 KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index"));
1744 offset = msix->msix_table_offset + index * 16 + 12;
1745 val = bus_read_4(msix->msix_table_res, offset);
1746 val |= PCIM_MSIX_VCTRL_MASK;
1747
1748 /*
1749 * Some devices (e.g. Samsung PM961) do not support reads of this
1750 * register, so always write the new value.
1751 */
1752 bus_write_4(msix->msix_table_res, offset, val);
1753 }
1754
1755 void
pci_unmask_msix(device_t dev,u_int index)1756 pci_unmask_msix(device_t dev, u_int index)
1757 {
1758 struct pci_devinfo *dinfo = device_get_ivars(dev);
1759 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1760 uint32_t offset, val;
1761
1762 KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index"));
1763 offset = msix->msix_table_offset + index * 16 + 12;
1764 val = bus_read_4(msix->msix_table_res, offset);
1765 val &= ~PCIM_MSIX_VCTRL_MASK;
1766
1767 /*
1768 * Some devices (e.g. Samsung PM961) do not support reads of this
1769 * register, so always write the new value.
1770 */
1771 bus_write_4(msix->msix_table_res, offset, val);
1772 }
1773
1774 int
pci_pending_msix(device_t dev,u_int index)1775 pci_pending_msix(device_t dev, u_int index)
1776 {
1777 struct pci_devinfo *dinfo = device_get_ivars(dev);
1778 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1779 uint32_t offset, bit;
1780
1781 KASSERT(msix->msix_table_len > index, ("bogus index"));
1782 offset = msix->msix_pba_offset + (index / 32) * 4;
1783 bit = 1 << index % 32;
1784 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1785 }
1786
1787 /*
1788 * Restore MSI-X registers and table during resume. If MSI-X is
1789 * enabled then walk the virtual table to restore the actual MSI-X
1790 * table.
1791 */
1792 static void
pci_resume_msix(device_t dev)1793 pci_resume_msix(device_t dev)
1794 {
1795 struct pci_devinfo *dinfo = device_get_ivars(dev);
1796 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1797 struct msix_table_entry *mte;
1798 struct msix_vector *mv;
1799 u_int i, msgnum;
1800
1801 if (msix->msix_alloc > 0) {
1802 msgnum = PCI_MSIX_MSGNUM(msix->msix_ctrl);
1803
1804 /* First, mask all vectors. */
1805 for (i = 0; i < msgnum; i++)
1806 pci_mask_msix(dev, i);
1807
1808 /* Second, program any messages with at least one handler. */
1809 for (i = 0; i < msix->msix_table_len; i++) {
1810 mte = &msix->msix_table[i];
1811 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1812 continue;
1813 mv = &msix->msix_vectors[mte->mte_vector - 1];
1814 pci_write_msix_entry(dev, i, mv->mv_address,
1815 mv->mv_data);
1816 pci_unmask_msix(dev, i);
1817 }
1818 }
1819 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1820 msix->msix_ctrl, 2);
1821 }
1822
1823 /*
1824 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1825 * returned in *count. After this function returns, each message will be
1826 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1827 */
1828 int
pci_alloc_msix_method(device_t dev,device_t child,int * count)1829 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1830 {
1831 struct pci_devinfo *dinfo = device_get_ivars(child);
1832 pcicfgregs *cfg = &dinfo->cfg;
1833 struct resource_list_entry *rle;
1834 u_int actual, i, max;
1835 int error, irq;
1836 uint16_t ctrl, msgnum;
1837
1838 /* Don't let count == 0 get us into trouble. */
1839 if (*count < 1)
1840 return (EINVAL);
1841
1842 /* If rid 0 is allocated, then fail. */
1843 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1844 if (rle != NULL && rle->res != NULL)
1845 return (ENXIO);
1846
1847 /* Already have allocated messages? */
1848 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1849 return (ENXIO);
1850
1851 /* If MSI-X is blacklisted for this system, fail. */
1852 if (pci_msix_blacklisted())
1853 return (ENXIO);
1854
1855 /* MSI-X capability present? */
1856 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1857 return (ENODEV);
1858
1859 /* Make sure the appropriate BARs are mapped. */
1860 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1861 cfg->msix.msix_table_bar);
1862 if (rle == NULL || rle->res == NULL ||
1863 !(rman_get_flags(rle->res) & RF_ACTIVE))
1864 return (ENXIO);
1865 cfg->msix.msix_table_res = rle->res;
1866 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1867 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1868 cfg->msix.msix_pba_bar);
1869 if (rle == NULL || rle->res == NULL ||
1870 !(rman_get_flags(rle->res) & RF_ACTIVE))
1871 return (ENXIO);
1872 }
1873 cfg->msix.msix_pba_res = rle->res;
1874
1875 ctrl = pci_read_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1876 2);
1877 msgnum = PCI_MSIX_MSGNUM(ctrl);
1878 if (bootverbose)
1879 device_printf(child,
1880 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1881 *count, msgnum);
1882 max = min(*count, msgnum);
1883 for (i = 0; i < max; i++) {
1884 /* Allocate a message. */
1885 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1886 if (error) {
1887 if (i == 0)
1888 return (error);
1889 break;
1890 }
1891 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1892 irq, 1);
1893 }
1894 actual = i;
1895
1896 if (bootverbose) {
1897 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1898 if (actual == 1)
1899 device_printf(child, "using IRQ %ju for MSI-X\n",
1900 rle->start);
1901 else {
1902 bool run;
1903
1904 /*
1905 * Be fancy and try to print contiguous runs of
1906 * IRQ values as ranges. 'irq' is the previous IRQ.
1907 * 'run' is true if we are in a range.
1908 */
1909 device_printf(child, "using IRQs %ju", rle->start);
1910 irq = rle->start;
1911 run = false;
1912 for (i = 1; i < actual; i++) {
1913 rle = resource_list_find(&dinfo->resources,
1914 SYS_RES_IRQ, i + 1);
1915
1916 /* Still in a run? */
1917 if (rle->start == irq + 1) {
1918 run = true;
1919 irq++;
1920 continue;
1921 }
1922
1923 /* Finish previous range. */
1924 if (run) {
1925 printf("-%d", irq);
1926 run = false;
1927 }
1928
1929 /* Start new range. */
1930 printf(",%ju", rle->start);
1931 irq = rle->start;
1932 }
1933
1934 /* Unfinished range? */
1935 if (run)
1936 printf("-%d", irq);
1937 printf(" for MSI-X\n");
1938 }
1939 }
1940
1941 /*
1942 * Mask all vectors. Note that the message index assertion in
1943 * pci_mask_msix requires msix_ctrl to be set.
1944 */
1945 cfg->msix.msix_ctrl = ctrl;
1946 for (i = 0; i < msgnum; i++)
1947 pci_mask_msix(child, i);
1948
1949 /* Allocate and initialize vector data and virtual table. */
1950 cfg->msix.msix_vectors = mallocarray(actual, sizeof(struct msix_vector),
1951 M_DEVBUF, M_WAITOK | M_ZERO);
1952 cfg->msix.msix_table = mallocarray(actual,
1953 sizeof(struct msix_table_entry), M_DEVBUF, M_WAITOK | M_ZERO);
1954 for (i = 0; i < actual; i++) {
1955 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1956 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1957 cfg->msix.msix_table[i].mte_vector = i + 1;
1958 }
1959
1960 /* Update control register to enable MSI-X. */
1961 ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1962 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1963 ctrl, 2);
1964 cfg->msix.msix_ctrl = ctrl;
1965
1966 /* Update counts of alloc'd messages. */
1967 cfg->msix.msix_alloc = actual;
1968 cfg->msix.msix_table_len = actual;
1969 *count = actual;
1970 return (0);
1971 }
1972
1973 /*
1974 * By default, pci_alloc_msix() will assign the allocated IRQ
1975 * resources consecutively to the first N messages in the MSI-X table.
1976 * However, device drivers may want to use different layouts if they
1977 * either receive fewer messages than they asked for, or they wish to
1978 * populate the MSI-X table sparsely. This method allows the driver
1979 * to specify what layout it wants. It must be called after a
1980 * successful pci_alloc_msix() but before any of the associated
1981 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1982 *
1983 * The 'vectors' array contains 'count' message vectors. The array
1984 * maps directly to the MSI-X table in that index 0 in the array
1985 * specifies the vector for the first message in the MSI-X table, etc.
1986 * The vector value in each array index can either be 0 to indicate
1987 * that no vector should be assigned to a message slot, or it can be a
1988 * number from 1 to N (where N is the count returned from a
 * successful call to pci_alloc_msix()) to indicate which message
1990 * vector (IRQ) to be used for the corresponding message.
1991 *
1992 * On successful return, each message with a non-zero vector will have
1993 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1994 * 1. Additionally, if any of the IRQs allocated via the previous
1995 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1996 * will be freed back to the system automatically.
1997 *
1998 * For example, suppose a driver has a MSI-X table with 6 messages and
1999 * asks for 6 messages, but pci_alloc_msix() only returns a count of
2000 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
2001 * C. After the call to pci_alloc_msix(), the device will be setup to
2002 * have an MSI-X table of ABC--- (where - means no vector assigned).
2003 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
2004 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
2005 * be freed back to the system. This device will also have valid
2006 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
2007 *
2008 * In any case, the SYS_RES_IRQ rid X will always map to the message
2009 * at MSI-X table index X - 1 and will only be valid if a vector is
2010 * assigned to that table entry.
2011 */
int
pci_remap_msix_method(device_t dev, device_t child, int count,
    const u_int *vectors)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	u_int i, irq, j;
	bool *used;

	/*
	 * Have to have at least one message in the table but the
	 * table can't be bigger than the actual MSI-X table in the
	 * device.
	 */
	if (count < 1 || count > PCI_MSIX_MSGNUM(msix->msix_ctrl))
		return (EINVAL);

	/* Sanity check the vectors: each slot is 0 or a valid 1-based vector. */
	for (i = 0; i < count; i++)
		if (vectors[i] > msix->msix_alloc)
			return (EINVAL);

	/*
	 * Make sure there aren't any holes in the vectors to be used.
	 * It's a big pain to support it, and it doesn't really make
	 * sense anyway.  Also, at least one vector must be used.
	 */
	used = mallocarray(msix->msix_alloc, sizeof(*used), M_DEVBUF, M_WAITOK |
	    M_ZERO);
	for (i = 0; i < count; i++)
		if (vectors[i] != 0)
			used[vectors[i] - 1] = true;
	/* A used vector following an unused one is a hole. */
	for (i = 0; i < msix->msix_alloc - 1; i++)
		if (!used[i] && used[i + 1]) {
			free(used, M_DEVBUF);
			return (EINVAL);
		}
	if (!used[0]) {
		free(used, M_DEVBUF);
		return (EINVAL);
	}

	/*
	 * Make sure none of the resources are allocated: a message with
	 * an established handler or an outstanding SYS_RES_IRQ allocation
	 * cannot be remapped.
	 */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0) {
			free(used, M_DEVBUF);
			return (EBUSY);
		}
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL) {
			free(used, M_DEVBUF);
			return (EBUSY);
		}
	}

	/* Free the existing resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}

	/*
	 * Build the new virtual table keeping track of which vectors are
	 * used.
	 */
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table = mallocarray(count, sizeof(struct msix_table_entry),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < count; i++)
		msix->msix_table[i].mte_vector = vectors[i];
	msix->msix_table_len = count;

	/*
	 * Free any unused IRQs and resize the vectors array if necessary.
	 * Since the "no holes" check passed above, unused vectors form a
	 * contiguous tail of the array.
	 */
	j = msix->msix_alloc - 1;
	if (!used[j]) {
		struct msix_vector *vec;

		/* Release unused IRQs back to the parent bridge. */
		while (!used[j]) {
			PCIB_RELEASE_MSIX(device_get_parent(dev), child,
			    msix->msix_vectors[j].mv_irq);
			j--;
		}
		vec = mallocarray(j + 1, sizeof(struct msix_vector), M_DEVBUF,
		    M_WAITOK);
		bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
		    (j + 1));
		free(msix->msix_vectors, M_DEVBUF);
		msix->msix_vectors = vec;
		msix->msix_alloc = j + 1;
	}
	free(used, M_DEVBUF);

	/* Map the IRQs onto the rids (rid == table index + 1). */
	for (i = 0; i < count; i++) {
		if (vectors[i] == 0)
			continue;
		irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}

	if (bootverbose) {
		device_printf(child, "Remapped MSI-X IRQs as: ");
		for (i = 0; i < count; i++) {
			if (i != 0)
				printf(", ");
			if (vectors[i] == 0)
				printf("---");
			else
				printf("%d",
				    msix->msix_vectors[vectors[i] - 1].mv_irq);
		}
		printf("\n");
	}

	return (0);
}
2134
/*
 * Release all MSI-X messages allocated to 'child'.  Fails with EBUSY
 * if any message still has a handler established or its SYS_RES_IRQ
 * resource allocated; returns ENODEV when nothing is allocated.
 */
static int
pci_release_msix(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	u_int i;

	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
		return (ENODEV);

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0)
			return (EBUSY);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
			return (EBUSY);
	}

	/* Update control register to disable MSI-X before freeing state. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);

	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;

	/* Release the IRQs back to the parent bridge. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq);
	free(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
	return (0);
}
2181
2182 /*
2183 * Return the max supported MSI-X messages this device supports.
2184 * Basically, assuming the MD code can alloc messages, this function
2185 * should return the maximum value that pci_alloc_msix() can return.
2186 * Thus, it is subject to the tunables, etc.
2187 */
2188 int
pci_msix_count_method(device_t dev,device_t child)2189 pci_msix_count_method(device_t dev, device_t child)
2190 {
2191 struct pci_devinfo *dinfo = device_get_ivars(child);
2192 struct pcicfg_msix *msix = &dinfo->cfg.msix;
2193 uint16_t ctrl;
2194
2195 if (pci_do_msix && msix->msix_location != 0) {
2196 ctrl = pci_read_config(child, msix->msix_location +
2197 PCIR_MSI_CTRL, 2);
2198 return (PCI_MSIX_MSGNUM(ctrl));
2199 }
2200 return (0);
2201 }
2202
2203 int
pci_msix_pba_bar_method(device_t dev,device_t child)2204 pci_msix_pba_bar_method(device_t dev, device_t child)
2205 {
2206 struct pci_devinfo *dinfo = device_get_ivars(child);
2207 struct pcicfg_msix *msix = &dinfo->cfg.msix;
2208
2209 if (pci_do_msix && msix->msix_location != 0)
2210 return (msix->msix_pba_bar);
2211 return (-1);
2212 }
2213
2214 int
pci_msix_table_bar_method(device_t dev,device_t child)2215 pci_msix_table_bar_method(device_t dev, device_t child)
2216 {
2217 struct pci_devinfo *dinfo = device_get_ivars(child);
2218 struct pcicfg_msix *msix = &dinfo->cfg.msix;
2219
2220 if (pci_do_msix && msix->msix_location != 0)
2221 return (msix->msix_table_bar);
2222 return (-1);
2223 }
2224
2225 /*
2226 * HyperTransport MSI mapping control
2227 */
2228 void
pci_ht_map_msi(device_t dev,uint64_t addr)2229 pci_ht_map_msi(device_t dev, uint64_t addr)
2230 {
2231 struct pci_devinfo *dinfo = device_get_ivars(dev);
2232 struct pcicfg_ht *ht = &dinfo->cfg.ht;
2233
2234 if (!ht->ht_msimap)
2235 return;
2236
2237 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
2238 ht->ht_msiaddr >> 20 == addr >> 20) {
2239 /* Enable MSI -> HT mapping. */
2240 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
2241 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
2242 ht->ht_msictrl, 2);
2243 }
2244
2245 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
2246 /* Disable MSI -> HT mapping. */
2247 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
2248 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
2249 ht->ht_msictrl, 2);
2250 }
2251 }
2252
2253 int
pci_get_relaxed_ordering_enabled(device_t dev)2254 pci_get_relaxed_ordering_enabled(device_t dev)
2255 {
2256 struct pci_devinfo *dinfo = device_get_ivars(dev);
2257 int cap;
2258 uint16_t val;
2259
2260 cap = dinfo->cfg.pcie.pcie_location;
2261 if (cap == 0)
2262 return (0);
2263 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2264 val &= PCIEM_CTL_RELAXED_ORD_ENABLE;
2265 return (val != 0);
2266 }
2267
2268 int
pci_get_max_payload(device_t dev)2269 pci_get_max_payload(device_t dev)
2270 {
2271 struct pci_devinfo *dinfo = device_get_ivars(dev);
2272 int cap;
2273 uint16_t val;
2274
2275 cap = dinfo->cfg.pcie.pcie_location;
2276 if (cap == 0)
2277 return (0);
2278 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2279 val &= PCIEM_CTL_MAX_PAYLOAD;
2280 val >>= 5;
2281 return (1 << (val + 7));
2282 }
2283
2284 int
pci_get_max_read_req(device_t dev)2285 pci_get_max_read_req(device_t dev)
2286 {
2287 struct pci_devinfo *dinfo = device_get_ivars(dev);
2288 int cap;
2289 uint16_t val;
2290
2291 cap = dinfo->cfg.pcie.pcie_location;
2292 if (cap == 0)
2293 return (0);
2294 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2295 val &= PCIEM_CTL_MAX_READ_REQUEST;
2296 val >>= 12;
2297 return (1 << (val + 7));
2298 }
2299
2300 int
pci_set_max_read_req(device_t dev,int size)2301 pci_set_max_read_req(device_t dev, int size)
2302 {
2303 struct pci_devinfo *dinfo = device_get_ivars(dev);
2304 int cap;
2305 uint16_t val;
2306
2307 cap = dinfo->cfg.pcie.pcie_location;
2308 if (cap == 0)
2309 return (0);
2310 if (size < 128)
2311 size = 128;
2312 if (size > 4096)
2313 size = 4096;
2314 size = (1 << (fls(size) - 1));
2315 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2316 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
2317 val |= (fls(size) - 8) << 12;
2318 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
2319 return (size);
2320 }
2321
2322 uint32_t
pcie_read_config(device_t dev,int reg,int width)2323 pcie_read_config(device_t dev, int reg, int width)
2324 {
2325 struct pci_devinfo *dinfo = device_get_ivars(dev);
2326 int cap;
2327
2328 cap = dinfo->cfg.pcie.pcie_location;
2329 if (cap == 0) {
2330 if (width == 2)
2331 return (0xffff);
2332 return (0xffffffff);
2333 }
2334
2335 return (pci_read_config(dev, cap + reg, width));
2336 }
2337
2338 void
pcie_write_config(device_t dev,int reg,uint32_t value,int width)2339 pcie_write_config(device_t dev, int reg, uint32_t value, int width)
2340 {
2341 struct pci_devinfo *dinfo = device_get_ivars(dev);
2342 int cap;
2343
2344 cap = dinfo->cfg.pcie.pcie_location;
2345 if (cap == 0)
2346 return;
2347 pci_write_config(dev, cap + reg, value, width);
2348 }
2349
2350 /*
2351 * Adjusts a PCI-e capability register by clearing the bits in mask
2352 * and setting the bits in (value & mask). Bits not set in mask are
2353 * not adjusted.
2354 *
2355 * Returns the old value on success or all ones on failure.
2356 */
2357 uint32_t
pcie_adjust_config(device_t dev,int reg,uint32_t mask,uint32_t value,int width)2358 pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
2359 int width)
2360 {
2361 struct pci_devinfo *dinfo = device_get_ivars(dev);
2362 uint32_t old, new;
2363 int cap;
2364
2365 cap = dinfo->cfg.pcie.pcie_location;
2366 if (cap == 0) {
2367 if (width == 2)
2368 return (0xffff);
2369 return (0xffffffff);
2370 }
2371
2372 old = pci_read_config(dev, cap + reg, width);
2373 new = old & ~mask;
2374 new |= (value & mask);
2375 pci_write_config(dev, cap + reg, new, width);
2376 return (old);
2377 }
2378
2379 /*
2380 * Support for MSI message signalled interrupts.
2381 */
/*
 * Program the MSI address/data registers for 'child' and enable MSI
 * delivery.  The data register lives at a different offset when the
 * capability advertises 64-bit addressing.
 */
void
pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
    uint16_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Write data and address values before enabling delivery. */
	pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		    address >> 32, 4);
		pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
		    data, 2);
	} else
		pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
		    2);

	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(child, address);
}
2409
2410 void
pci_disable_msi_method(device_t dev,device_t child)2411 pci_disable_msi_method(device_t dev, device_t child)
2412 {
2413 struct pci_devinfo *dinfo = device_get_ivars(child);
2414 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2415
2416 /* Disable MSI -> HT mapping. */
2417 pci_ht_map_msi(child, 0);
2418
2419 /* Disable MSI in the control register. */
2420 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
2421 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2422 msi->msi_ctrl, 2);
2423 }
2424
2425 /*
2426 * Restore MSI registers during resume. If MSI is enabled then
2427 * restore the data and address registers in addition to the control
2428 * register.
2429 */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	/*
	 * If MSI was enabled before suspend, restore the cached
	 * address/data registers before re-writing the control word.
	 */
	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	/* Always restore the control register (even if MSI is disabled). */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
2455
/*
 * Re-program the address/data pair for the given IRQ after it has
 * been moved (e.g. to another CPU).  Handles both MSI and MSI-X;
 * returns ENOENT if the IRQ does not belong to this device.
 */
static int
pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	u_int i, j;
	int error;

	/*
	 * Handle MSI first.  We try to find this IRQ among our list
	 * of MSI IRQs.  If we find it, we request updated address and
	 * data registers and apply the results.
	 */
	if (cfg->msi.msi_alloc > 0) {
		/* If we don't have any active handlers, nothing to do. */
		if (cfg->msi.msi_handlers == 0)
			return (0);
		for (i = 0; i < cfg->msi.msi_alloc; i++) {
			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
			    i + 1);
			if (rle->start == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				/* Disable, update cached values, re-enable. */
				pci_disable_msi(dev);
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(dev, addr, data);
				return (0);
			}
		}
		return (ENOENT);
	}

	/*
	 * For MSI-X, we check to see if we have this IRQ.  If we do,
	 * we request the updated mapping info.  If that works, we go
	 * through all the slots that use this IRQ and update them.
	 */
	if (cfg->msix.msix_alloc > 0) {
		bool found = false;

		for (i = 0; i < cfg->msix.msix_alloc; i++) {
			mv = &cfg->msix.msix_vectors[i];
			if (mv->mv_irq == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				mv->mv_address = addr;
				mv->mv_data = data;
				/* Update every table slot using this vector. */
				for (j = 0; j < cfg->msix.msix_table_len; j++) {
					mte = &cfg->msix.msix_table[j];
					if (mte->mte_vector != i + 1)
						continue;
					if (mte->mte_handlers == 0)
						continue;
					/* Mask while rewriting the entry. */
					pci_mask_msix(dev, j);
					pci_enable_msix(dev, j, addr, data);
					pci_unmask_msix(dev, j);
				}
				found = true;
			}
		}
		return (found ? 0 : ENOENT);
	}

	return (ENOENT);
}
2531
2532 /*
2533 * Returns true if the specified device is blacklisted because MSI
2534 * doesn't work.
2535 */
2536 int
pci_msi_device_blacklisted(device_t dev)2537 pci_msi_device_blacklisted(device_t dev)
2538 {
2539
2540 if (!pci_honor_msi_blacklist)
2541 return (0);
2542
2543 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2544 }
2545
2546 /*
2547 * Determine if MSI is blacklisted globally on this system. Currently,
2548 * we just check for blacklisted chipsets as represented by the
2549 * host-PCI bridge at device 0:0:0. In the future, it may become
2550 * necessary to check other system attributes, such as the kenv values
2551 * that give the motherboard manufacturer and model number.
2552 */
2553 static int
pci_msi_blacklisted(void)2554 pci_msi_blacklisted(void)
2555 {
2556 device_t dev;
2557
2558 if (!pci_honor_msi_blacklist)
2559 return (0);
2560
2561 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2562 if (!(pcie_chipset || pcix_chipset)) {
2563 if (vm_guest != VM_GUEST_NO) {
2564 /*
2565 * Whitelist older chipsets in virtual
2566 * machines known to support MSI.
2567 */
2568 dev = pci_find_bsf(0, 0, 0);
2569 if (dev != NULL)
2570 return (!pci_has_quirk(pci_get_devid(dev),
2571 PCI_QUIRK_ENABLE_MSI_VM));
2572 }
2573 return (1);
2574 }
2575
2576 dev = pci_find_bsf(0, 0, 0);
2577 if (dev != NULL)
2578 return (pci_msi_device_blacklisted(dev));
2579 return (0);
2580 }
2581
2582 /*
2583 * Returns true if the specified device is blacklisted because MSI-X
2584 * doesn't work. Note that this assumes that if MSI doesn't work,
2585 * MSI-X doesn't either.
2586 */
2587 int
pci_msix_device_blacklisted(device_t dev)2588 pci_msix_device_blacklisted(device_t dev)
2589 {
2590
2591 if (!pci_honor_msi_blacklist)
2592 return (0);
2593
2594 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2595 return (1);
2596
2597 return (pci_msi_device_blacklisted(dev));
2598 }
2599
2600 /*
2601 * Determine if MSI-X is blacklisted globally on this system. If MSI
2602 * is blacklisted, assume that MSI-X is as well. Check for additional
2603 * chipsets where MSI works but MSI-X does not.
2604 */
2605 static int
pci_msix_blacklisted(void)2606 pci_msix_blacklisted(void)
2607 {
2608 device_t dev;
2609
2610 if (!pci_honor_msi_blacklist)
2611 return (0);
2612
2613 dev = pci_find_bsf(0, 0, 0);
2614 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2615 PCI_QUIRK_DISABLE_MSIX))
2616 return (1);
2617
2618 return (pci_msi_blacklisted());
2619 }
2620
2621 /*
2622 * Attempt to allocate *count MSI messages. The actual number allocated is
2623 * returned in *count. After this function returns, each message will be
2624 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2625 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	u_int actual, i;
	int error, irqs[32];
	uint16_t ctrl, msgnum;

	/* Don't let count == 0 get us into trouble. */
	if (*count < 1)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	ctrl = pci_read_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, 2);
	msgnum = PCI_MSI_MSGNUM(ctrl);
	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%u supported)\n",
		    *count, msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	/* Keep halving the request until the parent bridge can satisfy it. */
	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    actual, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			bool run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = false;
			for (i = 1; i < actual; i++) {
				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = true;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irqs[i - 1]);
					run = false;
				}

				/* Start new range. */
				printf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irqs[actual - 1]);
			printf(" for MSI\n");
		}
	}

	/* Update control register with actual count (MME is log2-encoded). */
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
2745
2746 /* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	u_int i, irqs[32];
	int error;

	/* Try MSI-X first. */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
		return (error);

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		/* Collect the IRQ numbers for the bridge release below. */
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
2795
2796 /*
2797 * Return the max supported MSI messages this device supports.
2798 * Basically, assuming the MD code can alloc messages, this function
2799 * should return the maximum value that pci_alloc_msi() can return.
2800 * Thus, it is subject to the tunables, etc.
2801 */
2802 int
pci_msi_count_method(device_t dev,device_t child)2803 pci_msi_count_method(device_t dev, device_t child)
2804 {
2805 struct pci_devinfo *dinfo = device_get_ivars(child);
2806 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2807 uint16_t ctrl;
2808
2809 if (pci_do_msi && msi->msi_location != 0) {
2810 ctrl = pci_read_config(child, msi->msi_location + PCIR_MSI_CTRL,
2811 2);
2812 return (PCI_MSI_MSGNUM(ctrl));
2813 }
2814 return (0);
2815 }
2816
2817 /* free pcicfgregs structure and all depending data structures */
2818
2819 int
pci_freecfg(struct pci_devinfo * dinfo)2820 pci_freecfg(struct pci_devinfo *dinfo)
2821 {
2822 struct devlist *devlist_head;
2823 struct pci_map *pm, *next;
2824
2825 devlist_head = &pci_devq;
2826
2827 if (dinfo->cfg.vpd.vpd_reg)
2828 vpd_free(&dinfo->cfg.vpd);
2829
2830 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2831 free(pm, M_DEVBUF);
2832 }
2833 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2834 free(dinfo, M_DEVBUF);
2835
2836 /* increment the generation count */
2837 pci_generation++;
2838
2839 /* we're losing one device */
2840 pci_numdevs--;
2841 return (0);
2842 }
2843
2844 /*
2845 * PCI power manangement
2846 */
int
pci_set_powerstate_method(device_t dev, device_t child, int state)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	uint16_t status;
	int oldstate, highest, delay;

	/* No power-management capability: cannot change state. */
	if (cfg->pp.pp_location == 0)
		return (EOPNOTSUPP);

	/*
	 * Optimize a no state change request away.  While it would be OK to
	 * write to the hardware in theory, some devices have shown odd
	 * behavior when going from D3 -> D3.
	 */
	oldstate = pci_get_powerstate(child);
	if (oldstate == state)
		return (0);

	/*
	 * The PCI power management specification states that after a state
	 * transition between PCI power states, system software must
	 * guarantee a minimal delay before the function accesses the device.
	 * Compute the worst case delay that we need to guarantee before we
	 * access the device.  Many devices will be responsive much more
	 * quickly than this delay, but there are some that don't respond
	 * instantly to state changes.  Transitions to/from D3 state require
	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
	 * is done below with DELAY rather than a sleeper function because
	 * this function can be called from contexts where we cannot sleep.
	 */
	highest = (oldstate > state) ? oldstate : state;
	if (highest == PCI_POWERSTATE_D3)
		delay = 10000;
	else if (highest == PCI_POWERSTATE_D2)
		delay = 200;
	else
		delay = 0;
	/* Preserve the rest of the status register; replace the state bits. */
	status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location +
	    PCIR_POWER_STATUS, 2) & ~PCIM_PSTAT_DMASK;
	switch (state) {
	case PCI_POWERSTATE_D0:
		status |= PCIM_PSTAT_D0;
		break;
	case PCI_POWERSTATE_D1:
		/* D1 support is optional; reject if not advertised. */
		if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
			return (EOPNOTSUPP);
		status |= PCIM_PSTAT_D1;
		break;
	case PCI_POWERSTATE_D2:
		/* D2 support is optional; reject if not advertised. */
		if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
			return (EOPNOTSUPP);
		status |= PCIM_PSTAT_D2;
		break;
	case PCI_POWERSTATE_D3:
		status |= PCIM_PSTAT_D3;
		break;
	default:
		return (EINVAL);
	}

	if (bootverbose)
		pci_printf(cfg, "Transition from %s to %s\n",
		    pci_powerstate_to_str(oldstate),
		    pci_powerstate_to_str(state));

	PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_location + PCIR_POWER_STATUS,
	    status, 2);
	if (delay)
		DELAY(delay);
	return (0);
}
2920
2921 int
pci_get_powerstate_method(device_t dev,device_t child)2922 pci_get_powerstate_method(device_t dev, device_t child)
2923 {
2924 struct pci_devinfo *dinfo = device_get_ivars(child);
2925 pcicfgregs *cfg = &dinfo->cfg;
2926 uint16_t status;
2927 int result;
2928
2929 if (cfg->pp.pp_location != 0) {
2930 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location +
2931 PCIR_POWER_STATUS, 2);
2932 switch (status & PCIM_PSTAT_DMASK) {
2933 case PCIM_PSTAT_D0:
2934 result = PCI_POWERSTATE_D0;
2935 break;
2936 case PCIM_PSTAT_D1:
2937 result = PCI_POWERSTATE_D1;
2938 break;
2939 case PCIM_PSTAT_D2:
2940 result = PCI_POWERSTATE_D2;
2941 break;
2942 case PCIM_PSTAT_D3:
2943 result = PCI_POWERSTATE_D3;
2944 break;
2945 default:
2946 result = PCI_POWERSTATE_UNKNOWN;
2947 break;
2948 }
2949 } else {
2950 /* No support, device is always at D0 */
2951 result = PCI_POWERSTATE_D0;
2952 }
2953 return (result);
2954 }
2955
2956 /* Clear any active PME# and disable PME# generation. */
2957 void
pci_clear_pme(device_t dev)2958 pci_clear_pme(device_t dev)
2959 {
2960 struct pci_devinfo *dinfo = device_get_ivars(dev);
2961 pcicfgregs *cfg = &dinfo->cfg;
2962 uint16_t status;
2963
2964 if (cfg->pp.pp_location != 0) {
2965 status = pci_read_config(dev, dinfo->cfg.pp.pp_location +
2966 PCIR_POWER_STATUS, 2);
2967 status &= ~PCIM_PSTAT_PMEENABLE;
2968 status |= PCIM_PSTAT_PME;
2969 pci_write_config(dev, dinfo->cfg.pp.pp_location +
2970 PCIR_POWER_STATUS, status, 2);
2971 }
2972 }
2973
2974 /* Clear any active PME# and enable PME# generation. */
2975 void
pci_enable_pme(device_t dev)2976 pci_enable_pme(device_t dev)
2977 {
2978 struct pci_devinfo *dinfo = device_get_ivars(dev);
2979 pcicfgregs *cfg = &dinfo->cfg;
2980 uint16_t status;
2981
2982 if (cfg->pp.pp_location != 0) {
2983 status = pci_read_config(dev, dinfo->cfg.pp.pp_location +
2984 PCIR_POWER_STATUS, 2);
2985 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2986 pci_write_config(dev, dinfo->cfg.pp.pp_location +
2987 PCIR_POWER_STATUS, status, 2);
2988 }
2989 }
2990
2991 bool
pci_has_pm(device_t dev)2992 pci_has_pm(device_t dev)
2993 {
2994 struct pci_devinfo *dinfo = device_get_ivars(dev);
2995 pcicfgregs *cfg = &dinfo->cfg;
2996
2997 return (cfg->pp.pp_location != 0);
2998 }
2999
3000 /*
3001 * Some convenience functions for PCI device drivers.
3002 */
3003
3004 static __inline void
pci_set_command_bit(device_t dev,device_t child,uint16_t bit)3005 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
3006 {
3007 uint16_t command;
3008
3009 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
3010 command |= bit;
3011 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
3012 }
3013
3014 static __inline void
pci_clear_command_bit(device_t dev,device_t child,uint16_t bit)3015 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
3016 {
3017 uint16_t command;
3018
3019 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
3020 command &= ~bit;
3021 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
3022 }
3023
3024 int
pci_enable_busmaster_method(device_t dev,device_t child)3025 pci_enable_busmaster_method(device_t dev, device_t child)
3026 {
3027 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
3028 return (0);
3029 }
3030
3031 int
pci_disable_busmaster_method(device_t dev,device_t child)3032 pci_disable_busmaster_method(device_t dev, device_t child)
3033 {
3034 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
3035 return (0);
3036 }
3037
3038 int
pci_enable_io_method(device_t dev,device_t child,int space)3039 pci_enable_io_method(device_t dev, device_t child, int space)
3040 {
3041 uint16_t bit;
3042
3043 switch(space) {
3044 case SYS_RES_IOPORT:
3045 bit = PCIM_CMD_PORTEN;
3046 break;
3047 case SYS_RES_MEMORY:
3048 bit = PCIM_CMD_MEMEN;
3049 break;
3050 default:
3051 return (EINVAL);
3052 }
3053 pci_set_command_bit(dev, child, bit);
3054 return (0);
3055 }
3056
3057 int
pci_disable_io_method(device_t dev,device_t child,int space)3058 pci_disable_io_method(device_t dev, device_t child, int space)
3059 {
3060 uint16_t bit;
3061
3062 switch(space) {
3063 case SYS_RES_IOPORT:
3064 bit = PCIM_CMD_PORTEN;
3065 break;
3066 case SYS_RES_MEMORY:
3067 bit = PCIM_CMD_MEMEN;
3068 break;
3069 default:
3070 return (EINVAL);
3071 }
3072 pci_clear_command_bit(dev, child, bit);
3073 return (0);
3074 }
3075
3076 /*
3077 * New style pci driver. Parent device is either a pci-host-bridge or a
3078 * pci-pci-bridge. Both kinds are represented by instances of pcib.
3079 */
3080
/*
 * When booting verbose, dump a summary of a discovered device's config
 * header: IDs, bus location, class, command/status, timing registers,
 * interrupt routing, and the PM, MSI and MSI-X capability summaries.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{

	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		/* intpin is 1-based (1 == INTA#); 0 means no interrupt pin. */
		if (cfg->intpin > 0)
			printf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		/* Power-management capability summary, if present. */
		if (cfg->pp.pp_location) {
			uint16_t status;

			status = pci_read_config(cfg->dev, cfg->pp.pp_location +
			    PCIR_POWER_STATUS, 2);
			printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		/* MSI capability summary, if present. */
		if (cfg->msi.msi_location) {
			uint16_t ctrl, msgnum;

			ctrl = cfg->msi.msi_ctrl;
			msgnum = PCI_MSI_MSGNUM(ctrl);
			printf("\tMSI supports %d message%s%s%s\n",
			    msgnum, (msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		/* MSI-X capability summary, if present. */
		if (cfg->msix.msix_location) {
			uint16_t msgnum;

			msgnum = PCI_MSIX_MSGNUM(cfg->msix.msix_ctrl);
			printf("\tMSI-X supports %d message%s ",
			    msgnum, (msgnum == 1) ? "" : "s");
			/* Table and PBA may share a BAR or use two BARs. */
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				printf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				printf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
	}
}
3140
3141 static int
pci_porten(device_t dev)3142 pci_porten(device_t dev)
3143 {
3144 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
3145 }
3146
3147 static int
pci_memen(device_t dev)3148 pci_memen(device_t dev)
3149 {
3150 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
3151 }
3152
/*
 * Read a BAR's current value and probe its size.  On return, *mapp holds
 * the original BAR value (restored to the device before returning),
 * *testvalp holds the all-1's sizing read-back, and *bar64 (if non-NULL)
 * reports whether this is a 64-bit memory BAR.
 */
void
pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp,
    int *bar64)
{
	struct pci_devinfo *dinfo;
	pci_addr_t map, testval;
	int ln2range;
	uint16_t cmd;

	/*
	 * The device ROM BAR is special.  It is always a 32-bit
	 * memory BAR.  Bit 0 is special and should not be set when
	 * sizing the BAR.
	 */
	dinfo = device_get_ivars(dev);
	if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
		map = pci_read_config(dev, reg, 4);
		/* 0xfffffffe keeps the ROM enable bit (bit 0) clear. */
		pci_write_config(dev, reg, 0xfffffffe, 4);
		testval = pci_read_config(dev, reg, 4);
		pci_write_config(dev, reg, map, 4);
		*mapp = map;
		*testvalp = testval;
		if (bar64 != NULL)
			*bar64 = 0;
		return;
	}

	map = pci_read_config(dev, reg, 4);
	ln2range = pci_maprange(map);
	if (ln2range == 64)
		map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;

	/*
	 * Disable decoding via the command register before
	 * determining the BAR's length since we will be placing it in
	 * a weird state.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_write_config(dev, PCIR_COMMAND,
	    cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);

	/*
	 * Determine the BAR's length by writing all 1's.  The bottom
	 * log_2(size) bits of the BAR will stick as 0 when we read
	 * the value back.
	 *
	 * NB: according to the PCI Local Bus Specification, rev. 3.0:
	 * "Software writes 0FFFFFFFFh to both registers, reads them back,
	 * and combines the result into a 64-bit value." (section 6.2.5.1)
	 *
	 * Writes to both registers must be performed before attempting to
	 * read back the size value.
	 */
	testval = 0;
	pci_write_config(dev, reg, 0xffffffff, 4);
	if (ln2range == 64) {
		pci_write_config(dev, reg + 4, 0xffffffff, 4);
		testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
	}
	testval |= pci_read_config(dev, reg, 4);

	/*
	 * Restore the original value of the BAR.  We may have reprogrammed
	 * the BAR of the low-level console device and when booting verbose,
	 * we need the console device addressable.
	 */
	pci_write_config(dev, reg, map, 4);
	if (ln2range == 64)
		pci_write_config(dev, reg + 4, map >> 32, 4);
	/* Re-enable decoding only after the BAR holds a sane value again. */
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	*mapp = map;
	*testvalp = testval;
	if (bar64 != NULL)
		*bar64 = (ln2range == 64);
}
3229
3230 static void
pci_write_bar(device_t dev,struct pci_map * pm,pci_addr_t base)3231 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
3232 {
3233 struct pci_devinfo *dinfo;
3234 int ln2range;
3235
3236 /* The device ROM BAR is always a 32-bit memory BAR. */
3237 dinfo = device_get_ivars(dev);
3238 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
3239 ln2range = 32;
3240 else
3241 ln2range = pci_maprange(pm->pm_value);
3242 pci_write_config(dev, pm->pm_reg, base, 4);
3243 if (ln2range == 64)
3244 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
3245 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
3246 if (ln2range == 64)
3247 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
3248 pm->pm_reg + 4, 4) << 32;
3249 }
3250
3251 struct pci_map *
pci_find_bar(device_t dev,int reg)3252 pci_find_bar(device_t dev, int reg)
3253 {
3254 struct pci_devinfo *dinfo;
3255 struct pci_map *pm;
3256
3257 dinfo = device_get_ivars(dev);
3258 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
3259 if (pm->pm_reg == reg)
3260 return (pm);
3261 }
3262 return (NULL);
3263 }
3264
3265 struct pci_map *
pci_first_bar(device_t dev)3266 pci_first_bar(device_t dev)
3267 {
3268 struct pci_devinfo *dinfo;
3269
3270 dinfo = device_get_ivars(dev);
3271 return (STAILQ_FIRST(&dinfo->cfg.maps));
3272 }
3273
3274 struct pci_map *
pci_next_bar(struct pci_map * pm)3275 pci_next_bar(struct pci_map *pm)
3276 {
3277 return (STAILQ_NEXT(pm, pm_link));
3278 }
3279
/*
 * Return non-zero if decoding is currently enabled for the given BAR:
 * the ROM enable bit for ROM BARs, the PF's SR-IOV VF MSE bit for VFs,
 * and otherwise the MEMEN/PORTEN bit in the command register.
 */
int
pci_bar_enabled(device_t dev, struct pci_map *pm)
{
	struct pci_devinfo *dinfo;
	uint16_t cmd;

	dinfo = device_get_ivars(dev);
	/* A ROM BAR is decoded only when its own enable bit is set. */
	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
	    !(pm->pm_value & PCIM_BIOS_ENABLE))
		return (0);
#ifdef PCI_IOV
	/* VF memory decode is controlled by the PF's SR-IOV control reg. */
	if ((dinfo->cfg.flags & PCICFG_VF) != 0) {
		struct pcicfg_iov *iov;

		iov = dinfo->cfg.iov;
		cmd = pci_read_config(iov->iov_pf,
		    iov->iov_pos + PCIR_SRIOV_CTL, 2);
		return ((cmd & PCIM_SRIOV_VF_MSE) != 0);
	}
#endif
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
		return ((cmd & PCIM_CMD_MEMEN) != 0);
	else
		return ((cmd & PCIM_CMD_PORTEN) != 0);
}
3306
/*
 * Record a BAR (register offset, raw value, log2 size) for the device,
 * keeping the list sorted by register offset.  Asserts that no record
 * for the same register already exists.
 */
struct pci_map *
pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
{
	struct pci_devinfo *dinfo;
	struct pci_map *pm, *prev;

	dinfo = device_get_ivars(dev);
	pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
	pm->pm_reg = reg;
	pm->pm_value = value;
	pm->pm_size = size;
	/*
	 * Find the insertion point: stop at the last element or at the
	 * element whose successor has a larger register offset.  The loop
	 * leaves prev == NULL only when the list is empty.
	 */
	STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
		KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
		    reg));
		if (STAILQ_NEXT(prev, pm_link) == NULL ||
		    STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
			break;
	}
	if (prev != NULL)
		STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
	else
		STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
	return (pm);
}
3331
3332 static void
pci_restore_bars(device_t dev)3333 pci_restore_bars(device_t dev)
3334 {
3335 struct pci_devinfo *dinfo;
3336 struct pci_map *pm;
3337 int ln2range;
3338
3339 dinfo = device_get_ivars(dev);
3340 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
3341 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
3342 ln2range = 32;
3343 else
3344 ln2range = pci_maprange(pm->pm_value);
3345 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
3346 if (ln2range == 64)
3347 pci_write_config(dev, pm->pm_reg + 4,
3348 pm->pm_value >> 32, 4);
3349 }
3350 }
3351
3352 /*
3353 * Add a resource based on a pci map register. Return 1 if the map
3354 * register is a 32bit map register or 2 if it is a 64bit register.
3355 */
static int
pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
    int force, int prefetch)
{
	struct pci_map *pm;
	pci_addr_t base, map, testval;
	pci_addr_t start, end, count;
	int barlen, basezero, flags, maprange, mapsize, type;
	uint16_t cmd;
	struct resource *res;

	/*
	 * The BAR may already exist if the device is a CardBus card
	 * whose CIS is stored in this BAR.
	 */
	pm = pci_find_bar(dev, reg);
	if (pm != NULL) {
		maprange = pci_maprange(pm->pm_value);
		barlen = maprange == 64 ? 2 : 1;
		return (barlen);
	}

	pci_read_bar(dev, reg, &map, &testval, NULL);
	if (PCI_BAR_MEM(map)) {
		type = SYS_RES_MEMORY;
		if (map & PCIM_BAR_MEM_PREFETCH)
			prefetch = 1;
	} else
		type = SYS_RES_IOPORT;
	/* mapsize is log2 of the BAR's size in bytes. */
	mapsize = pci_mapsize(testval);
	base = pci_mapbase(map);
#ifdef __PCI_BAR_ZERO_VALID
	basezero = 0;
#else
	basezero = base == 0;
#endif
	maprange = pci_maprange(map);
	barlen = maprange == 64 ? 2 : 1;

	/*
	 * For I/O registers, if bottom bit is set, and the next bit up
	 * isn't clear, we know we have a BAR that doesn't conform to the
	 * spec, so ignore it.  Also, sanity check the size of the data
	 * areas to the type of memory involved.  Memory must be at least
	 * 16 bytes in size, while I/O ranges must be at least 4.
	 * (mapsize is a log2 value, hence the comparisons against 4 and 2.)
	 */
	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
		return (barlen);
	if ((type == SYS_RES_MEMORY && mapsize < 4) ||
	    (type == SYS_RES_IOPORT && mapsize < 2))
		return (barlen);

	/* Save a record of this BAR. */
	pm = pci_add_bar(dev, reg, map, mapsize);
	if (bootverbose) {
		printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
		    reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
		if (type == SYS_RES_IOPORT && !pci_porten(dev))
			printf(", port disabled\n");
		else if (type == SYS_RES_MEMORY && !pci_memen(dev))
			printf(", memory disabled\n");
		else
			printf(", enabled\n");
	}

	/*
	 * If base is 0, then we have problems if this architecture does
	 * not allow that.  It is best to ignore such entries for the
	 * moment.  These will be allocated later if the driver specifically
	 * requests them.  However, some removable buses look better when
	 * all resources are allocated, so allow '0' to be overridden.
	 *
	 * Similarly treat maps whose values is the same as the test value
	 * read back.  These maps have had all f's written to them by the
	 * BIOS in an attempt to disable the resources.
	 */
	if (!force && (basezero || map == testval))
		return (barlen);
	/* Reject a base that doesn't fit in u_long on this architecture. */
	if ((u_long)base != base) {
		device_printf(bus,
		    "pci%d:%d:%d:%d bar %#x too many address bits",
		    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev), reg);
		return (barlen);
	}

	/*
	 * This code theoretically does the right thing, but has
	 * undesirable side effects in some cases where peripherals
	 * respond oddly to having these bits enabled.  Let the user
	 * be able to turn them off (since pci_enable_io_modes is 1 by
	 * default).
	 */
	if (pci_enable_io_modes) {
		/* Turn on resources that have been left off by a lazy BIOS */
		if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_PORTEN;
			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
		}
		if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_MEMEN;
			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
		}
	} else {
		if (type == SYS_RES_IOPORT && !pci_porten(dev))
			return (barlen);
		if (type == SYS_RES_MEMORY && !pci_memen(dev))
			return (barlen);
	}

	count = (pci_addr_t)1 << mapsize;
	/* BARs are naturally aligned to their size. */
	flags = RF_ALIGNMENT_LOG2(mapsize);
	if (prefetch)
		flags |= RF_PREFETCHABLE;
	if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
		start = 0;	/* Let the parent decide. */
		end = ~0;
	} else {
		start = base;
		end = base + count - 1;
	}
	resource_list_add(rl, type, reg, start, end, count);

	/*
	 * Try to allocate the resource for this BAR from our parent
	 * so that this resource range is already reserved.  The
	 * driver for this device will later inherit this resource in
	 * pci_alloc_resource().
	 */
	res = resource_list_reserve(rl, bus, dev, type, reg, start, end, count,
	    flags);
	if ((pci_do_realloc_bars
		|| pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_REALLOC_BAR))
	    && res == NULL && (start != 0 || end != ~0)) {
		/*
		 * If the allocation fails, try to allocate a resource for
		 * this BAR using any available range.  The firmware felt
		 * it was important enough to assign a resource, so don't
		 * disable decoding if we can help it.
		 */
		resource_list_delete(rl, type, reg);
		resource_list_add(rl, type, reg, 0, ~0, count);
		res = resource_list_reserve(rl, bus, dev, type, reg, 0, ~0,
		    count, flags);
	}
	if (res == NULL) {
		/*
		 * If the allocation fails, delete the resource list entry
		 * and disable decoding for this device.
		 *
		 * If the driver requests this resource in the future,
		 * pci_reserve_map() will try to allocate a fresh
		 * resource range.
		 */
		resource_list_delete(rl, type, reg);
		pci_disable_io(dev, type);
		if (bootverbose)
			device_printf(bus,
			    "pci%d:%d:%d:%d bar %#x failed to allocate\n",
			    pci_get_domain(dev), pci_get_bus(dev),
			    pci_get_slot(dev), pci_get_function(dev), reg);
	} else {
		/* Program the BAR with the address actually reserved. */
		start = rman_get_start(res);
		pci_write_bar(dev, pm, start);
	}
	return (barlen);
}
3525
3526 /*
3527 * For ATA devices we need to decide early what addressing mode to use.
3528 * Legacy demands that the primary and secondary ATA ports sits on the
3529 * same addresses that old ISA hardware did. This dictates that we use
3530 * those addresses and ignore the BAR's if we cannot set PCI native
3531 * addressing mode.
3532 */
static void
pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
    uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			printf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	/*
	 * Primary channel: use the BARs when in native mode, otherwise
	 * reserve the legacy ISA-compatible addresses (0x1f0-0x1f7, 0x3f6).
	 */
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
		(void)resource_list_reserve(rl, bus, dev, type, rid, 0x1f0,
		    0x1f7, 8, 0);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
		(void)resource_list_reserve(rl, bus, dev, type, rid, 0x3f6,
		    0x3f6, 1, 0);
	}
	/*
	 * Secondary channel: same treatment with legacy addresses
	 * 0x170-0x177 and 0x376.
	 */
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
		(void)resource_list_reserve(rl, bus, dev, type, rid, 0x170,
		    0x177, 8, 0);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
		(void)resource_list_reserve(rl, bus, dev, type, rid, 0x376,
		    0x376, 1, 0);
	}
	/* BAR(4)/BAR(5) are always mapped from the BARs themselves. */
	pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}
3586
/*
 * Determine the legacy INTx IRQ for a device — from a user tunable, the
 * bus's interrupt routing, or the intline register — write it back to
 * the config space if it changed, and publish it as IRQ resource rid 0.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	snprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	/* Reject out-of-range tunable values (valid IRQs are 1..254). */
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
}
3634
3635 /* Perform early OHCI takeover from SMM. */
static void
ohci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t ctl;
	int rid;
	int i;

	/* OHCI operational registers live behind BAR(0). */
	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	ctl = bus_read_4(res, OHCI_CONTROL);
	/* InterruptRouting set means firmware (SMM) owns the controller. */
	if (ctl & OHCI_IR) {
		if (bootverbose)
			printf("ohci early: "
			    "SMM active, request owner change\n");
		/* Request an ownership change and poll up to ~100ms. */
		bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
		for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
			DELAY(1000);
			ctl = bus_read_4(res, OHCI_CONTROL);
		}
		/* If SMM never released it, force a host controller reset. */
		if (ctl & OHCI_IR) {
			if (bootverbose)
				printf("ohci early: "
				    "SMM does not respond, resetting\n");
			bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
		}
		/* Disable interrupts */
		bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
	}

	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
3671
3672 /* Perform early UHCI takeover from SMM. */
static void
uhci_early_takeover(device_t self)
{
	struct resource *res;
	int rid;

	/*
	 * Set the PIRQD enable bit and switch off all the others.  We don't
	 * want legacy support to interfere with us XXX Does this also mean
	 * that the BIOS won't touch the keyboard anymore if it is connected
	 * to the ports of the root hub?
	 */
	pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);

	/* Disable interrupts */
	rid = PCI_UHCI_BASE_REG;
	/* UHCI registers are in I/O port space, unlike the other HCs. */
	res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (res != NULL) {
		bus_write_2(res, UHCI_INTR, 0);
		bus_release_resource(self, SYS_RES_IOPORT, rid, res);
	}
}
3695
3696 /* Perform early EHCI takeover from SMM. */
static void
ehci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t cparams;
	uint32_t eec;
	uint8_t eecp;
	uint8_t bios_sem;
	uint8_t offs;
	int rid;
	int i;

	/* EHCI capability/operational registers live behind BAR(0). */
	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	cparams = bus_read_4(res, EHCI_HCCPARAMS);

	/*
	 * Synchronise with the BIOS if it owns the controller.  EHCI
	 * extended capabilities are a linked list in PCI config space,
	 * starting at the offset given by HCCPARAMS.EECP.
	 */
	for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
	    eecp = EHCI_EECP_NEXT(eec)) {
		eec = pci_read_config(self, eecp, 4);
		if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
			continue;
		}
		/* Non-zero BIOS semaphore means firmware owns the HC. */
		bios_sem = pci_read_config(self, eecp +
		    EHCI_LEGSUP_BIOS_SEM, 1);
		if (bios_sem == 0) {
			continue;
		}
		if (bootverbose)
			printf("ehci early: "
			    "SMM active, request owner change\n");

		/* Claim ownership, then poll up to ~100ms for the handoff. */
		pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);

		for (i = 0; (i < 100) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = pci_read_config(self, eecp +
			    EHCI_LEGSUP_BIOS_SEM, 1);
		}

		if (bios_sem != 0) {
			if (bootverbose)
				printf("ehci early: "
				    "SMM does not respond\n");
		}
		/* Disable interrupts */
		offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
		bus_write_4(res, offs + EHCI_USBINTR, 0);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
3751
3752 /* Perform early XHCI takeover from SMM. */
static void
xhci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t cparams;
	uint32_t eec;
	uint8_t eecp;
	uint8_t bios_sem;
	uint8_t offs;
	int rid;
	int i;

	/* xHCI capability/operational registers live behind BAR(0). */
	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	cparams = bus_read_4(res, XHCI_HCCPARAMS1);

	/* Prime eec so the first loop-condition NEXT check is non-zero. */
	eec = -1;

	/*
	 * Synchronise with the BIOS if it owns the controller.  Unlike
	 * EHCI, xHCI extended capabilities are a linked list in MMIO
	 * space; offsets are in 32-bit dwords, hence the << 2.
	 */
	for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
	    eecp += XHCI_XECP_NEXT(eec) << 2) {
		eec = bus_read_4(res, eecp);

		if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
			continue;

		/* Non-zero BIOS semaphore means firmware owns the HC. */
		bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
		if (bios_sem == 0)
			continue;

		if (bootverbose)
			printf("xhci early: "
			    "SMM active, request owner change\n");

		bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);

		/* wait a maximum of 5 second */

		for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = bus_read_1(res, eecp +
			    XHCI_XECP_BIOS_SEM);
		}

		if (bios_sem != 0) {
			if (bootverbose)
				printf("xhci early: "
				    "SMM does not respond\n");
		}

		/* Disable interrupts */
		offs = bus_read_1(res, XHCI_CAPLENGTH);
		bus_write_4(res, offs + XHCI_USBCMD, 0);
		/* Read back USBSTS to flush the posted write. */
		bus_read_4(res, offs + XHCI_USBSTS);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
3813
/*
 * For PCI-PCI and CardBus bridges, reserve the secondary bus range
 * programmed by the firmware from our parent.  If the range is invalid,
 * the reservation fails, or pci_clear_buses is set, the secbus/subbus
 * registers are cleared so the range is renumbered later.
 */
static void
pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
    struct resource_list *rl)
{
	struct resource *res;
	char *cp;
	rman_res_t start, end, count;
	int sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;

	/* Only bridge-type headers carry secondary/subordinate bus regs. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		return;
	}

	/*
	 * If the existing bus range is valid, attempt to reserve it
	 * from our parent.  If this fails for any reason, clear the
	 * secbus and subbus registers.
	 *
	 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
	 * This would at least preserve the existing sec_bus if it is
	 * valid.
	 */
	sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
	sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);

	/* Quirk handling. */
	switch (pci_get_devid(dev)) {
	case 0x12258086:		/* Intel 82454KX/GX (Orion) */
		/* Derive the bus range from the vendor-specific reg 0x41. */
		sup_bus = pci_read_config(dev, 0x41, 1);
		if (sup_bus != 0xff) {
			sec_bus = sup_bus + 1;
			sub_bus = sup_bus + 1;
			PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
			PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
		}
		break;

	case 0x00dd10de:
		/* Compaq R3000 BIOS sets wrong subordinate bus number. */
		if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
			break;
		if (strncmp(cp, "Compal", 6) != 0) {
			freeenv(cp);
			break;
		}
		freeenv(cp);
		if ((cp = kern_getenv("smbios.planar.product")) == NULL)
			break;
		if (strncmp(cp, "08A0", 4) != 0) {
			freeenv(cp);
			break;
		}
		freeenv(cp);
		/* Widen the subordinate range to at least bus 0xa. */
		if (sub_bus < 0xa) {
			sub_bus = 0xa;
			PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
		}
		break;
	}

	if (bootverbose)
		printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
	if (sec_bus > 0 && sub_bus >= sec_bus) {
		start = sec_bus;
		end = sub_bus;
		count = end - start + 1;

		resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count);

		/*
		 * If requested, clear secondary bus registers in
		 * bridge devices to force a complete renumbering
		 * rather than reserving the existing range.  However,
		 * preserve the existing size.
		 */
		if (pci_clear_buses)
			goto clear;

		res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, 0,
		    start, end, count, 0);
		if (res != NULL)
			return;

		if (bootverbose)
			device_printf(bus,
			    "pci%d:%d:%d:%d secbus failed to allocate\n",
			    pci_get_domain(dev), pci_get_bus(dev),
			    pci_get_slot(dev), pci_get_function(dev));
	}

	/* Invalid range, failed reservation, or pci_clear_buses land here. */
clear:
	PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
	PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
}
3917
/*
 * Allocate the secondary bus-number range (rid 0) for a bridge child.
 * If no range was reserved at probe time, lazily reserve one now and
 * program the child's secbus/subbus registers to match.
 */
static struct resource *
pci_alloc_secbus(device_t dev, device_t child, int rid, rman_res_t start,
    rman_res_t end, rman_res_t count, u_int flags)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	struct resource_list *rl;
	struct resource *res;
	int sec_reg, sub_reg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;
	rl = &dinfo->resources;
	/* Only bridge-type headers carry secondary/subordinate bus regs. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		return (NULL);
	}

	/* A bridge has exactly one bus range; only rid 0 is valid. */
	if (rid != 0)
		return (NULL);

	if (resource_list_find(rl, PCI_RES_BUS, rid) == NULL)
		resource_list_add(rl, PCI_RES_BUS, rid, start, end, count);
	if (!resource_list_reserved(rl, PCI_RES_BUS, rid)) {
		/* Reserve inactive; activation happens in the alloc below. */
		res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
		    start, end, count, flags & ~RF_ACTIVE);
		if (res == NULL) {
			resource_list_delete(rl, PCI_RES_BUS, rid);
			device_printf(child, "allocating %ju bus%s failed\n",
			    count, count == 1 ? "" : "es");
			return (NULL);
		}
		if (bootverbose)
			device_printf(child,
			    "Lazy allocation of %ju bus%s at %ju\n", count,
			    count == 1 ? "" : "es", rman_get_start(res));
		/* Program the bridge to decode the newly reserved range. */
		PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
		PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
	}
	return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
	    end, count, flags));
}
3968
/*
 * Map an Enhanced Allocation BAR Equivalent Indicator (BEI) to the
 * resource id (config register offset) of the corresponding BAR:
 * regular BARs 0-5, the expansion ROM, or (with PCI_IOV) VF BARs.
 * Returns -1 for a BEI with no matching BAR.
 */
static int
pci_ea_bei_to_rid(device_t dev, int bei)
{
#ifdef PCI_IOV
	struct pci_devinfo *dinfo;
	int iov_pos;
	struct pcicfg_iov *iov;

	dinfo = device_get_ivars(dev);
	iov = dinfo->cfg.iov;
	if (iov != NULL)
		iov_pos = iov->iov_pos;
	else
		iov_pos = 0;
#endif

	/* Check if matches BAR */
	if ((bei >= PCIM_EA_BEI_BAR_0) &&
	    (bei <= PCIM_EA_BEI_BAR_5))
		return (PCIR_BAR(bei));

	/* Check ROM */
	if (bei == PCIM_EA_BEI_ROM)
		return (PCIR_BIOS);

#ifdef PCI_IOV
	/* Check if matches VF_BAR */
	if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) &&
	    (bei <= PCIM_EA_BEI_VF_BAR_5))
		return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) +
		    iov_pos);
#endif

	return (-1);
}
4004
4005 int
pci_ea_is_enabled(device_t dev,int rid)4006 pci_ea_is_enabled(device_t dev, int rid)
4007 {
4008 struct pci_ea_entry *ea;
4009 struct pci_devinfo *dinfo;
4010
4011 dinfo = device_get_ivars(dev);
4012
4013 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
4014 if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid)
4015 return ((ea->eae_flags & PCIM_EA_ENABLE) > 0);
4016 }
4017
4018 return (0);
4019 }
4020
4021 void
pci_add_resources_ea(device_t bus,device_t dev,int alloc_iov)4022 pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov)
4023 {
4024 struct pci_ea_entry *ea;
4025 struct pci_devinfo *dinfo;
4026 pci_addr_t start, end, count;
4027 struct resource_list *rl;
4028 int type, flags, rid;
4029 struct resource *res;
4030 uint32_t tmp;
4031 #ifdef PCI_IOV
4032 struct pcicfg_iov *iov;
4033 #endif
4034
4035 dinfo = device_get_ivars(dev);
4036 rl = &dinfo->resources;
4037 flags = 0;
4038
4039 #ifdef PCI_IOV
4040 iov = dinfo->cfg.iov;
4041 #endif
4042
4043 if (dinfo->cfg.ea.ea_location == 0)
4044 return;
4045
4046 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
4047 /*
4048 * TODO: Ignore EA-BAR if is not enabled.
4049 * Currently the EA implementation supports
4050 * only situation, where EA structure contains
4051 * predefined entries. In case they are not enabled
4052 * leave them unallocated and proceed with
4053 * a legacy-BAR mechanism.
4054 */
4055 if ((ea->eae_flags & PCIM_EA_ENABLE) == 0)
4056 continue;
4057
4058 switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) {
4059 case PCIM_EA_P_MEM_PREFETCH:
4060 case PCIM_EA_P_VF_MEM_PREFETCH:
4061 flags = RF_PREFETCHABLE;
4062 /* FALLTHROUGH */
4063 case PCIM_EA_P_VF_MEM:
4064 case PCIM_EA_P_MEM:
4065 type = SYS_RES_MEMORY;
4066 break;
4067 case PCIM_EA_P_IO:
4068 type = SYS_RES_IOPORT;
4069 break;
4070 default:
4071 continue;
4072 }
4073
4074 if (alloc_iov != 0) {
4075 #ifdef PCI_IOV
4076 /* Allocating IOV, confirm BEI matches */
4077 if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) ||
4078 (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5))
4079 continue;
4080 #else
4081 continue;
4082 #endif
4083 } else {
4084 /* Allocating BAR, confirm BEI matches */
4085 if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) ||
4086 (ea->eae_bei > PCIM_EA_BEI_BAR_5)) &&
4087 (ea->eae_bei != PCIM_EA_BEI_ROM))
4088 continue;
4089 }
4090
4091 rid = pci_ea_bei_to_rid(dev, ea->eae_bei);
4092 if (rid < 0)
4093 continue;
4094
4095 /* Skip resources already allocated by EA */
4096 if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) ||
4097 (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL))
4098 continue;
4099
4100 start = ea->eae_base;
4101 count = ea->eae_max_offset + 1;
4102 #ifdef PCI_IOV
4103 if (iov != NULL)
4104 count = count * iov->iov_num_vfs;
4105 #endif
4106 end = start + count - 1;
4107 if (count == 0)
4108 continue;
4109
4110 resource_list_add(rl, type, rid, start, end, count);
4111 res = resource_list_reserve(rl, bus, dev, type, rid, start, end, count,
4112 flags);
4113 if (res == NULL) {
4114 resource_list_delete(rl, type, rid);
4115
4116 /*
4117 * Failed to allocate using EA, disable entry.
4118 * Another attempt to allocation will be performed
4119 * further, but this time using legacy BAR registers
4120 */
4121 tmp = pci_read_config(dev, ea->eae_cfg_offset, 4);
4122 tmp &= ~PCIM_EA_ENABLE;
4123 pci_write_config(dev, ea->eae_cfg_offset, tmp, 4);
4124
4125 /*
4126 * Disabling entry might fail in case it is hardwired.
4127 * Read flags again to match current status.
4128 */
4129 ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4);
4130
4131 continue;
4132 }
4133
4134 /* As per specification, fill BAR with zeros */
4135 pci_write_config(dev, rid, 0, 4);
4136 }
4137 }
4138
/*
 * Populate and reserve the resource list for a PCI device: resources
 * described by Enhanced Allocation first, then legacy BARs (with ATA
 * and per-device quirk special cases), optional interrupt rerouting,
 * early USB controller takeover, and secondary bus ranges for bridges.
 */
void
pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	struct resource_list *rl;
	const struct pci_quirk *q;
	uint32_t devid;
	int i;

	dinfo = device_get_ivars(dev);
	cfg = &dinfo->cfg;
	rl = &dinfo->resources;
	/* Combined device/vendor ID used to match quirk table entries. */
	devid = (cfg->device << 16) | cfg->vendor;

	/* Allocate resources using Enhanced Allocation */
	pci_add_resources_ea(bus, dev, 0);

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(bus, dev, rl, force, prefetchmask);
	else
		for (i = 0; i < cfg->nummaps;) {
			/* Skip resources already managed by EA */
			if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) ||
			    (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) ||
			    pci_ea_is_enabled(dev, PCIR_BAR(i))) {
				i++;
				continue;
			}

			/*
			 * Skip quirked resources.
			 */
			for (q = &pci_quirks[0]; q->devid != 0; q++)
				if (q->devid == devid &&
				    q->type == PCI_QUIRK_UNMAP_REG &&
				    q->arg1 == PCIR_BAR(i))
					break;
			if (q->devid != 0) {
				i++;
				continue;
			}
			/*
			 * Advance by pci_add_map()'s return value —
			 * presumably the number of BAR registers the map
			 * consumed (a 64-bit BAR would span two).
			 */
			i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
			    prefetchmask & (1 << i));
		}

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid != 0; q++)
		if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(bus, dev, q->arg1, rl, force, 0);

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline) &&
	    pci_intx_reroute) {
		/*
		 * Try to re-route interrupts. Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
	}

	/* Hand USB controllers to the early-takeover helpers. */
	if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
		if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
			xhci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
			ehci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
			ohci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
			uhci_early_takeover(dev);
	}

	/*
	 * Reserve resources for secondary bus ranges behind bridge
	 * devices.
	 */
	pci_reserve_secbus(bus, dev, cfg, rl);
}
4226
4227 static struct pci_devinfo *
pci_identify_function(device_t pcib,device_t dev,int domain,int busno,int slot,int func)4228 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
4229 int slot, int func)
4230 {
4231 struct pci_devinfo *dinfo;
4232
4233 dinfo = pci_read_device(pcib, dev, domain, busno, slot, func);
4234 if (dinfo != NULL)
4235 pci_add_child(dev, dinfo);
4236
4237 return (dinfo);
4238 }
4239
/*
 * Enumerate every slot and function on the given bus, creating a
 * device_t child for each function that responds to config reads.
 */
void
pci_add_children(device_t dev, int domain, int busno)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;
	int first_func;

	/*
	 * Try to detect a device at slot 0, function 0. If it exists, try to
	 * enable ARI. We must enable ARI before detecting the rest of the
	 * functions on this bus as ARI changes the set of slots and functions
	 * that are legal on this bus.
	 */
	dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0);
	if (dinfo != NULL && pci_enable_ari)
		PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);

	/*
	 * Start looking for new devices on slot 0 at function 1 because we
	 * just identified the device at slot 0, function 0.
	 */
	first_func = 1;

	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++, first_func = 0) {
		pcifunchigh = 0;
		f = 0;
		DELAY(1);

		/* If function 0 is not present, skip to the next slot. */
		if (REG(PCIR_VENDOR, 2) == PCIV_INVALID)
			continue;
		hdrtype = REG(PCIR_HDRTYPE, 1);
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Multi-function devices may implement functions 1..N. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCIB_MAXFUNCS(pcib);
		for (f = first_func; f <= pcifunchigh; f++)
			pci_identify_function(pcib, dev, domain, busno, s, f);
	}
#undef REG
}
4286
/*
 * Rescan the bus: identify newly-arrived functions, delete device_t
 * children whose hardware is no longer present, then probe/attach
 * only the devices added by this rescan.
 */
int
pci_rescan_method(device_t dev)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	device_t child, *devlist, *unchanged;
	int devcount, error, i, j, maxslots, oldcount;
	int busno, domain, s, f, pcifunchigh;
	uint8_t hdrtype;

	/* No need to check for ARI on a rescan. */
	error = device_get_children(dev, &devlist, &devcount);
	if (error)
		return (error);
	if (devcount != 0) {
		/* unchanged[i] records which children are still present. */
		unchanged = malloc(devcount * sizeof(device_t), M_TEMP,
		    M_NOWAIT | M_ZERO);
		if (unchanged == NULL) {
			free(devlist, M_TEMP);
			return (ENOMEM);
		}
	} else
		unchanged = NULL;

	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++) {
		/* If function 0 is not present, skip to the next slot. */
		f = 0;
		if (REG(PCIR_VENDOR, 2) == PCIV_INVALID)
			continue;
		pcifunchigh = 0;
		hdrtype = REG(PCIR_HDRTYPE, 1);
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Multi-function devices may implement functions 1..N. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCIB_MAXFUNCS(pcib);
		for (f = 0; f <= pcifunchigh; f++) {
			if (REG(PCIR_VENDOR, 2) == PCIV_INVALID)
				continue;

			/*
			 * Found a valid function. Check if a
			 * device_t for this device already exists.
			 */
			for (i = 0; i < devcount; i++) {
				child = devlist[i];
				if (child == NULL)
					continue;
				if (pci_get_slot(child) == s &&
				    pci_get_function(child) == f) {
					unchanged[i] = child;
					goto next_func;
				}
			}

			/* New function: create a device_t for it. */
			pci_identify_function(pcib, dev, domain, busno, s, f);
		next_func:;
		}
	}

	/* Remove devices that are no longer present. */
	for (i = 0; i < devcount; i++) {
		if (unchanged[i] != NULL)
			continue;
		device_delete_child(dev, devlist[i]);
	}

	free(devlist, M_TEMP);
	oldcount = devcount;

	/* Try to attach the devices just added. */
	error = device_get_children(dev, &devlist, &devcount);
	if (error) {
		free(unchanged, M_TEMP);
		return (error);
	}

	for (i = 0; i < devcount; i++) {
		/* Skip children that already existed before the rescan. */
		for (j = 0; j < oldcount; j++) {
			if (devlist[i] == unchanged[j])
				goto next_device;
		}

		device_probe_and_attach(devlist[i]);
	next_device:;
	}

	free(unchanged, M_TEMP);
	free(devlist, M_TEMP);
	return (0);
#undef REG
}
4381
4382 #ifdef PCI_IOV
4383 device_t
pci_add_iov_child(device_t bus,device_t pf,uint16_t rid,uint16_t vid,uint16_t did)4384 pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid,
4385 uint16_t did)
4386 {
4387 struct pci_devinfo *vf_dinfo;
4388 device_t pcib;
4389 int busno, slot, func;
4390
4391 pcib = device_get_parent(bus);
4392
4393 PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func);
4394
4395 vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno,
4396 slot, func, vid, did);
4397
4398 vf_dinfo->cfg.flags |= PCICFG_VF;
4399 pci_add_child(bus, vf_dinfo);
4400
4401 return (vf_dinfo->cfg.dev);
4402 }
4403
4404 device_t
pci_create_iov_child_method(device_t bus,device_t pf,uint16_t rid,uint16_t vid,uint16_t did)4405 pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid,
4406 uint16_t vid, uint16_t did)
4407 {
4408
4409 return (pci_add_iov_child(bus, pf, rid, vid, did));
4410 }
4411 #endif
4412
4413 /*
4414 * For PCIe device set Max_Payload_Size to match PCIe root's.
4415 */
4416 static void
pcie_setup_mps(device_t dev)4417 pcie_setup_mps(device_t dev)
4418 {
4419 struct pci_devinfo *dinfo = device_get_ivars(dev);
4420 device_t root;
4421 uint16_t rmps, mmps, mps;
4422
4423 if (dinfo->cfg.pcie.pcie_location == 0)
4424 return;
4425 root = pci_find_pcie_root_port(dev);
4426 if (root == NULL)
4427 return;
4428 /* Check whether the MPS is already configured. */
4429 rmps = pcie_read_config(root, PCIER_DEVICE_CTL, 2) &
4430 PCIEM_CTL_MAX_PAYLOAD;
4431 mps = pcie_read_config(dev, PCIER_DEVICE_CTL, 2) &
4432 PCIEM_CTL_MAX_PAYLOAD;
4433 if (mps == rmps)
4434 return;
4435 /* Check whether the device is capable of the root's MPS. */
4436 mmps = (pcie_read_config(dev, PCIER_DEVICE_CAP, 2) &
4437 PCIEM_CAP_MAX_PAYLOAD) << 5;
4438 if (rmps > mmps) {
4439 /*
4440 * The device is unable to handle root's MPS. Limit root.
4441 * XXX: We should traverse through all the tree, applying
4442 * it to all the devices.
4443 */
4444 pcie_adjust_config(root, PCIER_DEVICE_CTL,
4445 PCIEM_CTL_MAX_PAYLOAD, mmps, 2);
4446 } else {
4447 pcie_adjust_config(dev, PCIER_DEVICE_CTL,
4448 PCIEM_CTL_MAX_PAYLOAD, rmps, 2);
4449 }
4450 }
4451
/*
 * Quiet error state on a newly added device: turn off root-port SERR
 * generation, clear latched AER status bits, unmask the standard
 * uncorrectable/correctable error classes, and enable error reporting
 * in the PCIe device control register.
 */
static void
pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo)
{
	int aer;
	uint32_t r;
	uint16_t r2;

	/* On root ports, stop SERR generation for all severities. */
	if (dinfo->cfg.pcie.pcie_location != 0 &&
	    dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) {
		r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
		    PCIER_ROOT_CTL, 2);
		r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR |
		    PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL);
		pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
		    PCIER_ROOT_CTL, r2, 2);
	}
	if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
		/* Writing the read value back clears the latched bits. */
		r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
		pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
		if (r != 0 && bootverbose) {
			pci_printf(&dinfo->cfg,
			    "clearing AER UC 0x%08x -> 0x%08x\n",
			    r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS,
			    4));
		}

		/* Unmask the standard uncorrectable error classes. */
		r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4);
		r &= ~(PCIM_AER_UC_TRAINING_ERROR |
		    PCIM_AER_UC_DL_PROTOCOL_ERROR |
		    PCIM_AER_UC_SURPRISE_LINK_DOWN |
		    PCIM_AER_UC_POISONED_TLP |
		    PCIM_AER_UC_FC_PROTOCOL_ERROR |
		    PCIM_AER_UC_COMPLETION_TIMEOUT |
		    PCIM_AER_UC_COMPLETER_ABORT |
		    PCIM_AER_UC_UNEXPECTED_COMPLETION |
		    PCIM_AER_UC_RECEIVER_OVERFLOW |
		    PCIM_AER_UC_MALFORMED_TLP |
		    PCIM_AER_UC_ECRC_ERROR |
		    PCIM_AER_UC_UNSUPPORTED_REQUEST |
		    PCIM_AER_UC_ACS_VIOLATION |
		    PCIM_AER_UC_INTERNAL_ERROR |
		    PCIM_AER_UC_MC_BLOCKED_TLP |
		    PCIM_AER_UC_ATOMIC_EGRESS_BLK |
		    PCIM_AER_UC_TLP_PREFIX_BLOCKED);
		pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4);

		/* Same write-back-to-clear dance for correctable errors. */
		r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
		pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
		if (r != 0 && bootverbose) {
			pci_printf(&dinfo->cfg,
			    "clearing AER COR 0x%08x -> 0x%08x\n",
			    r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS,
			    4));
		}

		/* Unmask the standard correctable error classes. */
		r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4);
		r &= ~(PCIM_AER_COR_RECEIVER_ERROR |
		    PCIM_AER_COR_BAD_TLP |
		    PCIM_AER_COR_BAD_DLLP |
		    PCIM_AER_COR_REPLAY_ROLLOVER |
		    PCIM_AER_COR_REPLAY_TIMEOUT |
		    PCIM_AER_COR_ADVISORY_NF_ERROR |
		    PCIM_AER_COR_INTERNAL_ERROR |
		    PCIM_AER_COR_HEADER_LOG_OVFLOW);
		pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4);

		/*
		 * Enable error reporting in the device control register.
		 * NOTE(review): pcie_location is used here without the
		 * != 0 check done above — presumably a device with an AER
		 * extended capability is always PCIe; confirm.
		 */
		r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
		    PCIER_DEVICE_CTL, 2);
		r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE |
		    PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE;
		pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
		    PCIER_DEVICE_CTL, r, 2);
	}
}
4526
/*
 * Create and initialize a device_t child for the PCI function
 * described by dinfo and announce it to interested listeners.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	device_t dev;

	dinfo->cfg.dev = dev = device_add_child(bus, NULL, DEVICE_UNIT_ANY);
	device_set_ivars(dev, dinfo);
	resource_list_init(&dinfo->resources);
	/* Snapshot config space, then restore it to a known-good state. */
	pci_cfg_save(dev, dinfo, 0);
	pci_cfg_restore(dev, dinfo);
	/* Clear any pending power-management event before probing. */
	pci_clear_pme(dev);
	pci_print_verbose(dinfo);
	pci_add_resources(bus, dev, 0, 0);
	if (pci_enable_mps_tune)
		pcie_setup_mps(dev);
	pci_child_added(dinfo->cfg.dev);

	if (pci_clear_aer_on_attach)
		pci_add_child_clear_aer(dev, dinfo);

	/* Notify registered event handlers about the new device. */
	EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev);
}
4549
4550 void
pci_child_added_method(device_t dev,device_t child)4551 pci_child_added_method(device_t dev, device_t child)
4552 {
4553
4554 }
4555
4556 static int
pci_probe(device_t dev)4557 pci_probe(device_t dev)
4558 {
4559
4560 device_set_desc(dev, "PCI bus");
4561
4562 /* Allow other subclasses to override this driver. */
4563 return (BUS_PROBE_GENERIC);
4564 }
4565
4566 int
pci_attach_common(device_t dev)4567 pci_attach_common(device_t dev)
4568 {
4569 struct pci_softc *sc;
4570 int busno, domain;
4571 int rid;
4572
4573 sc = device_get_softc(dev);
4574 domain = pcib_get_domain(dev);
4575 busno = pcib_get_bus(dev);
4576 rid = 0;
4577 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
4578 1, 0);
4579 if (sc->sc_bus == NULL) {
4580 device_printf(dev, "failed to allocate bus number\n");
4581 return (ENXIO);
4582 }
4583 if (bootverbose)
4584 device_printf(dev, "domain=%d, physical bus=%d\n",
4585 domain, busno);
4586 sc->sc_dma_tag = bus_get_dma_tag(dev);
4587 return (0);
4588 }
4589
4590 int
pci_attach(device_t dev)4591 pci_attach(device_t dev)
4592 {
4593 int busno, domain, error;
4594
4595 error = pci_attach_common(dev);
4596 if (error)
4597 return (error);
4598
4599 /*
4600 * Since there can be multiple independently numbered PCI
4601 * buses on systems with multiple PCI domains, we can't use
4602 * the unit number to decide which bus we are probing. We ask
4603 * the parent pcib what our domain and bus numbers are.
4604 */
4605 domain = pcib_get_domain(dev);
4606 busno = pcib_get_bus(dev);
4607 pci_add_children(dev, domain, busno);
4608 bus_attach_children(dev);
4609 return (0);
4610 }
4611
4612 int
pci_detach(device_t dev)4613 pci_detach(device_t dev)
4614 {
4615 struct pci_softc *sc;
4616 int error;
4617
4618 error = bus_generic_detach(dev);
4619 if (error)
4620 return (error);
4621 sc = device_get_softc(dev);
4622 error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus);
4623 return (error);
4624 }
4625
4626 static void
pci_hint_device_unit(device_t dev,device_t child,const char * name,int * unitp)4627 pci_hint_device_unit(device_t dev, device_t child, const char *name, int *unitp)
4628 {
4629 int line, unit;
4630 const char *at;
4631 char me1[24], me2[32];
4632 uint8_t b, s, f;
4633 uint32_t d;
4634 device_location_cache_t *cache;
4635
4636 d = pci_get_domain(child);
4637 b = pci_get_bus(child);
4638 s = pci_get_slot(child);
4639 f = pci_get_function(child);
4640 snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f);
4641 snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f);
4642 line = 0;
4643 cache = dev_wired_cache_init();
4644 while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) {
4645 resource_string_value(name, unit, "at", &at);
4646 if (strcmp(at, me1) == 0 || strcmp(at, me2) == 0) {
4647 *unitp = unit;
4648 break;
4649 }
4650 if (dev_wired_cache_match(cache, child, at)) {
4651 *unitp = unit;
4652 break;
4653 }
4654 }
4655 dev_wired_cache_fini(cache);
4656 }
4657
4658 static void
pci_set_power_child(device_t dev,device_t child,int state)4659 pci_set_power_child(device_t dev, device_t child, int state)
4660 {
4661 device_t pcib;
4662 int dstate;
4663
4664 /*
4665 * Set the device to the given state. If the firmware suggests
4666 * a different power state, use it instead. If power management
4667 * is not present, the firmware is responsible for managing
4668 * device power. Skip children who aren't attached since they
4669 * are handled separately.
4670 */
4671 pcib = device_get_parent(dev);
4672 dstate = state;
4673 if (device_is_attached(child) &&
4674 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
4675 pci_set_powerstate(child, dstate);
4676 }
4677
4678 int
pci_suspend_child(device_t dev,device_t child)4679 pci_suspend_child(device_t dev, device_t child)
4680 {
4681 struct pci_devinfo *dinfo;
4682 struct resource_list_entry *rle;
4683 int error;
4684
4685 dinfo = device_get_ivars(child);
4686
4687 /*
4688 * Save the PCI configuration space for the child and set the
4689 * device in the appropriate power state for this sleep state.
4690 */
4691 pci_cfg_save(child, dinfo, 0);
4692
4693 /* Suspend devices before potentially powering them down. */
4694 error = bus_generic_suspend_child(dev, child);
4695
4696 if (error)
4697 return (error);
4698
4699 if (pci_do_power_suspend) {
4700 /*
4701 * Make sure this device's interrupt handler is not invoked
4702 * in the case the device uses a shared interrupt that can
4703 * be raised by some other device.
4704 * This is applicable only to regular (legacy) PCI interrupts
4705 * as MSI/MSI-X interrupts are never shared.
4706 */
4707 rle = resource_list_find(&dinfo->resources,
4708 SYS_RES_IRQ, 0);
4709 if (rle != NULL && rle->res != NULL)
4710 (void)bus_suspend_intr(child, rle->res);
4711 pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
4712 }
4713
4714 return (0);
4715 }
4716
4717 int
pci_resume_child(device_t dev,device_t child)4718 pci_resume_child(device_t dev, device_t child)
4719 {
4720 struct pci_devinfo *dinfo;
4721 struct resource_list_entry *rle;
4722
4723 if (pci_do_power_resume)
4724 pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
4725
4726 dinfo = device_get_ivars(child);
4727 pci_cfg_restore(child, dinfo);
4728 pci_clear_pme(child);
4729 if (!device_is_attached(child))
4730 pci_cfg_save(child, dinfo, 1);
4731
4732 bus_generic_resume_child(dev, child);
4733
4734 /*
4735 * Allow interrupts only after fully resuming the driver and hardware.
4736 */
4737 if (pci_do_power_suspend) {
4738 /* See pci_suspend_child for details. */
4739 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
4740 if (rle != NULL && rle->res != NULL)
4741 (void)bus_resume_intr(child, rle->res);
4742 }
4743
4744 return (0);
4745 }
4746
4747 int
pci_resume(device_t dev)4748 pci_resume(device_t dev)
4749 {
4750 device_t child, *devlist;
4751 int error, i, numdevs;
4752
4753 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
4754 return (error);
4755
4756 /*
4757 * Resume critical devices first, then everything else later.
4758 */
4759 for (i = 0; i < numdevs; i++) {
4760 child = devlist[i];
4761 switch (pci_get_class(child)) {
4762 case PCIC_DISPLAY:
4763 case PCIC_MEMORY:
4764 case PCIC_BRIDGE:
4765 case PCIC_BASEPERIPH:
4766 BUS_RESUME_CHILD(dev, child);
4767 break;
4768 }
4769 }
4770 for (i = 0; i < numdevs; i++) {
4771 child = devlist[i];
4772 switch (pci_get_class(child)) {
4773 case PCIC_DISPLAY:
4774 case PCIC_MEMORY:
4775 case PCIC_BRIDGE:
4776 case PCIC_BASEPERIPH:
4777 break;
4778 default:
4779 BUS_RESUME_CHILD(dev, child);
4780 }
4781 }
4782 free(devlist, M_TEMP);
4783 return (0);
4784 }
4785
4786 static void
pci_load_vendor_data(void)4787 pci_load_vendor_data(void)
4788 {
4789 caddr_t data;
4790 void *ptr;
4791 size_t sz;
4792
4793 data = preload_search_by_type("pci_vendor_data");
4794 if (data != NULL) {
4795 ptr = preload_fetch_addr(data);
4796 sz = preload_fetch_size(data);
4797 if (ptr != NULL && sz != 0) {
4798 pci_vendordata = ptr;
4799 pci_vendordata_size = sz;
4800 /* terminate the database */
4801 pci_vendordata[pci_vendordata_size] = '\n';
4802 }
4803 }
4804 }
4805
4806 void
pci_driver_added(device_t dev,driver_t * driver)4807 pci_driver_added(device_t dev, driver_t *driver)
4808 {
4809 int numdevs;
4810 device_t *devlist;
4811 device_t child;
4812 struct pci_devinfo *dinfo;
4813 int i;
4814
4815 if (bootverbose)
4816 device_printf(dev, "driver added\n");
4817 DEVICE_IDENTIFY(driver, dev);
4818 if (device_get_children(dev, &devlist, &numdevs) != 0)
4819 return;
4820 for (i = 0; i < numdevs; i++) {
4821 child = devlist[i];
4822 if (device_get_state(child) != DS_NOTPRESENT)
4823 continue;
4824 dinfo = device_get_ivars(child);
4825 pci_print_verbose(dinfo);
4826 if (bootverbose)
4827 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
4828 pci_cfg_restore(child, dinfo);
4829 if (device_probe_and_attach(child) != 0)
4830 pci_child_detached(dev, child);
4831 }
4832 free(devlist, M_TEMP);
4833 }
4834
/*
 * Bus method: set up an interrupt handler for a child.  For legacy
 * INTx (rid 0) the command register's INTx-disable bit is cleared;
 * for MSI/MSI-X (rid > 0) the message is mapped through the parent
 * bridge on first use, enabled, and a per-vector handler count is
 * maintained.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
{
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	void *cookie;
	int error, rid;

	error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
	    arg, &cookie);
	if (error)
		return (error);

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != dev) {
		*cookiep = cookie;
		return(0);
	}

	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Make sure that INTx is enabled */
		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.
		 * Ask our parent to map the MSI and give
		 * us the address and data register values.
		 * If we fail for some reason, teardown the
		 * interrupt handler.
		 */
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* Map the MSI lazily on first handler setup. */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
			    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
			}
			/* Enable MSI when the first handler arrives. */
			if (dinfo->cfg.msi.msi_handlers == 0)
				pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
				    dinfo->cfg.msi.msi_data);
			dinfo->cfg.msi.msi_handlers++;
		} else {
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			/* rid is 1-based; table entries are 0-based. */
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			/* Map the MSI-X vector lazily on first use. */
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
		    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}

			/*
			 * The MSIX table entry must be made valid by
			 * incrementing the mte_handlers before
			 * calling pci_enable_msix() and
			 * pci_resume_msix(). Else the MSIX rewrite
			 * table quirk will not work as expected.
			 */
			mte->mte_handlers++;
			if (mte->mte_handlers == 1) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
		}

		/*
		 * Make sure that INTx is disabled if we are using MSI/MSI-X,
		 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
		 * in which case we "enable" INTx so MSI/MSI-X actually works.
		 */
		if (!pci_has_quirk(pci_get_devid(child),
		    PCI_QUIRK_MSI_INTX_BUG))
			pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
		else
			pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	bad:
		/* On mapping failure, undo the generic setup above. */
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
	*cookiep = cookie;
	return (0);
}
4942
4943 int
pci_teardown_intr(device_t dev,device_t child,struct resource * irq,void * cookie)4944 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
4945 void *cookie)
4946 {
4947 struct msix_table_entry *mte;
4948 struct resource_list_entry *rle;
4949 struct pci_devinfo *dinfo;
4950 int error, rid;
4951
4952 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
4953 return (EINVAL);
4954
4955 /* If this isn't a direct child, just bail out */
4956 if (device_get_parent(child) != dev)
4957 return(bus_generic_teardown_intr(dev, child, irq, cookie));
4958
4959 rid = rman_get_rid(irq);
4960 if (rid == 0) {
4961 /* Mask INTx */
4962 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4963 } else {
4964 /*
4965 * Check to see if the interrupt is MSI or MSI-X. If so,
4966 * decrement the appropriate handlers count and mask the
4967 * MSI-X message, or disable MSI messages if the count
4968 * drops to 0.
4969 */
4970 dinfo = device_get_ivars(child);
4971 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
4972 if (rle->res != irq)
4973 return (EINVAL);
4974 if (dinfo->cfg.msi.msi_alloc > 0) {
4975 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4976 ("MSI-X index too high"));
4977 if (dinfo->cfg.msi.msi_handlers == 0)
4978 return (EINVAL);
4979 dinfo->cfg.msi.msi_handlers--;
4980 if (dinfo->cfg.msi.msi_handlers == 0)
4981 pci_disable_msi(child);
4982 } else {
4983 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4984 ("No MSI or MSI-X interrupts allocated"));
4985 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4986 ("MSI-X index too high"));
4987 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4988 if (mte->mte_handlers == 0)
4989 return (EINVAL);
4990 mte->mte_handlers--;
4991 if (mte->mte_handlers == 0)
4992 pci_mask_msix(child, rid - 1);
4993 }
4994 }
4995 error = bus_generic_teardown_intr(dev, child, irq, cookie);
4996 if (rid > 0)
4997 KASSERT(error == 0,
4998 ("%s: generic teardown failed for MSI/MSI-X", __func__));
4999 return (error);
5000 }
5001
5002 int
pci_print_child(device_t dev,device_t child)5003 pci_print_child(device_t dev, device_t child)
5004 {
5005 struct pci_devinfo *dinfo;
5006 struct resource_list *rl;
5007 int retval = 0;
5008
5009 dinfo = device_get_ivars(child);
5010 rl = &dinfo->resources;
5011
5012 retval += bus_print_child_header(dev, child);
5013
5014 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
5015 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
5016 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
5017 if (device_get_flags(dev))
5018 retval += printf(" flags %#x", device_get_flags(dev));
5019
5020 retval += printf(" at device %d.%d", pci_get_slot(child),
5021 pci_get_function(child));
5022
5023 retval += bus_print_child_domain(dev, child);
5024 retval += bus_print_child_footer(dev, child);
5025
5026 return (retval);
5027 }
5028
/*
 * Generic class/subclass descriptions used by pci_probe_nomatch() when
 * no driver attaches and the loaded device database has no entry for
 * the device.  A subclass of -1 is the catch-all entry for the class;
 * a following row with a matching subclass refines it.  "report"
 * selects whether the description is printed always (1) or only when
 * booting verbose (0).  The table is terminated by a NULL desc.
 */
static const struct
{
	int	class;
	int	subclass;
	int	report; /* 0 = bootverbose, 1 = always */
	const char	*desc;
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			1, "old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	1, "non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		1, "VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			1, "mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	1, "SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	1, "ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	1, "floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	1, "IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	1, "RAID"},
	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	1, "ATA (ADMA)"},
	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	1, "SATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	1, "SAS"},
	{PCIC_STORAGE,		PCIS_STORAGE_NVM,	1, "NVM"},
	{PCIC_NETWORK,		-1,			1, "network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	1, "ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	1, "token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	1, "fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	1, "ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	1, "ISDN"},
	{PCIC_DISPLAY,		-1,			1, "display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	1, "VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	1, "XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	1, "3D"},
	{PCIC_MULTIMEDIA,	-1,			1, "multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	1, "video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	1, "audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	1, "telephony"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	1, "HDA"},
	{PCIC_MEMORY,		-1,			1, "memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	1, "RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	1, "flash"},
	{PCIC_BRIDGE,		-1,			1, "bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	1, "HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	1, "PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	1, "PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	1, "PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	1, "PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	1, "PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	1, "PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	1, "PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	1, "PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			1, "simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	1, "UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	1, "parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	1, "multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	1, "generic modem"},
	{PCIC_BASEPERIPH,	-1,			0, "base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	1, "interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	1, "DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	1, "timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	1, "realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	1, "PCI hot-plug controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	1, "SD host controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_IOMMU,	1, "IOMMU"},
	{PCIC_INPUTDEV,		-1,			1, "input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	1, "keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	1, "mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	1, "scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	1, "gameport"},
	{PCIC_DOCKING,		-1,			1, "docking station"},
	{PCIC_PROCESSOR,	-1,			1, "processor"},
	{PCIC_SERIALBUS,	-1,			1, "serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	1, "FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	1, "AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	1, "SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	1, "USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	1, "Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	0, "SMBus"},
	{PCIC_WIRELESS,		-1,			1, "wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	1, "iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	1, "IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	1, "RF"},
	{PCIC_INTELLIIO,	-1,			1, "intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	1, "I2O"},
	{PCIC_SATCOM,		-1,			1, "satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		1, "sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	1, "sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	1, "sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	1, "sat data"},
	{PCIC_CRYPTO,		-1,			1, "encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	1, "network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	1, "entertainment crypto"},
	{PCIC_DASP,		-1,			0, "dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		1, "DPIO module"},
	{PCIC_DASP,		PCIS_DASP_PERFCNTRS,	1, "performance counters"},
	{PCIC_DASP,		PCIS_DASP_COMM_SYNC,	1, "communication synchronizer"},
	{PCIC_DASP,		PCIS_DASP_MGMT_CARD,	1, "signal processing management"},
	{PCIC_INSTRUMENT,	-1,			0, "non-essential instrumentation"},
	{0, 0, 0, NULL} /* terminator */
};
5127
/*
 * Report a PCI device for which no driver probe succeeded.  A
 * description from the loaded device database is preferred; otherwise
 * fall back to a generic class/subclass description from
 * pci_nomatch_tab.  Table entries flagged verbose-only suppress the
 * report unless bootverbose is set.  The device's config registers are
 * saved afterwards so a later driver load can restore them.
 */
void
pci_probe_nomatch(device_t dev, device_t child)
{
	int i, report;
	const char *cp, *scp;
	char *device;

	/*
	 * Look for a listing for this device in a loaded device database.
	 */
	report = 1;
	if ((device = pci_describe_device(child)) != NULL) {
		device_printf(dev, "<%s>", device);
		free(device, M_DEVBUF);
	} else {
		/*
		 * Scan the class/subclass descriptions for a general
		 * description.
		 */
		cp = "unknown";
		scp = NULL;
		for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
			if (pci_nomatch_tab[i].class == pci_get_class(child)) {
				if (pci_nomatch_tab[i].subclass == -1) {
					/* Class-wide fallback entry. */
					cp = pci_nomatch_tab[i].desc;
					report = pci_nomatch_tab[i].report;
				} else if (pci_nomatch_tab[i].subclass ==
				    pci_get_subclass(child)) {
					/* Exact subclass match refines it. */
					scp = pci_nomatch_tab[i].desc;
					report = pci_nomatch_tab[i].report;
				}
			}
		}
		if (report || bootverbose) {
			device_printf(dev, "<%s%s%s>",
			    cp ? cp : "",
			    ((cp != NULL) && (scp != NULL)) ? ", " : "",
			    scp ? scp : "");
		}
	}
	if (report || bootverbose) {
		printf(" at device %d.%d (no driver attached)\n",
		    pci_get_slot(child), pci_get_function(child));
	}
	/* Save config so the device can be restored on a later attach. */
	pci_cfg_save(child, device_get_ivars(child), 1);
}
5174
/*
 * Clean up after a child driver detaches: forcibly release anything
 * the driver leaked, complaining about each leak, then save the
 * device's config registers.  The release order below is mandatory.
 */
void
pci_child_detached(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	/*
	 * Have to deallocate IRQs before releasing any MSI messages and
	 * have to release MSI messages before deallocating any memory
	 * BARs.
	 */
	if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
		pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
	if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
		/* MSI and MSI-X are mutually exclusive; report the one in use. */
		if (dinfo->cfg.msi.msi_alloc != 0)
			pci_printf(&dinfo->cfg, "Device leaked %d MSI "
			    "vectors\n", dinfo->cfg.msi.msi_alloc);
		else
			pci_printf(&dinfo->cfg, "Device leaked %d MSI-X "
			    "vectors\n", dinfo->cfg.msix.msix_alloc);
		(void)pci_release_msi(child);
	}
	if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
		pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
	if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
		pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
	if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
		pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");

	pci_cfg_save(child, dinfo, 1);
}
5209
5210 /*
5211 * Parse the PCI device database, if loaded, and return a pointer to a
5212 * description of the device.
5213 *
5214 * The database is flat text formatted as follows:
5215 *
5216 * Any line not in a valid format is ignored.
5217 * Lines are terminated with newline '\n' characters.
5218 *
5219 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
5220 * the vendor name.
5221 *
5222 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
5223 * - devices cannot be listed without a corresponding VENDOR line.
5224 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
5225 * another TAB, then the device name.
5226 */
5227
5228 /*
5229 * Assuming (ptr) points to the beginning of a line in the database,
5230 * return the vendor or device and description of the next entry.
5231 * The value of (vendor) or (device) inappropriate for the entry type
5232 * is set to -1. Returns nonzero at the end of the database.
5233 *
 * Note that this is not fully robust in the face of corrupt data;
5235 * we attempt to safeguard against this by spamming the end of the
5236 * database with a newline when we initialise.
5237 */
/*
 * Parse one database entry starting at *ptr, advancing *ptr to the
 * next line.  On a match, either *vendor or *device is filled in (the
 * other stays -1) and the description is copied into *desc.  Returns
 * nonzero when the end of the database is reached.
 *
 * NOTE: the "%80[^\n]" conversion stores up to 80 characters plus a
 * terminating NUL, so *desc must point to a buffer of at least 81
 * bytes.
 */
static int
pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
{
	char *cp = *ptr;
	int left;

	*device = -1;
	*vendor = -1;
	**desc = '\0';
	for (;;) {
		/* Bytes remaining before the end of the vendor data. */
		left = pci_vendordata_size - (cp - pci_vendordata);
		if (left <= 0) {
			*ptr = cp;
			return(1);
		}

		/* vendor entry? */
		if (*cp != '\t' &&
		    sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
			break;
		/* device entry? */
		if (*cp == '\t' &&
		    sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
			break;

		/* skip to next line */
		while (*cp != '\n' && left > 0) {
			cp++;
			left--;
		}
		if (*cp == '\n') {
			cp++;
			left--;
		}
	}
	/* skip to next line */
	while (*cp != '\n' && left > 0) {
		cp++;
		left--;
	}
	if (*cp == '\n' && left > 0)
		cp++;
	*ptr = cp;
	return(0);
}
5283
5284 static char *
pci_describe_device(device_t dev)5285 pci_describe_device(device_t dev)
5286 {
5287 int vendor, device;
5288 char *desc, *vp, *dp, *line;
5289
5290 desc = vp = dp = NULL;
5291
5292 /*
5293 * If we have no vendor data, we can't do anything.
5294 */
5295 if (pci_vendordata == NULL)
5296 goto out;
5297
5298 /*
5299 * Scan the vendor data looking for this device
5300 */
5301 line = pci_vendordata;
5302 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
5303 goto out;
5304 for (;;) {
5305 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
5306 goto out;
5307 if (vendor == pci_get_vendor(dev))
5308 break;
5309 }
5310 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
5311 goto out;
5312 for (;;) {
5313 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
5314 *dp = 0;
5315 break;
5316 }
5317 if (vendor != -1) {
5318 *dp = 0;
5319 break;
5320 }
5321 if (device == pci_get_device(dev))
5322 break;
5323 }
5324 if (dp[0] == '\0')
5325 snprintf(dp, 80, "0x%x", pci_get_device(dev));
5326 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
5327 NULL)
5328 sprintf(desc, "%s, %s", vp, dp);
5329 out:
5330 if (vp != NULL)
5331 free(vp, M_DEVBUF);
5332 if (dp != NULL)
5333 free(dp, M_DEVBUF);
5334 return(desc);
5335 }
5336
/*
 * Read an instance variable of a PCI child device.  Most values come
 * straight from the cached config registers in dinfo->cfg.  Returns 0
 * on success, EINVAL for PCI_IVAR_ETHADDR (which has no generic
 * failure path, so *result is set to NULL first) and for mingnt/maxlat
 * on non-type-0 headers, and ENOENT for unknown ivars.
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Combined device id: device in the high word, vendor low. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_DOMAIN:
		*result = cfg->domain;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		/* Only type-0 headers have the min-grant register. */
		if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
			*result = -1;
			return (EINVAL);
		}
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		/* Only type-0 headers have the max-latency register. */
		if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
			*result = -1;
			return (EINVAL);
		}
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
5427
5428 int
pci_write_ivar(device_t dev,device_t child,int which,uintptr_t value)5429 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
5430 {
5431 struct pci_devinfo *dinfo;
5432
5433 dinfo = device_get_ivars(child);
5434
5435 switch (which) {
5436 case PCI_IVAR_INTPIN:
5437 dinfo->cfg.intpin = value;
5438 return (0);
5439 case PCI_IVAR_ETHADDR:
5440 case PCI_IVAR_SUBVENDOR:
5441 case PCI_IVAR_SUBDEVICE:
5442 case PCI_IVAR_VENDOR:
5443 case PCI_IVAR_DEVICE:
5444 case PCI_IVAR_DEVID:
5445 case PCI_IVAR_CLASS:
5446 case PCI_IVAR_SUBCLASS:
5447 case PCI_IVAR_PROGIF:
5448 case PCI_IVAR_REVID:
5449 case PCI_IVAR_IRQ:
5450 case PCI_IVAR_DOMAIN:
5451 case PCI_IVAR_BUS:
5452 case PCI_IVAR_SLOT:
5453 case PCI_IVAR_FUNCTION:
5454 return (EINVAL); /* disallow for now */
5455
5456 default:
5457 return (ENOENT);
5458 }
5459 }
5460
5461 #include "opt_ddb.h"
5462 #ifdef DDB
5463 #include <ddb/ddb.h>
5464 #include <sys/cons.h>
5465
5466 /*
5467 * List resources based on pci map registers, used for within ddb
5468 */
5469
DB_SHOW_COMMAND_FLAGS(pciregs,db_pci_dump,DB_CMD_MEMSAFE)5470 DB_SHOW_COMMAND_FLAGS(pciregs, db_pci_dump, DB_CMD_MEMSAFE)
5471 {
5472 struct pci_devinfo *dinfo;
5473 struct devlist *devlist_head;
5474 struct pci_conf *p;
5475 const char *name;
5476 int i, error, none_count;
5477
5478 none_count = 0;
5479 /* get the head of the device queue */
5480 devlist_head = &pci_devq;
5481
5482 /*
5483 * Go through the list of devices and print out devices
5484 */
5485 for (error = 0, i = 0,
5486 dinfo = STAILQ_FIRST(devlist_head);
5487 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
5488 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
5489 /* Populate pd_name and pd_unit */
5490 name = NULL;
5491 if (dinfo->cfg.dev)
5492 name = device_get_name(dinfo->cfg.dev);
5493
5494 p = &dinfo->conf;
5495 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
5496 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
5497 (name && *name) ? name : "none",
5498 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
5499 none_count++,
5500 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
5501 p->pc_sel.pc_func, (p->pc_class << 16) |
5502 (p->pc_subclass << 8) | p->pc_progif,
5503 (p->pc_subdevice << 16) | p->pc_subvendor,
5504 (p->pc_device << 16) | p->pc_vendor,
5505 p->pc_revid, p->pc_hdr);
5506 }
5507 }
5508 #endif /* DDB */
5509
/*
 * Lazily reserve the backing resource for one BAR of a child device.
 * The BAR is sized from hardware (or from a previously recorded
 * failed attempt), the requested type is validated against what the
 * BAR decodes, then a suitably sized and aligned range is reserved
 * and written back into the BAR with decoding disabled around the
 * update.  Returns the reserved (inactive) resource, or NULL if the
 * BAR is unimplemented, EA-managed, mismatched, or the reservation
 * fails.
 */
struct resource *
pci_reserve_map(device_t dev, device_t child, int type, int rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int num,
    u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource *res;
	struct pci_map *pm;
	uint16_t cmd;
	pci_addr_t map, testval;
	int mapsize;

	res = NULL;

	/* If rid is managed by EA, ignore it */
	if (pci_ea_is_enabled(child, rid))
		goto out;

	pm = pci_find_bar(child, rid);
	if (pm != NULL) {
		/* This is a BAR that we failed to allocate earlier. */
		mapsize = pm->pm_size;
		map = pm->pm_value;
	} else {
		/*
		 * Weed out the bogons, and figure out how large the
		 * BAR/map is.  BARs that read back 0 here are bogus
		 * and unimplemented.  Note: atapci in legacy mode are
		 * special and handled elsewhere in the code.  If you
		 * have a atapci device in legacy mode and it fails
		 * here, that other code is broken.
		 */
		pci_read_bar(child, rid, &map, &testval, NULL);

		/*
		 * Determine the size of the BAR and ignore BARs with a size
		 * of 0.  Device ROM BARs use a different mask value.
		 */
		if (PCIR_IS_BIOS(&dinfo->cfg, rid))
			mapsize = pci_romsize(testval);
		else
			mapsize = pci_mapsize(testval);
		if (mapsize == 0)
			goto out;
		pm = pci_add_bar(child, rid, map, mapsize);
	}

	/* The BAR's decoded space must match the requested type. */
	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, rid)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, rid);
			goto out;
		}
	}

	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	count = ((pci_addr_t)1 << mapsize) * num;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate BAR for that resource.
	 */
	resource_list_add(rl, type, rid, start, end, count);
	res = resource_list_reserve(rl, dev, child, type, rid, start, end,
	    count, flags & ~RF_ACTIVE);
	if (res == NULL) {
		/* Back out the list entry added above. */
		resource_list_delete(rl, type, rid);
		device_printf(child,
		    "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n",
		    count, rid, type, start, end);
		goto out;
	}
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n",
		    count, rid, type, rman_get_start(res));

	/* Disable decoding via the CMD register before updating the BAR */
	cmd = pci_read_config(child, PCIR_COMMAND, 2);
	pci_write_config(child, PCIR_COMMAND,
	    cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);

	map = rman_get_start(res);
	pci_write_bar(child, pm, map);

	/* Restore the original value of the CMD register */
	pci_write_config(child, PCIR_COMMAND, cmd, 2);
out:
	return (res);
}
5623
/*
 * Common allocation path for PCI child resources.  Bus-number
 * requests are routed to pci_alloc_secbus(); a legacy interrupt
 * (rid 0) is refused once MSI/MSI-X vectors are allocated and is
 * lazily routed if the device has an intpin but no valid intline;
 * PCI-PCI bridge window registers are passed up the tree; and BAR
 * ranges are reserved on first use before the request is satisfied
 * from the child's resource list.
 */
struct resource *
pci_alloc_multi_resource(device_t dev, device_t child, int type, int rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_long num,
    u_int flags)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;
	struct resource *res;
	pcicfgregs *cfg;

	/*
	 * Perform lazy resource allocation
	 */
	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	cfg = &dinfo->cfg;
	switch (type) {
	case PCI_RES_BUS:
		return (pci_alloc_secbus(dev, child, rid, start, end, count,
		    flags));
	case SYS_RES_IRQ:
		/*
		 * Can't alloc legacy interrupt once MSI messages have
		 * been allocated.
		 */
		if (rid == 0 && (cfg->msi.msi_alloc > 0 ||
		    cfg->msix.msix_alloc > 0))
			return (NULL);

		/*
		 * If the child device doesn't have an interrupt
		 * routed and is deserving of an interrupt, try to
		 * assign it one.
		 */
		if (rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
		    (cfg->intpin != 0))
			pci_assign_interrupt(dev, child, 0);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		/*
		 * PCI-PCI bridge I/O window resources are not BARs.
		 * For those allocations just pass the request up the
		 * tree.
		 */
		if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
			switch (rid) {
			case PCIR_IOBASEL_1:
			case PCIR_MEMBASE_1:
			case PCIR_PMBASEL_1:
				/*
				 * XXX: Should we bother creating a resource
				 * list entry?
				 */
				return (bus_generic_alloc_resource(dev, child,
				    type, rid, start, end, count, flags));
			}
		}
		/* Reserve resources for this BAR if needed. */
		rle = resource_list_find(rl, type, rid);
		if (rle == NULL) {
			res = pci_reserve_map(dev, child, type, rid, start, end,
			    count, num, flags);
			if (res == NULL)
				return (NULL);
		}
	}
	/* Satisfy the request from the (possibly just-reserved) list. */
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
5695
/*
 * BUS_ALLOC_RESOURCE method for the PCI bus.  Requests from
 * grandchildren are passed straight up; SR-IOV virtual functions get
 * their memory BARs from the IOV allocator (VFs have no I/O BARs);
 * everything else goes through pci_alloc_multi_resource() with a
 * count of one.
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
#ifdef PCI_IOV
	struct pci_devinfo *dinfo;
#endif

	if (device_get_parent(child) != dev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));

#ifdef PCI_IOV
	dinfo = device_get_ivars(child);
	if (dinfo->cfg.flags & PCICFG_VF) {
		switch (type) {
		/* VFs can't have I/O BARs. */
		case SYS_RES_IOPORT:
			return (NULL);
		case SYS_RES_MEMORY:
			return (pci_vf_alloc_mem_resource(dev, child, rid,
			    start, end, count, flags));
		}

		/* Fall through for other types of resource allocations. */
	}
#endif

	return (pci_alloc_multi_resource(dev, child, type, rid, start, end,
	    count, 1, flags));
}
5727
/*
 * BUS_RELEASE_RESOURCE method for the PCI bus.  Grandchild requests
 * pass straight up; VF memory resources go back to the IOV code;
 * PCI-PCI bridge window registers (which are not BARs) are released
 * up the tree; everything else is released through the child's
 * resource list.
 */
int
pci_release_resource(device_t dev, device_t child, struct resource *r)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	pcicfgregs *cfg __unused;

	if (device_get_parent(child) != dev)
		return (bus_generic_release_resource(dev, child, r));

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

#ifdef PCI_IOV
	if (cfg->flags & PCICFG_VF) {
		switch (rman_get_type(r)) {
		/* VFs can't have I/O BARs. */
		case SYS_RES_IOPORT:
			return (EDOOFUS);
		case SYS_RES_MEMORY:
			return (pci_vf_release_mem_resource(dev, child, r));
		}

		/* Fall through for other types of resource allocations. */
	}
#endif

	/*
	 * PCI-PCI bridge I/O window resources are not BARs.  For
	 * those allocations just pass the request up the tree.
	 */
	if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
	    (rman_get_type(r) == SYS_RES_IOPORT ||
	    rman_get_type(r) == SYS_RES_MEMORY)) {
		switch (rman_get_rid(r)) {
		case PCIR_IOBASEL_1:
		case PCIR_MEMBASE_1:
		case PCIR_PMBASEL_1:
			return (bus_generic_release_resource(dev, child, r));
		}
	}

	rl = &dinfo->resources;
	return (resource_list_release(rl, dev, child, r));
}
5773
/*
 * BUS_ACTIVATE_RESOURCE method for the PCI bus.  VF memory resources
 * are activated by the IOV code; everything else goes through the
 * generic path.  After successful activation, a device ROM BAR gets
 * its decode-enable bit set, and I/O or memory decoding is enabled in
 * the command register.
 */
int
pci_activate_resource(device_t dev, device_t child, struct resource *r)
{
	struct pci_devinfo *dinfo;
	int error, rid, type;

	if (device_get_parent(child) != dev)
		return (bus_generic_activate_resource(dev, child, r));

	dinfo = device_get_ivars(child);
#ifdef PCI_IOV
	if (dinfo->cfg.flags & PCICFG_VF) {
		switch (rman_get_type(r)) {
		/* VFs can't have I/O BARs. */
		case SYS_RES_IOPORT:
			error = EINVAL;
			break;
		case SYS_RES_MEMORY:
			error = pci_vf_activate_mem_resource(dev, child, r);
			break;
		default:
			error = bus_generic_activate_resource(dev, child, r);
			break;
		}
	} else
#endif
		error = bus_generic_activate_resource(dev, child, r);
	if (error)
		return (error);

	rid = rman_get_rid(r);
	type = rman_get_type(r);

	/* Device ROMs need their decoding explicitly enabled. */
	if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
		pci_write_bar(child, pci_find_bar(child, rid),
		    rman_get_start(r) | PCIM_BIOS_ENABLE);

	/* Enable decoding in the command register when activating BARs. */
	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		error = PCI_ENABLE_IO(dev, child, type);
		break;
	}
	return (error);
}
5821
/*
 * BUS_DEACTIVATE_RESOURCE method for the PCI bus.  Mirror image of
 * pci_activate_resource(): VF memory resources are deactivated by the
 * IOV code, and a device ROM BAR has its decode-enable bit cleared by
 * rewriting the bare start address afterwards.
 */
int
pci_deactivate_resource(device_t dev, device_t child, struct resource *r)
{
	struct pci_devinfo *dinfo;
	int error, rid, type;

	if (device_get_parent(child) != dev)
		return (bus_generic_deactivate_resource(dev, child, r));

	dinfo = device_get_ivars(child);
#ifdef PCI_IOV
	if (dinfo->cfg.flags & PCICFG_VF) {
		switch (rman_get_type(r)) {
		/* VFs can't have I/O BARs. */
		case SYS_RES_IOPORT:
			error = EINVAL;
			break;
		case SYS_RES_MEMORY:
			error = pci_vf_deactivate_mem_resource(dev, child, r);
			break;
		default:
			error = bus_generic_deactivate_resource(dev, child, r);
			break;
		}
	} else
#endif
		error = bus_generic_deactivate_resource(dev, child, r);
	if (error)
		return (error);

	/* Disable decoding for device ROMs. */
	rid = rman_get_rid(r);
	type = rman_get_type(r);
	if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
		pci_write_bar(child, pci_find_bar(child, rid),
		    rman_get_start(r));
	return (0);
}
5860
5861 int
pci_adjust_resource(device_t dev,device_t child,struct resource * r,rman_res_t start,rman_res_t end)5862 pci_adjust_resource(device_t dev, device_t child, struct resource *r,
5863 rman_res_t start, rman_res_t end)
5864 {
5865 #ifdef PCI_IOV
5866 struct pci_devinfo *dinfo;
5867
5868 if (device_get_parent(child) != dev)
5869 return (bus_generic_adjust_resource(dev, child, r, start,
5870 end));
5871
5872 dinfo = device_get_ivars(child);
5873 if (dinfo->cfg.flags & PCICFG_VF) {
5874 switch (rman_get_type(r)) {
5875 /* VFs can't have I/O BARs. */
5876 case SYS_RES_IOPORT:
5877 return (EINVAL);
5878 case SYS_RES_MEMORY:
5879 return (pci_vf_adjust_mem_resource(dev, child, r,
5880 start, end));
5881 }
5882
5883 /* Fall through for other types of resource allocations. */
5884 }
5885 #endif
5886
5887 return (bus_generic_adjust_resource(dev, child, r, start, end));
5888 }
5889
5890 int
pci_map_resource(device_t dev,device_t child,struct resource * r,struct resource_map_request * argsp,struct resource_map * map)5891 pci_map_resource(device_t dev, device_t child, struct resource *r,
5892 struct resource_map_request *argsp, struct resource_map *map)
5893 {
5894 #ifdef PCI_IOV
5895 struct pci_devinfo *dinfo;
5896
5897 if (device_get_parent(child) != dev)
5898 return (bus_generic_map_resource(dev, child, r, argsp,
5899 map));
5900
5901 dinfo = device_get_ivars(child);
5902 if (dinfo->cfg.flags & PCICFG_VF) {
5903 switch (rman_get_type(r)) {
5904 /* VFs can't have I/O BARs. */
5905 case SYS_RES_IOPORT:
5906 return (EINVAL);
5907 case SYS_RES_MEMORY:
5908 return (pci_vf_map_mem_resource(dev, child, r, argsp,
5909 map));
5910 }
5911
5912 /* Fall through for other types of resource allocations. */
5913 }
5914 #endif
5915
5916 return (bus_generic_map_resource(dev, child, r, argsp, map));
5917 }
5918
5919 int
pci_unmap_resource(device_t dev,device_t child,struct resource * r,struct resource_map * map)5920 pci_unmap_resource(device_t dev, device_t child, struct resource *r,
5921 struct resource_map *map)
5922 {
5923 #ifdef PCI_IOV
5924 struct pci_devinfo *dinfo;
5925
5926 if (device_get_parent(child) != dev)
5927 return (bus_generic_unmap_resource(dev, child, r, map));
5928
5929 dinfo = device_get_ivars(child);
5930 if (dinfo->cfg.flags & PCICFG_VF) {
5931 switch (rman_get_type(r)) {
5932 /* VFs can't have I/O BARs. */
5933 case SYS_RES_IOPORT:
5934 return (EINVAL);
5935 case SYS_RES_MEMORY:
5936 return (pci_vf_unmap_mem_resource(dev, child, r, map));
5937 }
5938
5939 /* Fall through for other types of resource allocations. */
5940 }
5941 #endif
5942
5943 return (bus_generic_unmap_resource(dev, child, r, map));
5944 }
5945
5946 void
pci_child_deleted(device_t dev,device_t child)5947 pci_child_deleted(device_t dev, device_t child)
5948 {
5949 struct resource_list_entry *rle;
5950 struct resource_list *rl;
5951 struct pci_devinfo *dinfo;
5952
5953 dinfo = device_get_ivars(child);
5954 rl = &dinfo->resources;
5955
5956 EVENTHANDLER_INVOKE(pci_delete_device, child);
5957
5958 /* Turn off access to resources we're about to free */
5959 if (bus_child_present(child) != 0) {
5960 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
5961 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
5962
5963 pci_disable_busmaster(child);
5964 }
5965
5966 /* Free all allocated resources */
5967 STAILQ_FOREACH(rle, rl, link) {
5968 if (rle->res) {
5969 if (rman_get_flags(rle->res) & RF_ACTIVE ||
5970 resource_list_busy(rl, rle->type, rle->rid)) {
5971 pci_printf(&dinfo->cfg,
5972 "Resource still owned, oops. "
5973 "(type=%d, rid=%d, addr=%lx)\n",
5974 rle->type, rle->rid,
5975 rman_get_start(rle->res));
5976 bus_release_resource(child, rle->type, rle->rid,
5977 rle->res);
5978 }
5979 resource_list_unreserve(rl, dev, child, rle->type,
5980 rle->rid);
5981 }
5982 }
5983 resource_list_free(rl);
5984
5985 pci_freecfg(dinfo);
5986 }
5987
/*
 * BUS_DELETE_RESOURCE method: remove a resource list entry for a
 * direct child, unreserving it first.  An entry that is still active
 * or busy is left in place with a complaint rather than being yanked
 * out from under the owner.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle == NULL)
		return;

	if (rle->res) {
		if (rman_get_flags(rle->res) & RF_ACTIVE ||
		    resource_list_busy(rl, type, rid)) {
			device_printf(dev, "delete_resource: "
			    "Resource still owned by child, oops. "
			    "(type=%d, rid=%d, addr=%jx)\n",
			    type, rid, rman_get_start(rle->res));
			return;
		}
		resource_list_unreserve(rl, dev, child, type, rid);
	}
	resource_list_delete(rl, type, rid);
}
6017
6018 struct resource_list *
pci_get_resource_list(device_t dev,device_t child)6019 pci_get_resource_list (device_t dev, device_t child)
6020 {
6021 struct pci_devinfo *dinfo = device_get_ivars(child);
6022
6023 return (&dinfo->resources);
6024 }
6025
6026 #ifdef IOMMU
/*
 * BUS_GET_DMA_TAG method (IOMMU variant): for direct children, prefer
 * a tag from the IOMMU; fall back to the bus softc's tag when the
 * IOMMU provides none or the requester is not a direct child.
 */
bus_dma_tag_t
pci_get_dma_tag(device_t bus, device_t dev)
{
	bus_dma_tag_t tag;
	struct pci_softc *sc;

	if (device_get_parent(dev) == bus) {
		/* try iommu and return if it works */
		tag = iommu_get_dma_tag(bus, dev);
	} else
		tag = NULL;
	if (tag == NULL) {
		sc = device_get_softc(bus);
		tag = sc->sc_dma_tag;
	}
	return (tag);
}
6044 #else
6045 bus_dma_tag_t
pci_get_dma_tag(device_t bus,device_t dev)6046 pci_get_dma_tag(device_t bus, device_t dev)
6047 {
6048 struct pci_softc *sc = device_get_softc(bus);
6049
6050 return (sc->sc_dma_tag);
6051 }
6052 #endif
6053
/*
 * PCI_READ_CONFIG method: read a child's config register through the
 * parent bridge.  For SR-IOV VFs the vendor and device ID registers,
 * which the hardware does not implement, are emulated from the cached
 * config data.
 */
uint32_t
pci_read_config_method(device_t dev, device_t child, int reg, int width)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;

#ifdef PCI_IOV
	/*
	 * SR-IOV VFs don't implement the VID or DID registers, so we have to
	 * emulate them here.
	 */
	if (cfg->flags & PCICFG_VF) {
		if (reg == PCIR_VENDOR) {
			switch (width) {
			case 4:
				return (cfg->device << 16 | cfg->vendor);
			case 2:
				return (cfg->vendor);
			case 1:
				return (cfg->vendor & 0xff);
			default:
				return (0xffffffff);
			}
		} else if (reg == PCIR_DEVICE) {
			switch (width) {
			/* Note that an unaligned 4-byte read is an error. */
			case 2:
				return (cfg->device);
			case 1:
				return (cfg->device & 0xff);
			default:
				return (0xffffffff);
			}
		}
	}
#endif

	return (PCIB_READ_CONFIG(device_get_parent(dev),
	    cfg->bus, cfg->slot, cfg->func, reg, width));
}
6094
6095 void
pci_write_config_method(device_t dev,device_t child,int reg,uint32_t val,int width)6096 pci_write_config_method(device_t dev, device_t child, int reg,
6097 uint32_t val, int width)
6098 {
6099 struct pci_devinfo *dinfo = device_get_ivars(child);
6100 pcicfgregs *cfg = &dinfo->cfg;
6101
6102 PCIB_WRITE_CONFIG(device_get_parent(dev),
6103 cfg->bus, cfg->slot, cfg->func, reg, val, width);
6104 }
6105
6106 int
pci_child_location_method(device_t dev,device_t child,struct sbuf * sb)6107 pci_child_location_method(device_t dev, device_t child, struct sbuf *sb)
6108 {
6109
6110 sbuf_printf(sb, "slot=%d function=%d dbsf=pci%d:%d:%d:%d",
6111 pci_get_slot(child), pci_get_function(child), pci_get_domain(child),
6112 pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
6113 return (0);
6114 }
6115
6116 int
pci_child_pnpinfo_method(device_t dev,device_t child,struct sbuf * sb)6117 pci_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb)
6118 {
6119 struct pci_devinfo *dinfo;
6120 pcicfgregs *cfg;
6121
6122 dinfo = device_get_ivars(child);
6123 cfg = &dinfo->cfg;
6124 sbuf_printf(sb, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
6125 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
6126 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
6127 cfg->progif);
6128 return (0);
6129 }
6130
/*
 * Render a locator-specific device path for 'child' into 'sb'.  For the
 * UEFI locator, the parent's path is extended with a Pci(dev,func) node;
 * all other locators are delegated to the generic bus method.
 */
int
pci_get_device_path_method(device_t bus, device_t child, const char *locator,
    struct sbuf *sb)
{
	device_t parent = device_get_parent(bus);
	int rv;

	if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) {
		rv = bus_generic_get_device_path(parent, bus, locator, sb);
		if (rv == 0) {
			sbuf_printf(sb, "/Pci(0x%x,0x%x)", pci_get_slot(child),
			    pci_get_function(child));
		}
		/*
		 * NOTE(review): 'rv' from the parent path lookup is
		 * discarded and success is always reported for the UEFI
		 * case — confirm this is intentional.
		 */
		return (0);
	}
	return (bus_generic_get_device_path(bus, child, locator, sb));
}
6148
6149 int
pci_assign_interrupt_method(device_t dev,device_t child)6150 pci_assign_interrupt_method(device_t dev, device_t child)
6151 {
6152 struct pci_devinfo *dinfo = device_get_ivars(child);
6153 pcicfgregs *cfg = &dinfo->cfg;
6154
6155 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
6156 cfg->intpin));
6157 }
6158
/*
 * dev_lookup eventhandler: translate a pciconf-style device selector
 * string into the matching device_t, if any.
 */
static void
pci_lookup(void *arg, const char *name, device_t *dev)
{
	long val;
	char *end;
	int domain, bus, slot, func;

	/* Another handler already resolved the name. */
	if (*dev != NULL)
		return;

	/*
	 * Accept pciconf-style selectors of either pciD:B:S:F or
	 * pciB:S:F.  In the latter case, the domain is assumed to
	 * be zero.
	 */
	if (strncmp(name, "pci", 3) != 0)
		return;
	val = strtol(name + 3, &end, 10);
	if (val < 0 || val > INT_MAX || *end != ':')
		return;
	domain = val;
	val = strtol(end + 1, &end, 10);
	if (val < 0 || val > INT_MAX || *end != ':')
		return;
	bus = val;
	val = strtol(end + 1, &end, 10);
	if (val < 0 || val > INT_MAX)
		return;
	slot = val;
	if (*end == ':') {
		/* Four components were given: pciD:B:S:F. */
		val = strtol(end + 1, &end, 10);
		if (val < 0 || val > INT_MAX || *end != '\0')
			return;
		func = val;
	} else if (*end == '\0') {
		/*
		 * Only three components (pciB:S:F): shift the parsed
		 * values down one position and default the domain to 0.
		 */
		func = slot;
		slot = bus;
		bus = domain;
		domain = 0;
	} else
		return;

	/* Reject selectors outside the architectural limits. */
	if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
	    func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
		return;

	*dev = pci_find_dbsf(domain, bus, slot, func);
}
6207
6208 static int
pci_modevent(module_t mod,int what,void * arg)6209 pci_modevent(module_t mod, int what, void *arg)
6210 {
6211 static struct cdev *pci_cdev;
6212 static eventhandler_tag tag;
6213
6214 switch (what) {
6215 case MOD_LOAD:
6216 STAILQ_INIT(&pci_devq);
6217 pci_generation = 0;
6218 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
6219 "pci");
6220 pci_load_vendor_data();
6221 tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
6222 1000);
6223 break;
6224
6225 case MOD_UNLOAD:
6226 if (tag != NULL)
6227 EVENTHANDLER_DEREGISTER(dev_lookup, tag);
6228 destroy_dev(pci_cdev);
6229 break;
6230 }
6231
6232 return (0);
6233 }
6234
/*
 * Write back the PCIe capability control registers saved by
 * pci_cfg_save_pcie().  Each register is written only when the same
 * version/port-type condition used at save time holds.
 */
static void
pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
{
#define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
	struct pcicfg_pcie *cfg;
	int version, pos;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;

	WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);

	/* Link control: v2+ caps, or port types that have a link. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);

	/* Slot control: v2+ caps, root ports, downstream ports with a slot. */
	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);

	/* Root control: v2+ caps, root ports, root event collectors. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);

	/* The "2" register set only exists in v2+ capabilities. */
	if (version > 1) {
		WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
		WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
		WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
	}
#undef WREG
}
6270
6271 static void
pci_cfg_restore_pcix(device_t dev,struct pci_devinfo * dinfo)6272 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
6273 {
6274 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
6275 dinfo->cfg.pcix.pcix_command, 2);
6276 }
6277
/*
 * Restore a device's configuration registers from the copy cached in
 * its pci_devinfo (captured by pci_cfg_save()).  The device is brought
 * back to D0 first, then header registers, BARs and capability state
 * are written back in that order.
 */
void
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
	/* Registers beyond the common header depend on the header type. */
	switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
		pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
		break;
	case PCIM_HDRTYPE_BRIDGE:
		pci_write_config(dev, PCIR_SECLAT_1,
		    dinfo->cfg.bridge.br_seclat, 1);
		pci_write_config(dev, PCIR_SUBBUS_1,
		    dinfo->cfg.bridge.br_subbus, 1);
		pci_write_config(dev, PCIR_SECBUS_1,
		    dinfo->cfg.bridge.br_secbus, 1);
		pci_write_config(dev, PCIR_PRIBUS_1,
		    dinfo->cfg.bridge.br_pribus, 1);
		pci_write_config(dev, PCIR_BRIDGECTL_1,
		    dinfo->cfg.bridge.br_control, 2);
		break;
	case PCIM_HDRTYPE_CARDBUS:
		pci_write_config(dev, PCIR_SECLAT_2,
		    dinfo->cfg.bridge.br_seclat, 1);
		pci_write_config(dev, PCIR_SUBBUS_2,
		    dinfo->cfg.bridge.br_subbus, 1);
		pci_write_config(dev, PCIR_SECBUS_2,
		    dinfo->cfg.bridge.br_secbus, 1);
		pci_write_config(dev, PCIR_PRIBUS_2,
		    dinfo->cfg.bridge.br_pribus, 1);
		pci_write_config(dev, PCIR_BRIDGECTL_2,
		    dinfo->cfg.bridge.br_control, 2);
		break;
	}
	pci_restore_bars(dev);

	/*
	 * NOTE(review): the command register restore is skipped for
	 * bridge-type headers — presumably the bridge driver manages it
	 * itself; confirm before relying on this.
	 */
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE)
		pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);

	/*
	 * Restore extended capabilities for PCI-Express and PCI-X
	 */
	if (dinfo->cfg.pcie.pcie_location != 0)
		pci_cfg_restore_pcie(dev, dinfo);
	if (dinfo->cfg.pcix.pcix_location != 0)
		pci_cfg_restore_pcix(dev, dinfo);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);

#ifdef PCI_IOV
	if (dinfo->cfg.iov != NULL)
		pci_iov_cfg_restore(dev, dinfo);
#endif
}
6352
/*
 * Capture the PCIe capability control registers so they can be written
 * back by pci_cfg_restore_pcie().  Each register is read only when the
 * capability version and port type indicate it is applicable.
 */
static void
pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
{
#define RREG(n) pci_read_config(dev, pos + (n), 2)
	struct pcicfg_pcie *cfg;
	int version, pos;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	cfg->pcie_flags = RREG(PCIER_FLAGS);

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;

	cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);

	/* Link control: v2+ caps, or port types that have a link. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);

	/* Slot control: v2+ caps, root ports, downstream ports with a slot. */
	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);

	/* Root control: v2+ caps, root ports, root event collectors. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);

	/* The "2" register set only exists in v2+ capabilities. */
	if (version > 1) {
		cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
		cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
		cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
	}
#undef RREG
}
6390
6391 static void
pci_cfg_save_pcix(device_t dev,struct pci_devinfo * dinfo)6392 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
6393 {
6394 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
6395 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
6396 }
6397
/*
 * Snapshot a device's configuration registers into its pci_devinfo for
 * later restore by pci_cfg_restore().  If 'setstate' is non-zero, the
 * device may additionally be powered down to D3 according to the
 * pci_do_power_nodriver policy.
 */
void
pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
{
	uint32_t cls;
	int ps;

	/*
	 * Some drivers apparently write to these registers w/o updating our
	 * cached copy.  No harm happens if we update the copy, so do so here
	 * so we can restore them.  The COMMAND register is modified by the
	 * bus w/o updating the cache.  This should represent the normally
	 * writable portion of the 'defined' part of type 0/1/2 headers.
	 */
	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
	/* Registers beyond the common header depend on the header type. */
	switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
		dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
		dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
		dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
		break;
	case PCIM_HDRTYPE_BRIDGE:
		dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
		    PCIR_SECLAT_1, 1);
		dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
		    PCIR_SUBBUS_1, 1);
		dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
		    PCIR_SECBUS_1, 1);
		dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
		    PCIR_PRIBUS_1, 1);
		dinfo->cfg.bridge.br_control = pci_read_config(dev,
		    PCIR_BRIDGECTL_1, 2);
		break;
	case PCIM_HDRTYPE_CARDBUS:
		dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
		    PCIR_SECLAT_2, 1);
		dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
		    PCIR_SUBBUS_2, 1);
		dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
		    PCIR_SECBUS_2, 1);
		dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
		    PCIR_PRIBUS_2, 1);
		dinfo->cfg.bridge.br_control = pci_read_config(dev,
		    PCIR_BRIDGECTL_2, 2);
		dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2);
		dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2);
		break;
	}

	/* Save capability state so restore can replay it. */
	if (dinfo->cfg.pcie.pcie_location != 0)
		pci_cfg_save_pcie(dev, dinfo);

	if (dinfo->cfg.pcix.pcix_location != 0)
		pci_cfg_save_pcix(dev, dinfo);

#ifdef PCI_IOV
	if (dinfo->cfg.iov != NULL)
		pci_iov_cfg_save(dev, dinfo);
#endif

	/*
	 * don't set the state for display devices, base peripherals and
	 * memory devices since bad things happen when they are powered down.
	 * We should (a) have drivers that can easily detach and (b) use
	 * generic drivers for these devices so that some device actually
	 * attaches.  We need to make sure that when we implement (a) we don't
	 * power the device down on a reattach.
	 */
	cls = pci_get_class(dev);
	if (!setstate)
		return;
	/* Apply the pci_do_power_nodriver powerdown policy. */
	switch (pci_do_power_nodriver)
	{
	case 0:		/* NO powerdown at all */
		return;
	case 1:		/* Conservative about what to power down */
		if (cls == PCIC_STORAGE)
			return;
		/*FALLTHROUGH*/
	case 2:		/* Aggressive about what to power down */
		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
		    cls == PCIC_BASEPERIPH)
			return;
		/*FALLTHROUGH*/
	case 3:		/* Power down everything */
		break;
	}
	/*
	 * PCI spec says we can only go into D3 state from D0 state.
	 * Transition from D[12] into D0 before going to D3 state.
	 */
	ps = pci_get_powerstate(dev);
	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
}
6505
6506 /* Wrapper APIs suitable for device driver use. */
6507 void
pci_save_state(device_t dev)6508 pci_save_state(device_t dev)
6509 {
6510 struct pci_devinfo *dinfo;
6511
6512 dinfo = device_get_ivars(dev);
6513 pci_cfg_save(dev, dinfo, 0);
6514 }
6515
6516 void
pci_restore_state(device_t dev)6517 pci_restore_state(device_t dev)
6518 {
6519 struct pci_devinfo *dinfo;
6520
6521 dinfo = device_get_ivars(dev);
6522 pci_cfg_restore(dev, dinfo);
6523 }
6524
6525 static int
pci_get_id_method(device_t dev,device_t child,enum pci_id_type type,uintptr_t * id)6526 pci_get_id_method(device_t dev, device_t child, enum pci_id_type type,
6527 uintptr_t *id)
6528 {
6529
6530 return (PCIB_GET_ID(device_get_parent(dev), child, type, id));
6531 }
6532
6533 /* Find the upstream port of a given PCI device in a root complex. */
6534 device_t
pci_find_pcie_root_port(device_t dev)6535 pci_find_pcie_root_port(device_t dev)
6536 {
6537 struct pci_devinfo *dinfo;
6538 devclass_t pci_class;
6539 device_t pcib, bus;
6540
6541 pci_class = devclass_find("pci");
6542 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
6543 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
6544
6545 /*
6546 * Walk the bridge hierarchy until we find a PCI-e root
6547 * port or a non-PCI device.
6548 */
6549 for (;;) {
6550 bus = device_get_parent(dev);
6551 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
6552 device_get_nameunit(dev)));
6553
6554 pcib = device_get_parent(bus);
6555 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
6556 device_get_nameunit(bus)));
6557
6558 /*
6559 * pcib's parent must be a PCI bus for this to be a
6560 * PCI-PCI bridge.
6561 */
6562 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
6563 return (NULL);
6564
6565 dinfo = device_get_ivars(pcib);
6566 if (dinfo->cfg.pcie.pcie_location != 0 &&
6567 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)
6568 return (pcib);
6569
6570 dev = pcib;
6571 }
6572 }
6573
6574 /*
6575 * Wait for pending transactions to complete on a PCI-express function.
6576 *
6577 * The maximum delay is specified in milliseconds in max_delay. Note
6578 * that this function may sleep.
6579 *
6580 * Returns true if the function is idle and false if the timeout is
6581 * exceeded. If dev is not a PCI-express function, this returns true.
6582 */
6583 bool
pcie_wait_for_pending_transactions(device_t dev,u_int max_delay)6584 pcie_wait_for_pending_transactions(device_t dev, u_int max_delay)
6585 {
6586 struct pci_devinfo *dinfo = device_get_ivars(dev);
6587 uint16_t sta;
6588 int cap;
6589
6590 cap = dinfo->cfg.pcie.pcie_location;
6591 if (cap == 0)
6592 return (true);
6593
6594 sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
6595 while (sta & PCIEM_STA_TRANSACTION_PND) {
6596 if (max_delay == 0)
6597 return (false);
6598
6599 /* Poll once every 100 milliseconds up to the timeout. */
6600 if (max_delay > 100) {
6601 pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK);
6602 max_delay -= 100;
6603 } else {
6604 pause_sbt("pcietp", max_delay * SBT_1MS, 0,
6605 C_HARDCLOCK);
6606 max_delay = 0;
6607 }
6608 sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
6609 }
6610
6611 return (true);
6612 }
6613
6614 /*
6615 * Determine the maximum Completion Timeout in microseconds.
6616 *
6617 * For non-PCI-express functions this returns 0.
6618 */
6619 int
pcie_get_max_completion_timeout(device_t dev)6620 pcie_get_max_completion_timeout(device_t dev)
6621 {
6622 struct pci_devinfo *dinfo = device_get_ivars(dev);
6623 int cap;
6624
6625 cap = dinfo->cfg.pcie.pcie_location;
6626 if (cap == 0)
6627 return (0);
6628
6629 /*
6630 * Functions using the 1.x spec use the default timeout range of
6631 * 50 microseconds to 50 milliseconds. Functions that do not
6632 * support programmable timeouts also use this range.
6633 */
6634 if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 ||
6635 (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) &
6636 PCIEM_CAP2_COMP_TIMO_RANGES) == 0)
6637 return (50 * 1000);
6638
6639 switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) &
6640 PCIEM_CTL2_COMP_TIMO_VAL) {
6641 case PCIEM_CTL2_COMP_TIMO_100US:
6642 return (100);
6643 case PCIEM_CTL2_COMP_TIMO_10MS:
6644 return (10 * 1000);
6645 case PCIEM_CTL2_COMP_TIMO_55MS:
6646 return (55 * 1000);
6647 case PCIEM_CTL2_COMP_TIMO_210MS:
6648 return (210 * 1000);
6649 case PCIEM_CTL2_COMP_TIMO_900MS:
6650 return (900 * 1000);
6651 case PCIEM_CTL2_COMP_TIMO_3500MS:
6652 return (3500 * 1000);
6653 case PCIEM_CTL2_COMP_TIMO_13S:
6654 return (13 * 1000 * 1000);
6655 case PCIEM_CTL2_COMP_TIMO_64S:
6656 return (64 * 1000 * 1000);
6657 default:
6658 return (50 * 1000);
6659 }
6660 }
6661
/*
 * Report a PCIe error delivered via ACPI APEI, then as a crude form of
 * recovery clear any latched error status (AER and PCIe device status).
 * 'sev' is one of the PCIEM_STA_*_ERROR severities; 'aerp', if not
 * NULL, points at a raw AER register block captured by the firmware.
 */
void
pcie_apei_error(device_t dev, int sev, uint8_t *aerp)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	const char *s;
	int aer;
	uint32_t r, r1;
	uint16_t rs;

	if (sev == PCIEM_STA_CORRECTABLE_ERROR)
		s = "Correctable";
	else if (sev == PCIEM_STA_NON_FATAL_ERROR)
		s = "Uncorrectable (Non-Fatal)";
	else
		s = "Uncorrectable (Fatal)";
	device_printf(dev, "%s PCIe error reported by APEI\n", s);
	/* Decode the firmware-captured AER registers, if provided. */
	if (aerp) {
		if (sev == PCIEM_STA_CORRECTABLE_ERROR) {
			r = le32dec(aerp + PCIR_AER_COR_STATUS);
			r1 = le32dec(aerp + PCIR_AER_COR_MASK);
		} else {
			r = le32dec(aerp + PCIR_AER_UC_STATUS);
			r1 = le32dec(aerp + PCIR_AER_UC_MASK);
		}
		device_printf(dev, "status 0x%08x mask 0x%08x", r, r1);
		if (sev != PCIEM_STA_CORRECTABLE_ERROR) {
			r = le32dec(aerp + PCIR_AER_UC_SEVERITY);
			rs = le16dec(aerp + PCIR_AER_CAP_CONTROL);
			printf(" severity 0x%08x first %d\n",
			    r, rs & 0x1f);
		} else
			printf("\n");
	}

	/* As kind of recovery just report and clear the error statuses. */
	if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
		r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
		if (r != 0) {
			pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
			device_printf(dev, "Clearing UC AER errors 0x%08x\n", r);
		}

		r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
		if (r != 0) {
			pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
			device_printf(dev, "Clearing COR AER errors 0x%08x\n", r);
		}
	}
	/* Also clear any latched error bits in the PCIe device status. */
	if (dinfo->cfg.pcie.pcie_location != 0) {
		rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
		    PCIER_DEVICE_STA, 2);
		if ((rs & (PCIEM_STA_CORRECTABLE_ERROR |
		    PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
		    PCIEM_STA_UNSUPPORTED_REQ)) != 0) {
			pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
			    PCIER_DEVICE_STA, rs, 2);
			device_printf(dev, "Clearing PCIe errors 0x%04x\n", rs);
		}
	}
}
6722
6723 /*
6724 * Perform a Function Level Reset (FLR) on a device.
6725 *
6726 * This function first waits for any pending transactions to complete
6727 * within the timeout specified by max_delay. If transactions are
6728 * still pending, the function will return false without attempting a
6729 * reset.
6730 *
6731 * If dev is not a PCI-express function or does not support FLR, this
6732 * function returns false.
6733 *
6734 * Note that no registers are saved or restored. The caller is
6735 * responsible for saving and restoring any registers including
6736 * PCI-standard registers via pci_save_state() and
6737 * pci_restore_state().
6738 */
6739 bool
pcie_flr(device_t dev,u_int max_delay,bool force)6740 pcie_flr(device_t dev, u_int max_delay, bool force)
6741 {
6742 struct pci_devinfo *dinfo = device_get_ivars(dev);
6743 uint16_t cmd, ctl;
6744 int compl_delay;
6745 int cap;
6746
6747 cap = dinfo->cfg.pcie.pcie_location;
6748 if (cap == 0)
6749 return (false);
6750
6751 if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR))
6752 return (false);
6753 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_FLR))
6754 return (false);
6755
6756 /*
6757 * Disable busmastering to prevent generation of new
6758 * transactions while waiting for the device to go idle. If
6759 * the idle timeout fails, the command register is restored
6760 * which will re-enable busmastering.
6761 */
6762 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
6763 pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2);
6764 if (!pcie_wait_for_pending_transactions(dev, max_delay)) {
6765 if (!force) {
6766 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
6767 return (false);
6768 }
6769 pci_printf(&dinfo->cfg,
6770 "Resetting with transactions pending after %d ms\n",
6771 max_delay);
6772
6773 /*
6774 * Extend the post-FLR delay to cover the maximum
6775 * Completion Timeout delay of anything in flight
6776 * during the FLR delay. Enforce a minimum delay of
6777 * at least 10ms.
6778 */
6779 compl_delay = pcie_get_max_completion_timeout(dev) / 1000;
6780 if (compl_delay < 10)
6781 compl_delay = 10;
6782 } else
6783 compl_delay = 0;
6784
6785 /* Initiate the reset. */
6786 ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
6787 pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl |
6788 PCIEM_CTL_INITIATE_FLR, 2);
6789
6790 /* Wait for 100ms. */
6791 pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK);
6792
6793 if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) &
6794 PCIEM_STA_TRANSACTION_PND)
6795 pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n");
6796 return (true);
6797 }
6798
6799 /*
6800 * Attempt a power-management reset by cycling the device in/out of D3
6801 * state. PCI spec says we can only go into D3 state from D0 state.
6802 * Transition from D[12] into D0 before going to D3 state.
6803 */
6804 int
pci_power_reset(device_t dev)6805 pci_power_reset(device_t dev)
6806 {
6807 int ps;
6808
6809 ps = pci_get_powerstate(dev);
6810 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
6811 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
6812 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
6813 pci_set_powerstate(dev, ps);
6814 return (0);
6815 }
6816
6817 /*
6818 * Try link drop and retrain of the downstream port of upstream
6819 * switch, for PCIe. According to the PCIe 3.0 spec 6.6.1, this must
6820 * cause Conventional Hot reset of the device in the slot.
6821 * Alternative, for PCIe, could be the secondary bus reset initiatied
6822 * on the upstream switch PCIR_BRIDGECTL_1, bit 6.
6823 */
6824 int
pcie_link_reset(device_t port,int pcie_location)6825 pcie_link_reset(device_t port, int pcie_location)
6826 {
6827 uint16_t v;
6828
6829 v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2);
6830 v |= PCIEM_LINK_CTL_LINK_DIS;
6831 pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2);
6832 pause_sbt("pcier1", mstosbt(20), 0, 0);
6833 v &= ~PCIEM_LINK_CTL_LINK_DIS;
6834 v |= PCIEM_LINK_CTL_RETRAIN_LINK;
6835 pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2);
6836 pause_sbt("pcier2", mstosbt(100), 0, 0); /* 100 ms */
6837 v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2);
6838 return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0);
6839 }
6840
6841 static int
pci_reset_post(device_t dev,device_t child)6842 pci_reset_post(device_t dev, device_t child)
6843 {
6844
6845 if (dev == device_get_parent(child))
6846 pci_restore_state(child);
6847 return (0);
6848 }
6849
6850 static int
pci_reset_prepare(device_t dev,device_t child)6851 pci_reset_prepare(device_t dev, device_t child)
6852 {
6853
6854 if (dev == device_get_parent(child))
6855 pci_save_state(child);
6856 return (0);
6857 }
6858
6859 static int
pci_reset_child(device_t dev,device_t child,int flags)6860 pci_reset_child(device_t dev, device_t child, int flags)
6861 {
6862 int error;
6863
6864 if (dev == NULL || device_get_parent(child) != dev)
6865 return (0);
6866 if ((flags & DEVF_RESET_DETACH) != 0) {
6867 error = device_get_state(child) == DS_ATTACHED ?
6868 device_detach(child) : 0;
6869 } else {
6870 error = BUS_SUSPEND_CHILD(dev, child);
6871 }
6872 if (error == 0) {
6873 if (!pcie_flr(child, 1000, false)) {
6874 error = BUS_RESET_PREPARE(dev, child);
6875 if (error == 0)
6876 pci_power_reset(child);
6877 BUS_RESET_POST(dev, child);
6878 }
6879 if ((flags & DEVF_RESET_DETACH) != 0)
6880 device_probe_and_attach(child);
6881 else
6882 BUS_RESUME_CHILD(dev, child);
6883 }
6884 return (error);
6885 }
6886
6887 const struct pci_device_table *
pci_match_device(device_t child,const struct pci_device_table * id,size_t nelt)6888 pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt)
6889 {
6890 bool match;
6891 uint16_t vendor, device, subvendor, subdevice, class, subclass, revid;
6892
6893 vendor = pci_get_vendor(child);
6894 device = pci_get_device(child);
6895 subvendor = pci_get_subvendor(child);
6896 subdevice = pci_get_subdevice(child);
6897 class = pci_get_class(child);
6898 subclass = pci_get_subclass(child);
6899 revid = pci_get_revid(child);
6900 while (nelt-- > 0) {
6901 match = true;
6902 if (id->match_flag_vendor)
6903 match &= vendor == id->vendor;
6904 if (id->match_flag_device)
6905 match &= device == id->device;
6906 if (id->match_flag_subvendor)
6907 match &= subvendor == id->subvendor;
6908 if (id->match_flag_subdevice)
6909 match &= subdevice == id->subdevice;
6910 if (id->match_flag_class)
6911 match &= class == id->class_id;
6912 if (id->match_flag_subclass)
6913 match &= subclass == id->subclass;
6914 if (id->match_flag_revid)
6915 match &= revid == id->revid;
6916 if (match)
6917 return (id);
6918 id++;
6919 }
6920 return (NULL);
6921 }
6922
6923 static void
pci_print_faulted_dev_name(const struct pci_devinfo * dinfo)6924 pci_print_faulted_dev_name(const struct pci_devinfo *dinfo)
6925 {
6926 const char *dev_name;
6927 device_t dev;
6928
6929 dev = dinfo->cfg.dev;
6930 printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus,
6931 dinfo->cfg.slot, dinfo->cfg.func);
6932 dev_name = device_get_name(dev);
6933 if (dev_name != NULL)
6934 printf(" (%s%d)", dev_name, device_get_unit(dev));
6935 }
6936
/*
 * Walk every known PCI device and print any latched error state:
 * classic PCI status bits, PCIe device status error bits, and the AER
 * uncorrectable/correctable status registers plus header log.
 */
void
pci_print_faulted_dev(void)
{
	struct pci_devinfo *dinfo;
	device_t dev;
	int aer, i;
	uint32_t r1, r2;
	uint16_t status;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		dev = dinfo->cfg.dev;
		/* Classic PCI status: keep only the error bits. */
		status = pci_read_config(dev, PCIR_STATUS, 2);
		status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_SERR | PCIM_STATUS_PERR;
		if (status != 0) {
			pci_print_faulted_dev_name(dinfo);
			printf(" error 0x%04x\n", status);
		}
		/* PCIe device status error bits, if the cap is present. */
		if (dinfo->cfg.pcie.pcie_location != 0) {
			status = pci_read_config(dev,
			    dinfo->cfg.pcie.pcie_location +
			    PCIER_DEVICE_STA, 2);
			if ((status & (PCIEM_STA_CORRECTABLE_ERROR |
			    PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
			    PCIEM_STA_UNSUPPORTED_REQ)) != 0) {
				pci_print_faulted_dev_name(dinfo);
				printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n",
				    pci_read_config(dev,
				    dinfo->cfg.pcie.pcie_location +
				    PCIER_DEVICE_CTL, 2),
				    status);
			}
		}
		/* AER status registers and the 16-byte header log. */
		if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
			r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
			r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
			if (r1 != 0 || r2 != 0) {
				pci_print_faulted_dev_name(dinfo);
				printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n"
				    "     COR 0x%08x Mask 0x%08x Ctl 0x%08x\n",
				    r1, pci_read_config(dev, aer +
				    PCIR_AER_UC_MASK, 4),
				    pci_read_config(dev, aer +
				    PCIR_AER_UC_SEVERITY, 4),
				    r2, pci_read_config(dev, aer +
				    PCIR_AER_COR_MASK, 4),
				    pci_read_config(dev, aer +
				    PCIR_AER_CAP_CONTROL, 4));
				for (i = 0; i < 4; i++) {
					r1 = pci_read_config(dev, aer +
					    PCIR_AER_HEADER_LOG + i * 4, 4);
					printf("    HL%d: 0x%08x\n", i, r1);
				}
			}
		}
	}
}
6995
6996 #ifdef DDB
/* DDB "show pcierr": dump devices with latched PCI/PCIe/AER errors. */
DB_SHOW_COMMAND_FLAGS(pcierr, pci_print_faulted_dev_db, DB_CMD_MEMSAFE)
{

	pci_print_faulted_dev();
}
7002
7003 static void
db_clear_pcie_errors(const struct pci_devinfo * dinfo)7004 db_clear_pcie_errors(const struct pci_devinfo *dinfo)
7005 {
7006 device_t dev;
7007 int aer;
7008 uint32_t r;
7009
7010 dev = dinfo->cfg.dev;
7011 r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
7012 PCIER_DEVICE_STA, 2);
7013 pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
7014 PCIER_DEVICE_STA, r, 2);
7015
7016 if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0)
7017 return;
7018 r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
7019 if (r != 0)
7020 pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
7021 r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
7022 if (r != 0)
7023 pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
7024 }
7025
DB_COMMAND_FLAGS(pci_clearerr,db_pci_clearerr,DB_CMD_MEMSAFE)7026 DB_COMMAND_FLAGS(pci_clearerr, db_pci_clearerr, DB_CMD_MEMSAFE)
7027 {
7028 struct pci_devinfo *dinfo;
7029 device_t dev;
7030 uint16_t status, status1;
7031
7032 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
7033 dev = dinfo->cfg.dev;
7034 status1 = status = pci_read_config(dev, PCIR_STATUS, 2);
7035 status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
7036 PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
7037 PCIM_STATUS_SERR | PCIM_STATUS_PERR;
7038 if (status1 != 0) {
7039 status &= ~status1;
7040 pci_write_config(dev, PCIR_STATUS, status, 2);
7041 }
7042 if (dinfo->cfg.pcie.pcie_location != 0)
7043 db_clear_pcie_errors(dinfo);
7044 }
7045 }
7046 #endif
7047