xref: /linux/include/linux/pci.h (revision 40286d6379aacfcc053253ef78dc78b09addffda)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *	pci.h
4  *
5  *	PCI defines and function prototypes
6  *	Copyright 1994, Drew Eckhardt
7  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8  *
9  *	PCI Express ASPM defines and function prototypes
10  *	Copyright (c) 2007 Intel Corp.
11  *		Zhang Yanmin (yanmin.zhang@intel.com)
12  *		Shaohua Li (shaohua.li@intel.com)
13  *
14  *	For more information, please consult the following manuals (look at
15  *	http://www.pcisig.com/ for how to get them):
16  *
17  *	PCI BIOS Specification
18  *	PCI Local Bus Specification
19  *	PCI to PCI Bridge Specification
20  *	PCI Express Specification
21  *	PCI System Design Guide
22  */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25 
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28 
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43 
44 #include <linux/pci_ids.h>
45 
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
47 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
48 			       PCI_STATUS_REC_MASTER_ABORT | \
49 			       PCI_STATUS_REC_TARGET_ABORT | \
50 			       PCI_STATUS_SIG_TARGET_ABORT | \
51 			       PCI_STATUS_PARITY)
52 
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55 
56 #define PCI_RESET_PROBE		true
57 #define PCI_RESET_DO_RESET	false
58 
59 /*
60  * The PCI interface treats multi-function devices as independent
61  * devices.  The slot/function address of each device is encoded
62  * in a single byte as follows:
63  *
64  *	7:3 = slot
65  *	2:0 = function
66  *
67  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68  * In the interest of not exposing interfaces to user-space unnecessarily,
69  * the following kernel-only defines are being added here.
70  */
71 #define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
72 /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
74 
75 /*
76  * PCI_SLOT_ALL_DEVICES indicates a slot that covers all devices on the bus.
77  * Used for PCIe hotplug where the physical slot is the entire secondary bus,
78  * and, if ARI Forwarding is enabled, functions may appear to be on multiple
79  * devices.
80  */
81 #define PCI_SLOT_ALL_DEVICES	0xfe
82 
83 /* pci_slot represents a physical slot */
/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* Device nr, or PCI_SLOT_ALL_DEVICES */
	struct kobject		kobj;		/* Sysfs object; its name is the slot name */
};
91 
92 static inline const char *pci_slot_name(const struct pci_slot *slot)
93 {
94 	return kobject_name(&slot->kobj);
95 }
96 
97 /* File state for mmap()s on /proc/bus/pci/X/Y */
98 enum pci_mmap_state {
99 	pci_mmap_io,
100 	pci_mmap_mem
101 };
102 
/* For PCI devices, the region numbers are assigned this way: */
enum {
	/* #0-5: standard PCI resources */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/*
 * The window #defines below expand to PCI_BRIDGE_RESOURCES, which is
 * declared further down in this enum; that works because macros are only
 * expanded at their point of use.  P2P and CardBus bridges share the same
 * resource indices since a given bridge is one or the other.
 */

/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_P2P_BRIDGE_RESOURCE_NUM	3
#define PCI_BRIDGE_RESOURCE_NUM		4

	/* Resources assigned to buses behind the bridge */
	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
144 
145 /**
146  * enum pci_interrupt_pin - PCI INTx interrupt values
147  * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
148  * @PCI_INTERRUPT_INTA: PCI INTA pin
149  * @PCI_INTERRUPT_INTB: PCI INTB pin
150  * @PCI_INTERRUPT_INTC: PCI INTC pin
151  * @PCI_INTERRUPT_INTD: PCI INTD pin
152  *
153  * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
154  * PCI_INTERRUPT_PIN register.
155  */
156 enum pci_interrupt_pin {
157 	PCI_INTERRUPT_UNKNOWN,
158 	PCI_INTERRUPT_INTA,
159 	PCI_INTERRUPT_INTB,
160 	PCI_INTERRUPT_INTC,
161 	PCI_INTERRUPT_INTD,
162 };
163 
164 /* The number of legacy PCI INTx interrupts */
165 #define PCI_NUM_INTX	4
166 
167 /*
168  * Reading from a device that doesn't respond typically returns ~0.  A
169  * successful read from a device may also return ~0, so you need additional
170  * information to reliably identify errors.
171  */
172 #define PCI_ERROR_RESPONSE		(~0ULL)
173 #define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
174 #define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
175 
176 /*
177  * pci_power_t values must match the bits in the Capabilities PME_Support
178  * and Control/Status PowerState fields in the Power Management capability.
179  */
180 typedef int __bitwise pci_power_t;
181 
182 #define PCI_D0		((pci_power_t __force) 0)
183 #define PCI_D1		((pci_power_t __force) 1)
184 #define PCI_D2		((pci_power_t __force) 2)
185 #define PCI_D3hot	((pci_power_t __force) 3)
186 #define PCI_D3cold	((pci_power_t __force) 4)
187 #define PCI_UNKNOWN	((pci_power_t __force) 5)
188 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
189 
190 /* Remember to update this when the list above changes! */
191 extern const char *pci_power_names[];
192 
193 static inline const char *pci_power_name(pci_power_t state)
194 {
195 	return pci_power_names[1 + (__force int) state];
196 }
197 
198 /**
199  * typedef pci_channel_state_t
200  *
201  * The pci_channel state describes connectivity between the CPU and
202  * the PCI device.  If some PCI bus between here and the PCI device
203  * has crashed or locked up, this info is reflected here.
204  */
205 typedef unsigned int __bitwise pci_channel_state_t;
206 
207 enum {
208 	/* I/O channel is in normal state */
209 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
210 
211 	/* I/O to channel is blocked */
212 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
213 
214 	/* PCI card is dead */
215 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
216 };
217 
218 typedef unsigned int __bitwise pcie_reset_state_t;
219 
220 enum pcie_reset_state {
221 	/* Reset is NOT asserted (Use to deassert reset) */
222 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
223 
224 	/* Use #PERST to reset PCIe device */
225 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
226 
227 	/* Use PCIe Hot Reset to reset device */
228 	pcie_hot_reset = (__force pcie_reset_state_t) 3
229 };
230 
typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* NOTE(review): (1 << 4) is unused here — presumably a retired flag
	 * whose bit is kept reserved; confirm before reusing it. */
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
	/* Device does honor MSI masking despite saying otherwise */
	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
	/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
	PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
	/*
	 * PCIe to PCI bridge does not create RID aliases because the bridge is
	 * integrated with the downstream devices and doesn't use real PCI.
	 */
	PCI_DEV_FLAGS_PCI_BRIDGE_NO_ALIAS = (__force pci_dev_flags_t) (1 << 14),
};
265 
266 enum pci_irq_reroute_variant {
267 	INTEL_IRQ_REROUTE_VARIANT = 1,
268 	MAX_IRQ_REROUTE_VARIANTS = 3
269 };
270 
271 typedef unsigned short __bitwise pci_bus_flags_t;
272 enum pci_bus_flags {
273 	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
274 	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
275 	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
276 	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
277 };
278 
279 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
280 enum pcie_link_width {
281 	PCIE_LNK_WIDTH_RESRV	= 0x00,
282 	PCIE_LNK_X1		= 0x01,
283 	PCIE_LNK_X2		= 0x02,
284 	PCIE_LNK_X4		= 0x04,
285 	PCIE_LNK_X8		= 0x08,
286 	PCIE_LNK_X12		= 0x0c,
287 	PCIE_LNK_X16		= 0x10,
288 	PCIE_LNK_X32		= 0x20,
289 	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
290 };
291 
292 /* See matching string table in pci_speed_string() */
293 enum pci_bus_speed {
294 	PCI_SPEED_33MHz			= 0x00,
295 	PCI_SPEED_66MHz			= 0x01,
296 	PCI_SPEED_66MHz_PCIX		= 0x02,
297 	PCI_SPEED_100MHz_PCIX		= 0x03,
298 	PCI_SPEED_133MHz_PCIX		= 0x04,
299 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
300 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
301 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
302 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
303 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
304 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
305 	AGP_UNKNOWN			= 0x0c,
306 	AGP_1X				= 0x0d,
307 	AGP_2X				= 0x0e,
308 	AGP_4X				= 0x0f,
309 	AGP_8X				= 0x10,
310 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
311 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
312 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
313 	PCIE_SPEED_2_5GT		= 0x14,
314 	PCIE_SPEED_5_0GT		= 0x15,
315 	PCIE_SPEED_8_0GT		= 0x16,
316 	PCIE_SPEED_16_0GT		= 0x17,
317 	PCIE_SPEED_32_0GT		= 0x18,
318 	PCIE_SPEED_64_0GT		= 0x19,
319 	PCI_SPEED_UNKNOWN		= 0xff,
320 };
321 
322 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
323 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
324 
325 struct pci_vpd {
326 	struct mutex	lock;
327 	unsigned int	len;
328 	u8		cap;
329 };
330 
331 struct irq_affinity;
332 struct pcie_bwctrl_data;
333 struct pcie_link_state;
334 struct pci_sriov;
335 struct pci_p2pdma;
336 struct rcec_ea;
337 
338 /* struct pci_dev - describes a PCI device
339  *
340  * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
341  *			LSB). 0 when the supported speeds cannot be
342  *			determined (e.g., for Root Complex Integrated
343  *			Endpoints without the relevant Capability
344  *			Registers).
345  * @is_hotplug_bridge:	Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
346  *			Conventional PCI Hot-Plug, ACPI slot).
347  *			Such bridges are allocated additional MMIO and bus
348  *			number resources to allow for hierarchy expansion.
349  * @is_pciehp:		PCIe Hot-Plug Capable bridge.
350  */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;		/* Vendor ID */
	unsigned short	device;		/* Device ID */
	unsigned short	subsystem_vendor;	/* Subsystem Vendor ID */
	unsigned short	subsystem_device;	/* Subsystem Device ID */
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_info	*aer_info;	/* AER info for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
	struct pci_dev  *rcec;          /* Associated RCEC device */
#endif
	u32		devcap;		/* PCIe Device Capabilities */
	u16		rebar_cap;	/* Resizable BAR capability offset */
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers.  */
	u64		msi_addr_mask;	/* Mask of the bits of bus address for
					   MSI that this device implements.
					   Normally set based on device
					   capabilities. You only need to
					   change this if your device claims
					   to support 64-bit MSI but implements
					   fewer than 64 address bits. */

	struct device_dma_parameters dma_parms;

	pci_power_t	current_state;	/* Current operating state. In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off. */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	pinned:1;	/* Whether this dev is pinned */
	unsigned int	config_rrs_sv:1; /* Config RRS software visibility */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;	/* Internal: wakeup already set
						   up — TODO confirm against
						   PCI PM code */
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	no_bw_notif:1;	/* BW notifications may cause issues */
	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

	u16		l1ss;		/* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	unsigned int	aspm_l0s_support:1;	/* ASPM L0s support */
	unsigned int	aspm_l1_support:1;	/* ASPM L1 support */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
#endif
	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
	unsigned int	eetlp_prefix_max:3;	/* Max # of End-End TLP Prefixes, 0=not supported */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here. They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;		/* MSI enable state */
	unsigned int	msix_enabled:1;		/* MSI-X enable state */
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	tph_enabled:1;		/* TLP Processing Hints */
	unsigned int	fm_enabled:1;		/* Flit Mode (segment captured) */
	unsigned int	is_managed:1;		/* Managed via devres */
	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;		/* Config state saved (see
						   saved_config_space) */
	unsigned int	is_physfn:1;		/* SR-IOV PF */
	unsigned int	is_virtfn:1;		/* SR-IOV VF */
	unsigned int	is_hotplug_bridge:1;
	unsigned int	is_pciehp:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	unsigned int	is_cxl:1;               /* Compute Express Link (CXL) */
	/*
	 * Devices marked being untrusted are the ones that can potentially
	 * execute DMA attacks and similar. They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that. When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
	unsigned int	non_mappable_bars:1;	/* BARs can't be mapped to user-space  */
	pci_dev_flags_t dev_flags;	/* Quirk flags, see enum pci_dev_flags */
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;	/* Capability save area, alongside
						   saved_config_space */
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	u16		ptm_cap;		/* PTM Capability */
	unsigned int	ptm_root:1;
	unsigned int	ptm_responder:1;
	unsigned int	ptm_requester:1;
	atomic_t	ptm_enable_cnt;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	void __iomem	*msix_base;	/* Mapped MSI-X table */
	raw_spinlock_t	msi_lock;
#endif
	struct pci_vpd	vpd;
#ifdef CONFIG_PCIE_DPC
	u16		dpc_cap;
	unsigned int	dpc_rp_extensions:1;
	u8		dpc_rp_log_size;
#endif
	struct pcie_bwctrl_data		*link_bwctrl;
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
	u16		pri_cap;	/* PRI Capability offset */
	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
	unsigned int	pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_cap;	/* PASID Capability offset */
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma __rcu *p2pdma;
#endif
#ifdef CONFIG_PCI_DOE
	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
#endif
#ifdef CONFIG_PCI_NPEM
	struct npem	*npem;		/* Native PCIe Enclosure Management */
#endif
#ifdef CONFIG_PCI_IDE
	u16		ide_cap;	/* Link Integrity & Data Encryption */
	u8		nr_ide_mem;	/* Address association resources for streams */
	u8		nr_link_ide;	/* Link Stream count (Selective Stream offset) */
	u16		nr_sel_ide;	/* Selective Stream count (register block allocator) */
	struct ida	ide_stream_ida;
	unsigned int	ide_cfg:1;	/* Config cycles over IDE */
	unsigned int	ide_tee_limit:1; /* Disallow T=0 traffic over IDE */
#endif
#ifdef CONFIG_PCI_TSM
	struct pci_tsm *tsm;		/* TSM operation state */
#endif
	u16		acs_cap;	/* ACS Capability offset */
	u16		acs_capabilities; /* ACS Capabilities */
	u8		supported_speeds; /* Supported Link Speeds Vector */
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	unsigned long	priv_flags;	/* Private flags for the PCI driver */

	/* These methods index pci_reset_fn_methods[] */
	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */

#ifdef CONFIG_PCIE_TPH
	u16		tph_cap;	/* TPH capability offset */
	u8		tph_mode;	/* TPH mode */
	u8		tph_req_type;	/* TPH requester type */
#endif
};
597 
/*
 * Return the PF (physical function) associated with @dev, or @dev itself
 * when it is not a VF or when SR-IOV support is compiled out.
 */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn ? dev->physfn : dev;
#else
	return dev;
#endif
}
606 
607 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
608 
609 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
610 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
611 #define for_each_pci_dev_reverse(d) \
612 	while ((d = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
613 
614 static inline int pci_channel_offline(struct pci_dev *pdev)
615 {
616 	return (pdev->error_state != pci_channel_io_normal);
617 }
618 
619 /*
620  * Currently in ACPI spec, for each PCI host bridge, PCI Segment
621  * Group number is limited to a 16-bit value, therefore (int)-1 is
622  * not a valid PCI domain number, and can be used as a sentinel
623  * value indicating ->domain_nr is not set by the driver (and
624  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
625  * pci_bus_find_domain_nr()).
626  */
627 #define PCI_DOMAIN_NR_NOT_SET (-1)
628 
struct pci_host_bridge {
	struct device	dev;		/* Generic device interface */
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;		/* Config access functions for root bus */
	struct pci_ops	*child_ops;	/* NOTE(review): presumably config ops
					   for child buses; confirm at use sites */
	void		*sysdata;	/* Hook for sys-specific extension */
	int		busnr;		/* Root bus number */
	int		domain_nr;	/* Segment; PCI_DOMAIN_NR_NOT_SET if unset */
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
#ifdef CONFIG_PCI_IDE
	u16 nr_ide_streams; /* Max streams possibly active in @ide_stream_ida */
	struct ida ide_stream_ida;
	struct ida ide_stream_ids_ida; /* track unique ids per domain */
#endif
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8); /* Map slot/pin to IRQ */
	void (*release_fn)(struct pci_host_bridge *); /* Teardown callback, see
							 pci_set_host_bridge_release() */
	int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void		*release_data;	/* Opaque cookie passed to release_fn */
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
	unsigned int	size_windows:1;		/* Enable root bus sizing */
	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);
	unsigned long	private[] ____cacheline_aligned; /* Driver private area,
							    see pci_host_bridge_priv() */
};
672 
673 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
674 
675 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
676 {
677 	return (void *)bridge->private;
678 }
679 
/*
 * Map a pointer previously obtained from pci_host_bridge_priv() back to
 * its enclosing struct pci_host_bridge.  @priv must be a bridge's
 * private[] area; any other pointer yields a bogus result.
 */
static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}
684 
685 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
686 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
687 						   size_t priv);
688 void pci_free_host_bridge(struct pci_host_bridge *bridge);
689 struct device *pci_get_host_bridge_device(struct pci_dev *dev);
690 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
691 
692 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
693 				 void (*release_fn)(struct pci_host_bridge *),
694 				 void *release_data);
695 
696 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
697 
698 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
699 
struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;	/* PCI domain (segment) number */
#endif

	char		name[48];	/* Human-readable bus name */

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device		*bridge;	/* Upstream bridge's generic device */
	struct device		dev;		/* Generic device for this bus */
	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute	*legacy_mem;	/* Legacy mem */
	unsigned int		is_added:1;	/* NOTE(review): presumably set once
						   the bus device is registered;
						   confirm in probe path */
	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
	unsigned int		flit_mode:1;	/* Link in Flit mode */
};
736 
737 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
738 
739 static inline u16 pci_dev_id(struct pci_dev *dev)
740 {
741 	return PCI_DEVID(dev->bus->number, dev->devfn);
742 }
743 
744 /*
745  * Returns true if the PCI bus is root (behind host-PCI bridge),
746  * false otherwise
747  *
748  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
749  * This is incorrect because "virtual" buses added for SR-IOV (via
750  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
751  */
752 static inline bool pci_is_root_bus(struct pci_bus *pbus)
753 {
754 	return !(pbus->parent);
755 }
756 
757 /**
758  * pci_is_bridge - check if the PCI device is a bridge
759  * @dev: PCI device
760  *
761  * Return true if the PCI device is bridge whether it has subordinate
762  * or not.
763  */
764 static inline bool pci_is_bridge(struct pci_dev *dev)
765 {
766 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
767 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
768 }
769 
770 /**
771  * pci_is_vga - check if the PCI device is a VGA device
772  * @pdev: PCI device
773  *
774  * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
775  * VGA Base Class and Sub-Classes:
776  *
777  *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
778  *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
779  *
780  * Return true if the PCI device is a VGA device and uses the legacy VGA
781  * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
782  * aliases).
783  */
784 static inline bool pci_is_vga(struct pci_dev *pdev)
785 {
786 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
787 		return true;
788 
789 	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
790 		return true;
791 
792 	return false;
793 }
794 
795 /**
796  * pci_is_display - check if the PCI device is a display controller
797  * @pdev: PCI device
798  *
799  * Determine whether the given PCI device corresponds to a display
800  * controller. Display controllers are typically used for graphical output
801  * and are identified based on their class code.
802  *
803  * Return: true if the PCI device is a display controller, false otherwise.
804  */
805 static inline bool pci_is_display(struct pci_dev *pdev)
806 {
807 	return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
808 }
809 
/**
 * pcie_is_cxl - check if the PCI device is CXL
 * @pci_dev: PCI device
 *
 * Return: the cached is_cxl flag from the device structure.
 */
static inline bool pcie_is_cxl(struct pci_dev *pci_dev)
{
	return pci_dev->is_cxl;
}
814 
/*
 * for_each_pci_bridge - iterate over the bridge devices on @bus.
 * The inverted "if (!pci_is_bridge(dev)) {} else" filters out non-bridges
 * while keeping the macro safe against dangling-else when the caller wraps
 * it in an unbraced if-statement.
 */
#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else
818 
819 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
820 {
821 	dev = pci_physfn(dev);
822 	if (pci_is_root_bus(dev->bus))
823 		return NULL;
824 
825 	return dev->bus->self;
826 }
827 
#ifdef CONFIG_PCI_MSI
/* True if either MSI or MSI-X is currently enabled for @pci_dev */
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
/* Without CONFIG_PCI_MSI no device can have MSI/MSI-X enabled */
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
836 
/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/*
 * Translate the PCIBIOS_* error codes above into generic errnos so they
 * can be passed back through non-PCI code.  Zero and negative values are
 * assumed to already be errnos (or success) and pass through unchanged;
 * unrecognized positive codes map to -ERANGE.
 */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	switch (err) {
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return -ENOENT;
	case PCIBIOS_BAD_VENDOR_ID:
		return -ENOTTY;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	case PCIBIOS_SET_FAILED:
		return -EIO;
	case PCIBIOS_BUFFER_TOO_SMALL:
		return -ENOSPC;
	default:
		return -ERANGE;
	}
}
869 
870 /* Low-level architecture-dependent routines */
871 
struct pci_ops {
	/* Optional hooks invoked when a child bus is added or removed */
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	/*
	 * Map devfn/where to a config space address.  NOTE(review): presumably
	 * consumed by the pci_generic_config_*() accessors — confirm in
	 * drivers/pci/access.c.
	 */
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	/* Read/write @size bytes of config space at offset @where */
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};
879 
880 /*
881  * ACPI needs to be able to access PCI config space before we've done a
882  * PCI bus scan and created pci_bus structures.
883  */
884 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
885 		 int reg, int len, u32 *val);
886 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
887 		  int reg, int len, u32 val);
888 
/* PCI bus address width follows the arch's DMA address width */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

/*
 * An address range as seen from the PCI bus.  Both bounds are inclusive:
 * pci_bus_region_size() computes end - start + 1.
 */
struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};
899 
900 static inline pci_bus_addr_t pci_bus_region_size(const struct pci_bus_region *region)
901 {
902 	return region->end - region->start + 1;
903 }
904 
struct pci_dynids {
	spinlock_t		lock;	/* Protects list */
	struct list_head	list;	/* For IDs added at runtime */
};
909 
910 
911 /*
912  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
913  * a set of callbacks in struct pci_error_handlers, that device driver
914  * will be notified of PCI bus errors, and will be driven to recovery
915  * when an error occurs.
916  */
917 
918 typedef unsigned int __bitwise pci_ers_result_t;
919 
/* Values start at 1, so a zeroed pci_ers_result_t is never a valid result */
enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};
939 
/*
 * PCI bus error event callbacks.  See Documentation/PCI/pci-error-recovery.rst
 * for when each callback is invoked during the recovery sequence.
 */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   pci_channel_state_t error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);

	/* Allow device driver to record more details of a correctable error */
	void (*cor_error_detected)(struct pci_dev *dev);
};
962 
963 
964 struct module;
965 
966 /**
967  * struct pci_driver - PCI driver structure
968  * @name:	Driver name.
969  * @id_table:	Pointer to table of device IDs the driver is
970  *		interested in.  Most drivers should export this
971  *		table using MODULE_DEVICE_TABLE(pci,...).
972  * @probe:	This probing function gets called (during execution
973  *		of pci_register_driver() for already existing
974  *		devices or later if a new device gets inserted) for
975  *		all PCI devices which match the ID table and are not
976  *		"owned" by the other drivers yet. This function gets
977  *		passed a "struct pci_dev \*" for each device whose
978  *		entry in the ID table matches the device. The probe
979  *		function returns zero when the driver chooses to
980  *		take "ownership" of the device or an error code
981  *		(negative number) otherwise.
982  *		The probe function always gets called from process
983  *		context, so it can sleep.
984  * @remove:	The remove() function gets called whenever a device
985  *		being handled by this driver is removed (either during
986  *		deregistration of the driver or when it's manually
987  *		pulled out of a hot-pluggable slot).
988  *		The remove function always gets called from process
989  *		context, so it can sleep.
990  * @suspend:	Put device into low power state.
991  * @resume:	Wake device from low power state.
992  *		(Please see Documentation/power/pci.rst for descriptions
993  *		of PCI Power Management and the related functions.)
994  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
995  *		Intended to stop any idling DMA operations.
996  *		Useful for enabling wake-on-lan (NIC) or changing
997  *		the power state of a device before reboot.
998  *		e.g. drivers/net/e100.c.
999  * @sriov_configure: Optional driver callback to allow configuration of
1000  *		number of VFs to enable via sysfs "sriov_numvfs" file.
1001  * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
1002  *              vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
1003  *              This will change MSI-X Table Size in the VF Message Control
1004  *              registers.
1005  * @sriov_get_vf_total_msix: PF driver callback to get the total number of
1006  *              MSI-X vectors available for distribution to the VFs.
1007  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
1008  * @groups:	Sysfs attribute groups.
1009  * @dev_groups: Attributes attached to the device that will be
1010  *              created once it is bound to the driver.
1011  * @driver:	Driver model structure.
1012  * @dynids:	List of dynamically added device IDs.
1013  * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
1014  *		For most device drivers, no need to care about this flag
1015  *		as long as all DMAs are handled through the kernel DMA API.
1016  *		For some special ones, for example VFIO drivers, they know
1017  *		how to manage the DMA themselves and set this flag so that
1018  *		the IOMMU layer will allow them to setup and manage their
1019  *		own I/O address space.
1020  */
1021 struct pci_driver {
1022 	const char		*name;
1023 	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
1024 	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
1025 	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
1026 	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
1027 	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
1028 	void (*shutdown)(struct pci_dev *dev);
1029 	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
1030 	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
1031 	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
1032 	const struct pci_error_handlers *err_handler;
1033 	const struct attribute_group **groups;
1034 	const struct attribute_group **dev_groups;
1035 	struct device_driver	driver;
1036 	struct pci_dynids	dynids;
1037 	bool driver_managed_dma;
1038 };
1039 
/* Map a struct device_driver back to its containing pci_driver; NULL-safe */
#define to_pci_driver(__drv)	\
	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
1042 
1043 /**
1044  * PCI_DEVICE - macro used to describe a specific PCI device
1045  * @vend: the 16 bit PCI Vendor ID
1046  * @dev: the 16 bit PCI Device ID
1047  *
1048  * This macro is used to create a struct pci_device_id that matches a
1049  * specific device.  The subvendor and subdevice fields will be set to
1050  * PCI_ANY_ID.
1051  */
1052 #define PCI_DEVICE(vend,dev) \
1053 	.vendor = (vend), .device = (dev), \
1054 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1055 
1056 /**
1057  * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
1058  *                              override_only flags.
1059  * @vend: the 16 bit PCI Vendor ID
1060  * @dev: the 16 bit PCI Device ID
1061  * @driver_override: the 32 bit PCI Device override_only
1062  *
1063  * This macro is used to create a struct pci_device_id that matches only a
1064  * driver_override device. The subvendor and subdevice fields will be set to
1065  * PCI_ANY_ID.
1066  */
1067 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
1068 	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
1069 	.subdevice = PCI_ANY_ID, .override_only = (driver_override)
1070 
1071 /**
1072  * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1073  *                                   "driver_override" PCI device.
1074  * @vend: the 16 bit PCI Vendor ID
1075  * @dev: the 16 bit PCI Device ID
1076  *
1077  * This macro is used to create a struct pci_device_id that matches a
1078  * specific device. The subvendor and subdevice fields will be set to
1079  * PCI_ANY_ID and the driver_override will be set to
1080  * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1081  */
1082 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1083 	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1084 
1085 /**
1086  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1087  * @vend: the 16 bit PCI Vendor ID
1088  * @dev: the 16 bit PCI Device ID
1089  * @subvend: the 16 bit PCI Subvendor ID
1090  * @subdev: the 16 bit PCI Subdevice ID
1091  *
1092  * This macro is used to create a struct pci_device_id that matches a
1093  * specific device with subsystem information.
1094  */
1095 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1096 	.vendor = (vend), .device = (dev), \
1097 	.subvendor = (subvend), .subdevice = (subdev)
1098 
1099 /**
1100  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1101  * @dev_class: the class, subclass, prog-if triple for this device
1102  * @dev_class_mask: the class mask for this device
1103  *
1104  * This macro is used to create a struct pci_device_id that matches a
1105  * specific PCI class.  The vendor, device, subvendor, and subdevice
1106  * fields will be set to PCI_ANY_ID.
1107  */
1108 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1109 	.class = (dev_class), .class_mask = (dev_class_mask), \
1110 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1111 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1112 
1113 /**
1114  * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1115  * @vend: the vendor name
1116  * @dev: the 16 bit PCI Device ID
1117  *
1118  * This macro is used to create a struct pci_device_id that matches a
1119  * specific PCI device.  The subvendor, and subdevice fields will be set
1120  * to PCI_ANY_ID. The macro allows the next field to follow as the device
1121  * private data.
1122  */
1123 #define PCI_VDEVICE(vend, dev) \
1124 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1125 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1126 
1127 /**
1128  * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1129  * @vend: the vendor name
1130  * @dev: the 16 bit PCI Device ID
1131  * @subvend: the 16 bit PCI Subvendor ID
1132  * @subdev: the 16 bit PCI Subdevice ID
1133  *
1134  * Generate the pci_device_id struct layout for the specific PCI
1135  * device/subdevice. Private data may follow the output.
1136  */
1137 #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
1138 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1139 	.subvendor = (subvend), .subdevice = (subdev), 0, 0
1140 
1141 /**
1142  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1143  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1144  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1145  * @data: the driver data to be filled
1146  *
1147  * This macro is used to create a struct pci_device_id that matches a
1148  * specific PCI device.  The subvendor, and subdevice fields will be set
1149  * to PCI_ANY_ID.
1150  */
1151 #define PCI_DEVICE_DATA(vend, dev, data) \
1152 	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1153 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1154 	.driver_data = (kernel_ulong_t)(data)
1155 
/* Values for the global pci_flags word (see pci_set_flags() and friends) */
enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};
1165 
/* Interrupt types a caller is willing to accept when allocating vectors */
#define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
1170 
1171 /* These external functions are only available when PCI support is enabled */
1172 #ifdef CONFIG_PCI
1173 
extern unsigned int pci_flags;	/* Bitmask of the PCI_* flag values above */

/* Replace/set/clear/test the global flags; plain unlocked read-modify-write */
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1180 
1181 void pcie_bus_configure_settings(struct pci_bus *bus);
1182 
/* Policies for configuring PCIe Max Payload Size (MPS) across a bus */
enum pcie_bus_config_types {
	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
};
1190 
1191 extern enum pcie_bus_config_types pcie_bus_config;
1192 
1193 extern const struct bus_type pci_bus_type;
1194 
/*
 * Do NOT directly access these two variables (pci_bus_type above and
 * pci_root_buses below) unless you are arch-specific PCI code or PCI
 * core code.
 */
1197 extern struct list_head pci_root_buses;	/* List of all known PCI buses */
1198 
1199 void pcibios_resource_survey_bus(struct pci_bus *bus);
1200 void pcibios_bus_add_device(struct pci_dev *pdev);
1201 void pcibios_add_bus(struct pci_bus *bus);
1202 void pcibios_remove_bus(struct pci_bus *bus);
1203 void pcibios_fixup_bus(struct pci_bus *);
1204 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1205 /* Architecture-specific versions may override this (weak) */
1206 char *pcibios_setup(char *str);
1207 
1208 /* Used only when drivers/pci/setup.c is used */
1209 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1210 				       const struct resource *empty_res,
1211 				       resource_size_t size,
1212 				       resource_size_t align);
1213 resource_size_t pci_align_resource(struct pci_dev *dev,
1214 				   const struct resource *res,
1215 				   const struct resource *empty_res,
1216 				   resource_size_t size,
1217 				   resource_size_t align);
1218 
1219 /* Generic PCI functions used internally */
1220 
1221 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1222 			     struct resource *res);
1223 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1224 			     struct pci_bus_region *region);
1225 void pcibios_scan_specific_bus(int busn);
1226 struct pci_bus *pci_find_bus(int domain, int busnr);
1227 void pci_bus_add_devices(const struct pci_bus *bus);
1228 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1229 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1230 				    struct pci_ops *ops, void *sysdata,
1231 				    struct list_head *resources);
1232 int pci_host_probe(struct pci_host_bridge *bridge);
1233 void pci_probe_flush_workqueue(void);
1234 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1235 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1236 void pci_bus_release_busn_res(struct pci_bus *b);
1237 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1238 				  struct pci_ops *ops, void *sysdata,
1239 				  struct list_head *resources);
1240 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1241 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1242 				int busnr);
1243 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1244 				 const char *name,
1245 				 struct hotplug_slot *hotplug);
1246 void pci_destroy_slot(struct pci_slot *slot);
#ifdef CONFIG_SYSFS
void pci_dev_assign_slot(struct pci_dev *dev);
#else
/* Stub when sysfs support is compiled out */
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
#endif
1252 int pci_scan_slot(struct pci_bus *bus, int devfn);
1253 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1254 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1255 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1256 void pci_bus_add_device(struct pci_dev *dev);
1257 void pci_read_bridge_bases(struct pci_bus *child);
1258 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1259 					  struct resource *res);
1260 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1261 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1262 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1263 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1264 void pci_dev_put(struct pci_dev *dev);
1265 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1266 void pci_remove_bus(struct pci_bus *b);
1267 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1268 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1269 void pci_stop_root_bus(struct pci_bus *bus);
1270 void pci_remove_root_bus(struct pci_bus *bus);
#ifdef CONFIG_CARDBUS
void pci_setup_cardbus_bridge(struct pci_bus *bus);
#else
/* Stub when CardBus support is compiled out */
static inline void pci_setup_cardbus_bridge(struct pci_bus *bus) { }
#endif
1276 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1277 void pci_sort_breadthfirst(void);
1278 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1279 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1280 
1281 /* Generic PCI functions exported to card drivers */
1282 
1283 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1284 u8 pci_find_capability(struct pci_dev *dev, int cap);
1285 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1286 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1287 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1288 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1289 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1290 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1291 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1292 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1293 
1294 u64 pci_get_dsn(struct pci_dev *dev);
1295 
1296 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1297 			       struct pci_dev *from);
1298 struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
1299 				       struct pci_dev *from);
1300 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1301 			       unsigned int ss_vendor, unsigned int ss_device,
1302 			       struct pci_dev *from);
1303 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1304 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1305 					    unsigned int devfn);
1306 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1307 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1308 
1309 int pci_dev_present(const struct pci_device_id *ids);
1310 
1311 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1312 			     int where, u8 *val);
1313 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1314 			     int where, u16 *val);
1315 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1316 			      int where, u32 *val);
1317 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1318 			      int where, u8 val);
1319 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1320 			      int where, u16 val);
1321 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1322 			       int where, u32 val);
1323 
1324 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1325 			    int where, int size, u32 *val);
1326 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1327 			    int where, int size, u32 val);
1328 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1329 			      int where, int size, u32 *val);
1330 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1331 			       int where, int size, u32 val);
1332 
1333 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1334 
1335 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1336 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1337 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1338 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1339 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1340 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1341 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1342 				    u32 clear, u32 set);
1343 
1344 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1345 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1346 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1347 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1348 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1349 						u16 clear, u16 set);
1350 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1351 					      u16 clear, u16 set);
1352 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1353 					u32 clear, u32 set);
1354 
1355 /**
1356  * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1357  * @dev:	PCI device structure of the PCI Express device
1358  * @pos:	PCI Express Capability Register
1359  * @clear:	Clear bitmask
1360  * @set:	Set bitmask
1361  *
1362  * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1363  * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1364  * Capability Registers are accessed concurrently in RMW fashion, hence
1365  * require locking which is handled transparently to the caller.
1366  */
1367 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1368 						     int pos,
1369 						     u16 clear, u16 set)
1370 {
1371 	switch (pos) {
1372 	case PCI_EXP_LNKCTL:
1373 	case PCI_EXP_LNKCTL2:
1374 	case PCI_EXP_RTCTL:
1375 		return pcie_capability_clear_and_set_word_locked(dev, pos,
1376 								 clear, set);
1377 	default:
1378 		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1379 								   clear, set);
1380 	}
1381 }
1382 
/* Convenience wrappers around the clear-and-set RMW config accessors */

/* Set @set bits in the PCIe Capability word register at @pos */
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

/* Set @set bits in the PCIe Capability dword register at @pos */
static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

/* Clear @clear bits in the PCIe Capability word register at @pos */
static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

/* Clear @clear bits in the PCIe Capability dword register at @pos */
static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
1406 
1407 /* User-space driven config access */
1408 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1409 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1410 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1411 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1412 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1413 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1414 
1415 int __must_check pci_enable_device(struct pci_dev *dev);
1416 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1417 int __must_check pci_reenable_device(struct pci_dev *);
1418 int __must_check pcim_enable_device(struct pci_dev *pdev);
1419 void pcim_pin_device(struct pci_dev *pdev);
1420 
1421 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1422 {
1423 	/*
1424 	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1425 	 * writable and no quirk has marked the feature broken.
1426 	 */
1427 	return !pdev->broken_intx_masking;
1428 }
1429 
1430 static inline int pci_is_enabled(struct pci_dev *pdev)
1431 {
1432 	return (atomic_read(&pdev->enable_cnt) > 0);
1433 }
1434 
/*
 * Returns the device's is_managed flag.  NOTE(review): presumably set when
 * the device was enabled via the managed pcim_*() API — confirm in pci.c.
 */
static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}
1439 
1440 void pci_disable_device(struct pci_dev *dev);
1441 
1442 extern unsigned int pcibios_max_latency;
1443 void pci_set_master(struct pci_dev *dev);
1444 void pci_clear_master(struct pci_dev *dev);
1445 
1446 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1447 int pci_set_cacheline_size(struct pci_dev *dev);
1448 int __must_check pci_set_mwi(struct pci_dev *dev);
1449 int __must_check pcim_set_mwi(struct pci_dev *dev);
1450 int pci_try_set_mwi(struct pci_dev *dev);
1451 void pci_clear_mwi(struct pci_dev *dev);
1452 void pci_disable_parity(struct pci_dev *dev);
1453 void pci_intx(struct pci_dev *dev, int enable);
1454 bool pci_check_and_mask_intx(struct pci_dev *dev);
1455 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1456 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1457 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1458 int pcix_get_max_mmrbc(struct pci_dev *dev);
1459 int pcix_get_mmrbc(struct pci_dev *dev);
1460 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1461 int pcie_get_readrq(struct pci_dev *dev);
1462 int pcie_set_readrq(struct pci_dev *dev, int rq);
1463 int pcie_get_mps(struct pci_dev *dev);
1464 int pcie_set_mps(struct pci_dev *dev, int mps);
1465 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1466 			     enum pci_bus_speed *speed,
1467 			     enum pcie_link_width *width);
1468 int pcie_link_speed_mbps(struct pci_dev *pdev);
1469 void pcie_print_link_status(struct pci_dev *dev);
1470 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1471 int pcie_flr(struct pci_dev *dev);
1472 int __pci_reset_function_locked(struct pci_dev *dev);
1473 int pci_reset_function(struct pci_dev *dev);
1474 int pci_reset_function_locked(struct pci_dev *dev);
1475 int pci_try_reset_function(struct pci_dev *dev);
1476 int pci_probe_reset_slot(struct pci_slot *slot);
1477 int pci_probe_reset_bus(struct pci_bus *bus);
1478 int pci_reset_bus(struct pci_dev *dev);
1479 void pci_reset_secondary_bus(struct pci_dev *dev);
1480 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1481 void pci_update_resource(struct pci_dev *dev, int resno);
1482 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1483 int pci_release_resource(struct pci_dev *dev, int resno);
1484 
1485 /* Resizable BAR related routines */
1486 int pci_rebar_bytes_to_size(u64 bytes);
1487 resource_size_t pci_rebar_size_to_bytes(int size);
1488 u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1489 bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size);
1490 int pci_rebar_get_max_size(struct pci_dev *pdev, int bar);
1491 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size,
1492 				     int exclude_bars);
1493 
1494 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1495 bool pci_device_is_present(struct pci_dev *pdev);
1496 void pci_ignore_hotplug(struct pci_dev *dev);
1497 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1498 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1499 
1500 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1501 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1502 		const char *fmt, ...);
1503 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1504 
1505 /* ROM control related routines */
1506 int pci_enable_rom(struct pci_dev *pdev);
1507 void pci_disable_rom(struct pci_dev *pdev);
1508 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1509 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1510 
1511 /* Power management related routines */
1512 int pci_save_state(struct pci_dev *dev);
1513 void pci_restore_state(struct pci_dev *dev);
1514 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1515 int pci_load_saved_state(struct pci_dev *dev,
1516 			 struct pci_saved_state *state);
1517 int pci_load_and_free_saved_state(struct pci_dev *dev,
1518 				  struct pci_saved_state **state);
1519 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1520 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1521 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1522 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1523 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1524 void pci_pme_active(struct pci_dev *dev, bool enable);
1525 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1526 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1527 int pci_prepare_to_sleep(struct pci_dev *dev);
1528 int pci_back_from_sleep(struct pci_dev *dev);
1529 bool pci_dev_run_wake(struct pci_dev *dev);
1530 void pci_d3cold_enable(struct pci_dev *dev);
1531 void pci_d3cold_disable(struct pci_dev *dev);
1532 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1533 void pci_resume_bus(struct pci_bus *bus);
1534 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1535 
1536 /* For use by arch with custom probe code */
1537 void set_pcie_port_type(struct pci_dev *pdev);
1538 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1539 
1540 /* Functions for PCI Hotplug drivers to use */
1541 unsigned int pci_rescan_bus(struct pci_bus *bus);
1542 void pci_lock_rescan_remove(void);
1543 void pci_unlock_rescan_remove(void);
1544 
1545 /* Vital Product Data routines */
1546 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1547 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1548 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1549 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1550 
1551 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1552 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1553 void pci_bus_assign_resources(const struct pci_bus *bus);
1554 void pci_bus_claim_resources(struct pci_bus *bus);
1555 void pci_bus_size_bridges(struct pci_bus *bus);
1556 int pci_claim_resource(struct pci_dev *, int);
1557 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1558 void pci_assign_unassigned_resources(void);
1559 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1560 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1561 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1562 int pci_enable_resources(struct pci_dev *, int mask);
1563 void pci_assign_irq(struct pci_dev *dev);
1564 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1565 #define HAVE_PCI_REQ_REGIONS	2
1566 int __must_check pci_request_regions(struct pci_dev *, const char *);
1567 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1568 void pci_release_regions(struct pci_dev *);
1569 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1570 void pci_release_region(struct pci_dev *, int);
1571 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1572 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1573 void pci_release_selected_regions(struct pci_dev *, int);
1574 
1575 static inline __must_check struct resource *
1576 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1577 				    unsigned int len, const char *name)
1578 {
1579 	return __request_region(&pdev->driver_exclusive_resource, offset, len,
1580 				name, IORESOURCE_EXCLUSIVE);
1581 }
1582 
/*
 * Release a config-space range previously claimed with
 * pci_request_config_region_exclusive() (same @offset and @len).
 */
static inline void pci_release_config_region(struct pci_dev *pdev,
					     unsigned int offset,
					     unsigned int len)
{
	__release_region(&pdev->driver_exclusive_resource, offset, len);
}
1589 
1590 /* drivers/pci/bus.c */
1591 void pci_add_resource(struct list_head *resources, struct resource *res);
1592 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1593 			     resource_size_t offset);
1594 void pci_free_resource_list(struct list_head *resources);
1595 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1596 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1597 void pci_bus_remove_resources(struct pci_bus *bus);
1598 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1599 int devm_request_pci_bus_resources(struct device *dev,
1600 				   struct list_head *resources);
1601 
1602 /* Temporary until new and working PCI SBR API in place */
1603 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1604 
/*
 * Implementation helpers for pci_bus_for_each_resource(); the 0/1 suffix
 * is the number of optional index arguments supplied by the caller (res1
 * exposes the iterator variable, res0 keeps it private).
 */
#define __pci_bus_for_each_res0(bus, res, ...)				\
	for (unsigned int __b = 0;					\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)

#define __pci_bus_for_each_res1(bus, res, __b)				\
	for (__b = 0;							\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)
1614 
1615 /**
1616  * pci_bus_for_each_resource - iterate over PCI bus resources
1617  * @bus: the PCI bus
1618  * @res: pointer to the current resource
1619  * @...: optional index of the current resource
1620  *
1621  * Iterate over PCI bus resources. The first part is to go over PCI bus
1622  * resource array, which has at most the %PCI_BRIDGE_RESOURCE_NUM entries.
1623  * After that continue with the separate list of the additional resources,
1624  * if not empty. That's why the Logical OR is being used.
1625  *
1626  * Possible usage:
1627  *
1628  *	struct pci_bus *bus = ...;
1629  *	struct resource *res;
1630  *	unsigned int i;
1631  *
1632  * 	// With optional index
1633  * 	pci_bus_for_each_resource(bus, res, i)
1634  * 		pr_info("PCI bus resource[%u]: %pR\n", i, res);
1635  *
1636  * 	// Without index
1637  * 	pci_bus_for_each_resource(bus, res)
1638  * 		_do_something_(res);
1639  */
1640 #define pci_bus_for_each_resource(bus, res, ...)			\
1641 	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
1642 		    (bus, res, __VA_ARGS__)
1643 
1644 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1645 			struct resource *res, resource_size_t size,
1646 			resource_size_t align, resource_size_t min,
1647 			unsigned long type_mask,
1648 			resource_alignf alignf,
1649 			void *alignf_data);
1650 
1651 
1652 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1653 			resource_size_t size);
1654 unsigned long pci_address_to_pio(phys_addr_t addr);
1655 phys_addr_t pci_pio_to_address(unsigned long pio);
1656 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1657 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1658 			   phys_addr_t phys_addr);
1659 void pci_unmap_iospace(struct resource *res);
1660 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1661 				      resource_size_t offset,
1662 				      resource_size_t size);
1663 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1664 					  struct resource *res);
1665 
/*
 * pci_bus_address - bus-side start address of BAR @bar of @pdev
 *
 * Translates the CPU-side resource through the host bridge's window
 * offset via pcibios_resource_to_bus() and returns the resulting bus
 * address (which may differ from the CPU physical address).
 */
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
	struct pci_bus_region region;

	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
	return region.start;
}
1673 
1674 /* Proper probing supporting hot-pluggable devices */
1675 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1676 				       const char *mod_name);
1677 
1678 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1679 #define pci_register_driver(driver)		\
1680 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1681 
1682 void pci_unregister_driver(struct pci_driver *dev);
1683 
1684 /**
1685  * module_pci_driver() - Helper macro for registering a PCI driver
1686  * @__pci_driver: pci_driver struct
1687  *
1688  * Helper macro for PCI drivers which do not do anything special in module
1689  * init/exit. This eliminates a lot of boilerplate. Each module may only
1690  * use this macro once, and calling it replaces module_init() and module_exit()
1691  */
1692 #define module_pci_driver(__pci_driver) \
1693 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1694 
1695 /**
1696  * builtin_pci_driver() - Helper macro for registering a PCI driver
1697  * @__pci_driver: pci_driver struct
1698  *
1699  * Helper macro for PCI drivers which do not do anything special in their
1700  * init code. This eliminates a lot of boilerplate. Each driver may only
1701  * use this macro once, and calling it replaces device_initcall(...)
1702  */
1703 #define builtin_pci_driver(__pci_driver) \
1704 	builtin_driver(__pci_driver, pci_register_driver)
1705 
1706 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1707 int pci_add_dynid(struct pci_driver *drv,
1708 		  unsigned int vendor, unsigned int device,
1709 		  unsigned int subvendor, unsigned int subdevice,
1710 		  unsigned int class, unsigned int class_mask,
1711 		  unsigned long driver_data);
1712 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1713 					 struct pci_dev *dev);
1714 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1715 		    int pass);
1716 
1717 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1718 		  void *userdata);
1719 void pci_walk_bus_reverse(struct pci_bus *top,
1720 			  int (*cb)(struct pci_dev *, void *), void *userdata);
1721 int pci_cfg_space_size(struct pci_dev *dev);
1722 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1723 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1724 					 unsigned long type);
1725 
1726 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1727 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1728 
1729 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1730 		      unsigned int command_bits, u32 flags);
1731 
1732 /*
1733  * Virtual interrupts allow for more interrupts to be allocated
1734  * than the device has interrupts for. These are not programmed
1735  * into the device's MSI-X table and must be handled by some
1736  * other driver means.
1737  */
1738 #define PCI_IRQ_VIRTUAL		(1 << 4)
1739 
1740 #define PCI_IRQ_ALL_TYPES	(PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1741 
1742 #include <linux/dmapool.h>
1743 
1744 struct msix_entry {
1745 	u32	vector;	/* Kernel uses to write allocated vector */
1746 	u16	entry;	/* Driver uses to specify entry, OS writes */
1747 };
1748 
1749 #ifdef CONFIG_PCI_MSI
1750 int pci_msi_vec_count(struct pci_dev *dev);
1751 void pci_disable_msi(struct pci_dev *dev);
1752 int pci_msix_vec_count(struct pci_dev *dev);
1753 void pci_disable_msix(struct pci_dev *dev);
1754 void pci_restore_msi_state(struct pci_dev *dev);
1755 bool pci_msi_enabled(void);
1756 int pci_enable_msi(struct pci_dev *dev);
1757 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1758 			  int minvec, int maxvec);
/*
 * Enable exactly @nvec MSI-X vectors — a thin wrapper around
 * pci_enable_msix_range() with min == max, so partial allocations are
 * refused.  Returns 0 on success or a negative errno on failure.
 */
static inline int pci_enable_msix_exact(struct pci_dev *dev,
					struct msix_entry *entries, int nvec)
{
	int ret = pci_enable_msix_range(dev, entries, nvec, nvec);

	return ret < 0 ? ret : 0;
}
1767 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1768 			  unsigned int max_vecs, unsigned int flags);
1769 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1770 				   unsigned int max_vecs, unsigned int flags,
1771 				   struct irq_affinity *affd);
1772 
1773 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1774 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1775 				     const struct irq_affinity_desc *affdesc);
1776 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1777 
1778 void pci_free_irq_vectors(struct pci_dev *dev);
1779 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1780 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1781 
1782 #else
/*
 * CONFIG_PCI_MSI=n stubs: drivers fall back to legacy INTx.  Query and
 * enable paths fail with -ENOSYS, teardown helpers are no-ops.
 */
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline bool pci_msi_enabled(void) { return false; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
			struct msix_entry *entries, int minvec, int maxvec)
{ return -ENOSYS; }
static inline int pci_enable_msix_exact(struct pci_dev *dev,
			struct msix_entry *entries, int nvec)
{ return -ENOSYS; }

static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
			       unsigned int max_vecs, unsigned int flags,
			       struct irq_affinity *aff_desc)
{
	/* Legacy INTx can still satisfy a single-vector request */
	if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
		return 1;
	return -ENOSPC;
}
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		      unsigned int max_vecs, unsigned int flags)
{
	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
					      flags, NULL);
}

static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
{ return false; }
static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
						   const struct irq_affinity_desc *affdesc)
{
	/* Failure is reported in-band through msi_map.index */
	struct msi_map map = { .index = -ENOSYS, };

	return map;
}

static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
{
}

static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}

static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	/* Without MSI only the single legacy IRQ (nr == 0) exists */
	if (WARN_ON_ONCE(nr > 0))
		return -EINVAL;
	return dev->irq;
}
static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
		int vec)
{
	/* No per-vector affinity without MSI; any CPU may be targeted */
	return cpu_possible_mask;
}
1844 #endif
1845 
1846 /**
1847  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1848  * @d: the INTx IRQ domain
1849  * @node: the DT node for the device whose interrupt we're translating
1850  * @intspec: the interrupt specifier data from the DT
1851  * @intsize: the number of entries in @intspec
1852  * @out_hwirq: pointer at which to write the hwirq number
1853  * @out_type: pointer at which to write the interrupt type
1854  *
1855  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1856  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1857  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1858  * INTx value to obtain the hwirq number.
1859  *
1860  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1861  */
1862 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1863 				      struct device_node *node,
1864 				      const u32 *intspec,
1865 				      unsigned int intsize,
1866 				      unsigned long *out_hwirq,
1867 				      unsigned int *out_type)
1868 {
1869 	const u32 intx = intspec[0];
1870 
1871 	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1872 		return -EINVAL;
1873 
1874 	*out_hwirq = intx - PCI_INTERRUPT_INTA;
1875 	return 0;
1876 }
1877 
1878 #ifdef CONFIG_PCIEPORTBUS
1879 extern bool pcie_ports_disabled;
1880 extern bool pcie_ports_native;
1881 
1882 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1883 			  bool use_lt);
1884 #else
1885 #define pcie_ports_disabled	true
1886 #define pcie_ports_native	false
1887 
1888 static inline int pcie_set_target_speed(struct pci_dev *port,
1889 					enum pci_bus_speed speed_req,
1890 					bool use_lt)
1891 {
1892 	return -EOPNOTSUPP;
1893 }
1894 #endif
1895 
1896 #define PCIE_LINK_STATE_L0S		(BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1897 #define PCIE_LINK_STATE_L1		BIT(2)	/* L1 state */
1898 #define PCIE_LINK_STATE_L1_1		BIT(3)	/* ASPM L1.1 state */
1899 #define PCIE_LINK_STATE_L1_2		BIT(4)	/* ASPM L1.2 state */
1900 #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)	/* PCI-PM L1.1 state */
1901 #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)	/* PCI-PM L1.2 state */
1902 #define PCIE_LINK_STATE_ASPM_ALL	(PCIE_LINK_STATE_L0S		|\
1903 					 PCIE_LINK_STATE_L1		|\
1904 					 PCIE_LINK_STATE_L1_1		|\
1905 					 PCIE_LINK_STATE_L1_2		|\
1906 					 PCIE_LINK_STATE_L1_1_PCIPM	|\
1907 					 PCIE_LINK_STATE_L1_2_PCIPM)
1908 #define PCIE_LINK_STATE_CLKPM		BIT(7)
1909 #define PCIE_LINK_STATE_ALL		(PCIE_LINK_STATE_ASPM_ALL	|\
1910 					 PCIE_LINK_STATE_CLKPM)
1911 
1912 #ifdef CONFIG_PCIEASPM
1913 int pci_disable_link_state(struct pci_dev *pdev, int state);
1914 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1915 int pci_enable_link_state(struct pci_dev *pdev, int state);
1916 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1917 void pcie_no_aspm(void);
1918 bool pcie_aspm_support_enabled(void);
1919 bool pcie_aspm_enabled(struct pci_dev *pdev);
1920 #else
1921 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1922 { return 0; }
1923 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1924 { return 0; }
1925 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1926 { return 0; }
1927 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1928 { return 0; }
1929 static inline void pcie_no_aspm(void) { }
1930 static inline bool pcie_aspm_support_enabled(void) { return false; }
1931 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1932 #endif
1933 
1934 #ifdef CONFIG_HOTPLUG_PCI
1935 void pci_hp_ignore_link_change(struct pci_dev *pdev);
1936 void pci_hp_unignore_link_change(struct pci_dev *pdev);
1937 #else
1938 static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
1939 static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
1940 #endif
1941 
1942 #ifdef CONFIG_PCIEAER
1943 bool pci_aer_available(void);
1944 #else
1945 static inline bool pci_aer_available(void) { return false; }
1946 #endif
1947 
1948 bool pci_ats_disabled(void);
1949 
1950 #define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
1951 #define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
1952 
1953 struct pcie_ptm_ops {
1954 	int (*check_capability)(void *drvdata);
1955 	int (*context_update_write)(void *drvdata, u8 mode);
1956 	int (*context_update_read)(void *drvdata, u8 *mode);
1957 	int (*context_valid_write)(void *drvdata, bool valid);
1958 	int (*context_valid_read)(void *drvdata, bool *valid);
1959 	int (*local_clock_read)(void *drvdata, u64 *clock);
1960 	int (*master_clock_read)(void *drvdata, u64 *clock);
1961 	int (*t1_read)(void *drvdata, u64 *clock);
1962 	int (*t2_read)(void *drvdata, u64 *clock);
1963 	int (*t3_read)(void *drvdata, u64 *clock);
1964 	int (*t4_read)(void *drvdata, u64 *clock);
1965 
1966 	bool (*context_update_visible)(void *drvdata);
1967 	bool (*context_valid_visible)(void *drvdata);
1968 	bool (*local_clock_visible)(void *drvdata);
1969 	bool (*master_clock_visible)(void *drvdata);
1970 	bool (*t1_visible)(void *drvdata);
1971 	bool (*t2_visible)(void *drvdata);
1972 	bool (*t3_visible)(void *drvdata);
1973 	bool (*t4_visible)(void *drvdata);
1974 };
1975 
1976 struct pci_ptm_debugfs {
1977 	struct dentry *debugfs;
1978 	const struct pcie_ptm_ops *ops;
1979 	struct mutex lock;
1980 	void *pdata;
1981 };
1982 
1983 #ifdef CONFIG_PCIE_PTM
1984 int pci_enable_ptm(struct pci_dev *dev);
1985 void pci_disable_ptm(struct pci_dev *dev);
1986 bool pcie_ptm_enabled(struct pci_dev *dev);
1987 #else
1988 static inline int pci_enable_ptm(struct pci_dev *dev)
1989 { return -EINVAL; }
1990 static inline void pci_disable_ptm(struct pci_dev *dev) { }
1991 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1992 { return false; }
1993 #endif
1994 
1995 #if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
1996 struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1997 						const struct pcie_ptm_ops *ops);
1998 void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
1999 #else
2000 static inline struct pci_ptm_debugfs
2001 *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
2002 			 const struct pcie_ptm_ops *ops) { return NULL; }
2003 static inline void
2004 pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
2005 #endif
2006 
2007 void pci_cfg_access_lock(struct pci_dev *dev);
2008 bool pci_cfg_access_trylock(struct pci_dev *dev);
2009 void pci_cfg_access_unlock(struct pci_dev *dev);
2010 
2011 void pci_dev_lock(struct pci_dev *dev);
2012 int pci_dev_trylock(struct pci_dev *dev);
2013 void pci_dev_unlock(struct pci_dev *dev);
2014 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
2015 
2016 /*
2017  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
2018  * a PCI domain is defined to be a set of PCI buses which share
2019  * configuration space.
2020  */
2021 #ifdef CONFIG_PCI_DOMAINS
2022 extern int pci_domains_supported;
2023 int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max);
2024 void pci_bus_release_emul_domain_nr(int domain_nr);
2025 #else
2026 enum { pci_domains_supported = 0 };
2027 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
2028 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
2029 static inline int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
2030 {
2031 	return 0;
2032 }
2033 static inline void pci_bus_release_emul_domain_nr(int domain_nr) { }
2034 #endif /* CONFIG_PCI_DOMAINS */
2035 
2036 /*
2037  * Generic implementation for PCI domain support. If your
2038  * architecture does not need custom management of PCI
2039  * domains then this implementation will be used
2040  */
2041 #ifdef CONFIG_PCI_DOMAINS_GENERIC
2042 static inline int pci_domain_nr(struct pci_bus *bus)
2043 {
2044 	return bus->domain_nr;
2045 }
2046 #ifdef CONFIG_ACPI
2047 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
2048 #else
2049 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
2050 { return 0; }
2051 #endif
2052 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
2053 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
2054 #endif
2055 
2056 /* Some architectures require additional setup to direct VGA traffic */
2057 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
2058 				    unsigned int command_bits, u32 flags);
2059 void pci_register_set_vga_state(arch_set_vga_state_t func);
2060 
/* Request all BARs of @pdev that map I/O port space, tagged with @name */
static inline int
pci_request_io_regions(struct pci_dev *pdev, const char *name)
{
	return pci_request_selected_regions(pdev,
			    pci_select_bars(pdev, IORESOURCE_IO), name);
}
2067 
2068 static inline void
2069 pci_release_io_regions(struct pci_dev *pdev)
2070 {
2071 	return pci_release_selected_regions(pdev,
2072 			    pci_select_bars(pdev, IORESOURCE_IO));
2073 }
2074 
/* Request all BARs of @pdev that map memory space, tagged with @name */
static inline int
pci_request_mem_regions(struct pci_dev *pdev, const char *name)
{
	return pci_request_selected_regions(pdev,
			    pci_select_bars(pdev, IORESOURCE_MEM), name);
}
2081 
2082 static inline void
2083 pci_release_mem_regions(struct pci_dev *pdev)
2084 {
2085 	return pci_release_selected_regions(pdev,
2086 			    pci_select_bars(pdev, IORESOURCE_MEM));
2087 }
2088 
2089 #else /* CONFIG_PCI is not enabled */
2090 
2091 static inline void pci_set_flags(int flags) { }
2092 static inline void pci_add_flags(int flags) { }
2093 static inline void pci_clear_flags(int flags) { }
2094 static inline int pci_has_flag(int flag) { return 0; }
2095 
2096 /*
2097  * If the system does not have PCI, clearly these return errors.  Define
2098  * these as simple inline functions to avoid hair in drivers.
2099  */
2100 #define _PCI_NOP(o, s, t) \
2101 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
2102 						int where, t val) \
2103 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
2104 
2105 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
2106 				_PCI_NOP(o, word, u16 x) \
2107 				_PCI_NOP(o, dword, u32 x)
2108 _PCI_NOP_ALL(read, *)
2109 _PCI_NOP_ALL(write,)
2110 
2111 static inline void pci_probe_flush_workqueue(void) { }
2112 
2113 static inline struct pci_dev *pci_get_device(unsigned int vendor,
2114 					     unsigned int device,
2115 					     struct pci_dev *from)
2116 { return NULL; }
2117 
2118 static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
2119 						     unsigned int device,
2120 						     struct pci_dev *from)
2121 { return NULL; }
2122 
2123 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
2124 					     unsigned int device,
2125 					     unsigned int ss_vendor,
2126 					     unsigned int ss_device,
2127 					     struct pci_dev *from)
2128 { return NULL; }
2129 
2130 static inline struct pci_dev *pci_get_class(unsigned int class,
2131 					    struct pci_dev *from)
2132 { return NULL; }
2133 
2134 static inline struct pci_dev *pci_get_base_class(unsigned int class,
2135 						 struct pci_dev *from)
2136 { return NULL; }
2137 
2138 static inline int pci_dev_present(const struct pci_device_id *ids)
2139 { return 0; }
2140 
2141 #define pci_dev_put(dev)	do { } while (0)
2142 
2143 static inline void pci_set_master(struct pci_dev *dev) { }
2144 static inline void pci_clear_master(struct pci_dev *dev) { }
2145 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
2146 static inline void pci_disable_device(struct pci_dev *dev) { }
2147 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
2148 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2149 { return -EBUSY; }
2150 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2151 						     struct module *owner,
2152 						     const char *mod_name)
2153 { return 0; }
2154 static inline int pci_register_driver(struct pci_driver *drv)
2155 { return 0; }
2156 static inline void pci_unregister_driver(struct pci_driver *drv) { }
2157 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2158 { return 0; }
2159 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2160 { return 0; }
2161 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2162 { return 0; }
2163 
2164 static inline u64 pci_get_dsn(struct pci_dev *dev)
2165 { return 0; }
2166 
2167 /* Power management related routines */
2168 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
2169 static inline void pci_restore_state(struct pci_dev *dev) { }
2170 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2171 { return 0; }
2172 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2173 { return 0; }
2174 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2175 { return 0; }
2176 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2177 					   pm_message_t state)
2178 { return PCI_D0; }
2179 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2180 				  int enable)
2181 { return 0; }
2182 
2183 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2184 						 struct resource *res)
2185 { return NULL; }
2186 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2187 { return -EIO; }
2188 static inline void pci_release_regions(struct pci_dev *dev) { }
2189 
2190 static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
2191 					phys_addr_t addr, resource_size_t size)
2192 { return -EINVAL; }
2193 
2194 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2195 
2196 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2197 { return NULL; }
2198 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2199 						unsigned int devfn)
2200 { return NULL; }
2201 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2202 					unsigned int bus, unsigned int devfn)
2203 { return NULL; }
2204 
2205 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
2206 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2207 
2208 #define dev_is_pci(d) (false)
2209 #define dev_is_pf(d) (false)
2210 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2211 { return false; }
2212 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2213 				      struct device_node *node,
2214 				      const u32 *intspec,
2215 				      unsigned int intsize,
2216 				      unsigned long *out_hwirq,
2217 				      unsigned int *out_type)
2218 { return -EINVAL; }
2219 
2220 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2221 							 struct pci_dev *dev)
2222 { return NULL; }
2223 static inline bool pci_ats_disabled(void) { return true; }
2224 
/* !CONFIG_PCI stubs: no devices exist, so lookups and allocations fail */
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	return -EINVAL;
}

static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
			       unsigned int max_vecs, unsigned int flags,
			       struct irq_affinity *aff_desc)
{
	return -ENOSPC;
}
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		      unsigned int max_vecs, unsigned int flags)
{
	return -ENOSPC;
}

static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}
2247 #endif /* CONFIG_PCI */
2248 
2249 /* Include architecture-dependent settings and functions */
2250 
2251 #include <asm/pci.h>
2252 
2253 /*
2254  * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2255  * is expected to be an offset within that region.
2256  *
2257  */
2258 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2259 			    struct vm_area_struct *vma,
2260 			    enum pci_mmap_state mmap_state, int write_combine);
2261 
2262 #ifndef arch_can_pci_mmap_wc
2263 #define arch_can_pci_mmap_wc()		0
2264 #endif
2265 
2266 #ifndef arch_can_pci_mmap_io
2267 #define arch_can_pci_mmap_io()		0
2268 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2269 #else
2270 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2271 #endif
2272 
2273 #ifndef pci_root_bus_fwnode
2274 #define pci_root_bus_fwnode(bus)	NULL
2275 #endif
2276 
2277 /*
2278  * These helpers provide future and backwards compatibility
2279  * for accessing popular PCI BAR info
2280  */
2281 #define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])
2282 #define pci_resource_start(dev, bar)	(pci_resource_n(dev, bar)->start)
2283 #define pci_resource_end(dev, bar)	(pci_resource_n(dev, bar)->end)
2284 #define pci_resource_flags(dev, bar)	(pci_resource_n(dev, bar)->flags)
2285 #define pci_resource_len(dev,bar)					\
2286 	(pci_resource_end((dev), (bar)) ? 				\
2287 	 resource_size(pci_resource_n((dev), (bar))) : 0)
2288 
/*
 * Iterate over a device's resource table (at most PCI_NUM_RESOURCES
 * entries).  The 0/1 suffix is the number of optional index arguments:
 * res1 exposes the iterator variable, res0 keeps it private.  The
 * public pci_dev_for_each_resource() dispatches on the argument count
 * via COUNT_ARGS()/CONCATENATE() from <linux/args.h>.
 */
#define __pci_dev_for_each_res0(dev, res, ...)				  \
	for (unsigned int __b = 0;					  \
	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
	     __b++)

#define __pci_dev_for_each_res1(dev, res, __b)				  \
	for (__b = 0;							  \
	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
	     __b++)

#define pci_dev_for_each_resource(dev, res, ...)			\
	CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) 	\
		    (dev, res, __VA_ARGS__)
2302 
2303 /*
2304  * Similar to the helpers above, these manipulate per-pci_dev
2305  * driver-specific data.  They are really just a wrapper around
2306  * the generic device structure functions of these calls.
2307  */
/* Return the driver-private data attached to @pdev (wraps dev_get_drvdata()) */
static inline void *pci_get_drvdata(struct pci_dev *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}
2312 
/* Attach driver-private data @data to @pdev (wraps dev_set_drvdata()) */
static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}
2317 
/* Return the device name of @pdev (wraps dev_name()) */
static inline const char *pci_name(const struct pci_dev *pdev)
{
	return dev_name(&pdev->dev);
}
2322 
2323 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2324 			  const struct resource *rsrc,
2325 			  resource_size_t *start, resource_size_t *end);
2326 
2327 /*
2328  * The world is not perfect and supplies us with broken PCI devices.
2329  * For at least a part of these bugs we need a work-around, so both
2330  * generic (drivers/pci/quirks.c) and per-architecture code can define
2331  * fixup hooks to be called for particular buggy devices.
2332  */
2333 
/* One quirk entry, matched against a device's vendor/device/class IDs */
struct pci_fixup {
	u16 vendor;			/* Or PCI_ANY_ID */
	u16 device;			/* Or PCI_ANY_ID */
	u32 class;			/* Or PCI_ANY_ID */
	unsigned int class_shift;	/* should be 0, 8, 16 */
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	int hook_offset;		/* hook stored as a PC-relative offset */
#else
	void (*hook)(struct pci_dev *dev);	/* fixup to run on a match */
#endif
};
2345 
/* Phases at which quirk hooks can run; see pci_fixup_device() */
enum pci_fixup_pass {
	pci_fixup_early,	/* Before probing BARs */
	pci_fixup_header,	/* After reading configuration header */
	pci_fixup_final,	/* Final phase of device fixups */
	pci_fixup_enable,	/* pci_enable_device() time */
	pci_fixup_resume,	/* pci_device_resume() */
	pci_fixup_suspend,	/* pci_device_suspend() */
	pci_fixup_resume_early, /* pci_device_resume_early() */
	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
};
2356 
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
/*
 * Emit a 16-byte-aligned fixup record directly into section @sec.  The hook
 * is stored as a PC-relative 32-bit offset ("#hook - .") so the record
 * needs no absolute relocation.
 */
#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				    class_shift, hook)			\
	__ADDRESSABLE(hook)						\
	asm(".section "	#sec ", \"a\"				\n"	\
	    ".balign	16					\n"	\
	    ".short "	#vendor ", " #device "			\n"	\
	    ".long "	#class ", " #class_shift "		\n"	\
	    ".long "	#hook " - .				\n"	\
	    ".previous						\n");

/*
 * Clang's LTO may rename static functions in C, but has no way to
 * handle such renamings when referenced from inline asm. To work
 * around this, create global C stubs for these cases.
 */
#ifdef CONFIG_LTO_CLANG
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, stub)		\
	void stub(struct pci_dev *dev);					\
	void stub(struct pci_dev *dev)					\
	{ 								\
		hook(dev); 						\
	}								\
	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, stub)
#else
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, stub)		\
	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook)
#endif

#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook)			\
	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, __UNIQUE_ID(hook))
#else
/* Anonymous variables would be nice... */
/* Fallback: an ordinary struct pci_fixup placed in the named section */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
				  class_shift, hook)			\
	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
		= { vendor, device, class, class_shift, hook };
#endif
2402 
/*
 * Convenience wrappers around DECLARE_PCI_FIXUP_SECTION().  The CLASS
 * variants also take a class and class_shift to restrict matching by class
 * code; the plain variants further below match any class (PCI_ANY_ID, 0).
 */
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
		resume##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
		resume_early##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
		suspend##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
		suspend_late##hook, vendor, device, class, class_shift, hook)

#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
2460 
2461 #ifdef CONFIG_PCI_QUIRKS
2462 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2463 #else
/* No-op when CONFIG_PCI_QUIRKS is disabled */
static inline void pci_fixup_device(enum pci_fixup_pass pass,
				    struct pci_dev *dev) { }
2466 #endif
2467 
2468 int pcim_intx(struct pci_dev *pdev, int enabled);
2469 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2470 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2471 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2472 				const char *name);
2473 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2474 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2475 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2476 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2477 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2478 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2479 				unsigned long offset, unsigned long len);
2480 
2481 extern int pci_pci_problems;
2482 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
2483 #define PCIPCI_TRITON		2
2484 #define PCIPCI_NATOMA		4
2485 #define PCIPCI_VIAETBF		8
2486 #define PCIPCI_VSFX		16
2487 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
2488 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
2489 
2490 extern u8 pci_dfl_cache_line_size;
2491 extern u8 pci_cache_line_size;
2492 
2493 /* Architecture-specific versions may override these (weak) */
2494 void pcibios_disable_device(struct pci_dev *dev);
2495 void pcibios_set_master(struct pci_dev *dev);
2496 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2497 				 enum pcie_reset_state state);
2498 int pcibios_device_add(struct pci_dev *dev);
2499 void pcibios_release_device(struct pci_dev *dev);
2500 #ifdef CONFIG_PCI
2501 void pcibios_penalize_isa_irq(int irq, int active);
2502 #else
/* Stub for the !CONFIG_PCI case */
static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2504 #endif
2505 int pcibios_alloc_irq(struct pci_dev *dev);
2506 void pcibios_free_irq(struct pci_dev *dev);
2507 resource_size_t pcibios_default_alignment(void);
2508 
2509 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2510 extern int pci_create_resource_files(struct pci_dev *dev);
2511 extern void pci_remove_resource_files(struct pci_dev *dev);
2512 #endif
2513 
2514 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2515 void __init pci_mmcfg_early_init(void);
2516 void __init pci_mmcfg_late_init(void);
2517 #else
/* Stubs when neither CONFIG_PCI_MMCONFIG nor CONFIG_ACPI_MCFG is enabled */
static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
2520 #endif
2521 
2522 int pci_ext_cfg_avail(void);
2523 
2524 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2525 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2526 
2527 #ifdef CONFIG_PCI_IOV
2528 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2529 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2530 int pci_iov_vf_id(struct pci_dev *dev);
2531 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2532 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2533 void pci_disable_sriov(struct pci_dev *dev);
2534 
2535 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2536 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2537 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2538 int pci_num_vf(struct pci_dev *dev);
2539 int pci_vfs_assigned(struct pci_dev *dev);
2540 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2541 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2542 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2543 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2544 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
2545 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
2546 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2547 
2548 /* Arch may override these (weak) */
2549 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2550 int pcibios_sriov_disable(struct pci_dev *pdev);
2551 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2552 #else
/* Stubs for the !CONFIG_PCI_IOV case */
static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}

static inline int pci_iov_vf_id(struct pci_dev *dev)
{
	return -ENOSYS;
}

static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
					   struct pci_driver *pf_driver)
{
	return ERR_PTR(-EINVAL);
}

static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }

static inline int pci_iov_sysfs_link(struct pci_dev *dev,
				     struct pci_dev *virtfn, int id)
{
	return -ENODEV;
}
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
					 int id) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
{ return 0; }
static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
#define pci_sriov_configure_simple	NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
{ return -ENODEV; }
static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2603 #endif
2604 
2605 /**
2606  * pci_pcie_cap - get the saved PCIe capability offset
2607  * @dev: PCI device
2608  *
2609  * PCIe capability offset is calculated at PCI device initialization
2610  * time and saved in the data structure. This function returns saved
2611  * PCIe capability offset. Using this instead of pci_find_capability()
2612  * reduces unnecessary search in the PCI configuration space. If you
2613  * need to calculate PCIe capability offset from raw device for some
2614  * reasons, please use pci_find_capability() instead.
2615  */
static inline int pci_pcie_cap(struct pci_dev *dev)
{
	/* 0 when the device has no PCIe capability (see pci_is_pcie()) */
	return dev->pcie_cap;
}
2620 
2621 /**
2622  * pci_is_pcie - check if the PCI device is PCI Express capable
2623  * @dev: PCI device
2624  *
2625  * Returns: true if the PCI device is PCI Express capable, false otherwise.
2626  */
2627 static inline bool pci_is_pcie(struct pci_dev *dev)
2628 {
2629 	return pci_pcie_cap(dev);
2630 }
2631 
2632 /**
2633  * pcie_caps_reg - get the PCIe Capabilities Register
2634  * @dev: PCI device
2635  */
static inline u16 pcie_caps_reg(const struct pci_dev *dev)
{
	/* saved copy of the PCIe Capabilities Register */
	return dev->pcie_flags_reg;
}
2640 
2641 /**
2642  * pci_pcie_type - get the PCIe device/port type
2643  * @dev: PCI device
2644  */
static inline int pci_pcie_type(const struct pci_dev *dev)
{
	/* extract the PCI_EXP_FLAGS_TYPE field; the field starts at bit 4 */
	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
2649 
2650 /**
2651  * pcie_find_root_port - Get the PCIe root port device
2652  * @dev: PCI device
2653  *
2654  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2655  * for a given PCI/PCIe Device.
2656  */
2657 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2658 {
2659 	while (dev) {
2660 		if (pci_is_pcie(dev) &&
2661 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2662 			return dev;
2663 		dev = pci_upstream_bridge(dev);
2664 	}
2665 
2666 	return NULL;
2667 }
2668 
/* Return true once @dev's channel state is pci_channel_io_perm_failure */
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
	/*
	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
	 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
	 * the value (e.g. inside the loop in pci_dev_wait()).
	 */
	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
}
2678 
2679 void pci_request_acs(void);
2680 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2681 bool pci_acs_path_enabled(struct pci_dev *start,
2682 			  struct pci_dev *end, u16 acs_flags);
2683 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2684 
2685 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
2686 #define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)
2687 
2688 /* Large Resource Data Type Tag Item Names */
2689 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
2690 #define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
2691 #define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
2692 
2693 #define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2694 #define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2695 #define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2696 
2697 #define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
2698 #define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
2699 #define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
2700 #define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
2701 #define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
2702 
2703 /**
2704  * pci_vpd_alloc - Allocate buffer and read VPD into it
2705  * @dev: PCI device
2706  * @size: pointer to field where VPD length is returned
2707  *
2708  * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2709  */
2710 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2711 
2712 /**
2713  * pci_vpd_find_id_string - Locate id string in VPD
2714  * @buf: Pointer to buffered VPD data
2715  * @len: The length of the buffer area in which to search
2716  * @size: Pointer to field where length of id string is returned
2717  *
2718  * Returns the index of the id string or -ENOENT if not found.
2719  */
2720 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2721 
2722 /**
2723  * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2724  * @buf: Pointer to buffered VPD data
2725  * @len: The length of the buffer area in which to search
2726  * @kw: The keyword to search for
2727  * @size: Pointer to field where length of found keyword data is returned
2728  *
2729  * Returns the index of the information field keyword data or -ENOENT if
2730  * not found.
2731  */
2732 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2733 				 const char *kw, unsigned int *size);
2734 
2735 /**
2736  * pci_vpd_check_csum - Check VPD checksum
2737  * @buf: Pointer to buffered VPD data
2738  * @len: VPD size
2739  *
2740  * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2741  */
2742 int pci_vpd_check_csum(const void *buf, unsigned int len);
2743 
2744 /* PCI <-> OF binding helpers */
2745 #ifdef CONFIG_OF
2746 struct device_node;
2747 struct irq_domain;
2748 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2749 bool pci_host_of_has_msi_map(struct device *dev);
2750 
2751 /* Arch may override this (weak) */
2752 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2753 
2754 #else	/* CONFIG_OF */
/* Stubs for the !CONFIG_OF case */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2758 #endif  /* CONFIG_OF */
2759 
2760 static inline struct device_node *
2761 pci_device_to_OF_node(const struct pci_dev *pdev)
2762 {
2763 	return pdev ? pdev->dev.of_node : NULL;
2764 }
2765 
2766 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2767 {
2768 	return bus ? bus->dev.of_node : NULL;
2769 }
2770 
2771 #ifdef CONFIG_ACPI
2772 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2773 
2774 void
2775 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2776 bool pci_pr3_present(struct pci_dev *pdev);
2777 #else
/* Stubs for the !CONFIG_ACPI case */
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2781 #endif
2782 
2783 #if defined(CONFIG_X86) && defined(CONFIG_ACPI)
2784 bool arch_pci_dev_is_removable(struct pci_dev *pdev);
2785 #else
/* Stub when not (CONFIG_X86 && CONFIG_ACPI) */
static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
2787 #endif
2788 
2789 #ifdef CONFIG_EEH
/* EEH device state attached to @pdev via arch-specific device data */
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
2794 #endif
2795 
2796 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2797 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2798 int pci_for_each_dma_alias(struct pci_dev *pdev,
2799 			   int (*fn)(struct pci_dev *pdev,
2800 				     u16 alias, void *data), void *data);
2801 
2802 /* Helper functions for operation of device flag */
/* Set the PCI_DEV_FLAGS_ASSIGNED device flag on @pdev */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
/* Clear the PCI_DEV_FLAGS_ASSIGNED device flag on @pdev */
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
/* Test whether the PCI_DEV_FLAGS_ASSIGNED device flag is set on @pdev */
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
2815 
2816 /**
2817  * pci_ari_enabled - query ARI forwarding status
2818  * @bus: the PCI bus
2819  *
2820  * Returns true if ARI forwarding is enabled.
2821  */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	/* bus->self == NULL (no upstream bridge) means no ARI forwarding */
	return bus->self && bus->self->ari_enabled;
}
2826 
2827 /**
2828  * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2829  * @pdev: PCI device to check
2830  *
2831  * Walk upwards from @pdev and check for each encountered bridge if it's part
2832  * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
2833  * Thunderbolt-attached.  (But rather soldered to the mainboard usually.)
2834  */
2835 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2836 {
2837 	struct pci_dev *parent = pdev;
2838 
2839 	if (pdev->is_thunderbolt)
2840 		return true;
2841 
2842 	while ((parent = pci_upstream_bridge(parent)))
2843 		if (parent->is_thunderbolt)
2844 			return true;
2845 
2846 	return false;
2847 }
2848 
2849 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
2850 void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
2851 #endif
2852 
2853 #include <linux/dma-mapping.h>
2854 
/* Logging helpers: dev_*()-style wrappers that take a struct pci_dev */
#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

/* WARN()/WARN_ONCE() variants prefixed with the driver and device name */
#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2878 
2879 #endif /* LINUX_PCI_H */
2880