1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *	pci.h
4  *
5  *	PCI defines and function prototypes
6  *	Copyright 1994, Drew Eckhardt
7  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8  *
9  *	PCI Express ASPM defines and function prototypes
10  *	Copyright (c) 2007 Intel Corp.
11  *		Zhang Yanmin (yanmin.zhang@intel.com)
12  *		Shaohua Li (shaohua.li@intel.com)
13  *
14  *	For more information, please consult the following manuals (look at
15  *	http://www.pcisig.com/ for how to get them):
16  *
17  *	PCI BIOS Specification
18  *	PCI Local Bus Specification
19  *	PCI to PCI Bridge Specification
20  *	PCI Express Specification
21  *	PCI System Design Guide
22  */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25 
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28 
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43 
44 #include <linux/pci_ids.h>
45 
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
47 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
48 			       PCI_STATUS_REC_MASTER_ABORT | \
49 			       PCI_STATUS_REC_TARGET_ABORT | \
50 			       PCI_STATUS_SIG_TARGET_ABORT | \
51 			       PCI_STATUS_PARITY)
52 
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55 
56 #define PCI_RESET_PROBE		true
57 #define PCI_RESET_DO_RESET	false
58 
59 /*
60  * The PCI interface treats multi-function devices as independent
61  * devices.  The slot/function address of each device is encoded
62  * in a single byte as follows:
63  *
64  *	7:3 = slot
65  *	2:0 = function
66  *
67  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68  * In the interest of not exposing interfaces to user-space unnecessarily,
69  * the following kernel-only defines are being added here.
70  */
71 #define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
72 /* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
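
/*
 * Illustrative sketch (bus/slot/function values are made up): composing and
 * decomposing the 16-bit device ID built from a bus number and an encoded
 * devfn:
 *
 *	u16 devid = PCI_DEVID(0x3a, PCI_DEVFN(0x1c, 2));
 *
 * PCI_BUS_NUM(devid) is then 0x3a, PCI_SLOT(devid & 0xff) is 0x1c and
 * PCI_FUNC(devid & 0xff) is 2.
 */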
74 
75 /* pci_slot represents a physical slot */
76 struct pci_slot {
77 	struct pci_bus		*bus;		/* Bus this slot is on */
78 	struct list_head	list;		/* Node in list of slots */
79 	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
80 	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
81 	struct kobject		kobj;
82 };
83 
84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 	return kobject_name(&slot->kobj);
87 }
88 
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
90 enum pci_mmap_state {
91 	pci_mmap_io,
92 	pci_mmap_mem
93 };
94 
95 /* For PCI devices, the region numbers are assigned this way: */
96 enum {
97 	/* #0-5: standard PCI resources */
98 	PCI_STD_RESOURCES,
99 	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
100 
101 	/* #6: expansion ROM resource */
102 	PCI_ROM_RESOURCE,
103 
104 	/* Device-specific resources */
105 #ifdef CONFIG_PCI_IOV
106 	PCI_IOV_RESOURCES,
107 	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
108 #endif
109 
110 /* PCI-to-PCI (P2P) bridge windows */
111 #define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
112 #define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
113 #define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
114 
115 /* CardBus bridge windows */
116 #define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
117 #define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
118 #define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
119 #define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)
120 
121 /* Total number of bridge resources for P2P and CardBus */
122 #define PCI_BRIDGE_RESOURCE_NUM 4
123 
124 	/* Resources assigned to buses behind the bridge */
125 	PCI_BRIDGE_RESOURCES,
126 	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
127 				  PCI_BRIDGE_RESOURCE_NUM - 1,
128 
129 	/* Total resources associated with a PCI device */
130 	PCI_NUM_RESOURCES,
131 
132 	/* Preserve this for compatibility */
133 	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
134 };
135 
136 /**
137  * enum pci_interrupt_pin - PCI INTx interrupt values
138  * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
139  * @PCI_INTERRUPT_INTA: PCI INTA pin
140  * @PCI_INTERRUPT_INTB: PCI INTB pin
141  * @PCI_INTERRUPT_INTC: PCI INTC pin
142  * @PCI_INTERRUPT_INTD: PCI INTD pin
143  *
144  * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
145  * PCI_INTERRUPT_PIN register.
146  */
147 enum pci_interrupt_pin {
148 	PCI_INTERRUPT_UNKNOWN,
149 	PCI_INTERRUPT_INTA,
150 	PCI_INTERRUPT_INTB,
151 	PCI_INTERRUPT_INTC,
152 	PCI_INTERRUPT_INTD,
153 };
154 
155 /* The number of legacy PCI INTx interrupts */
156 #define PCI_NUM_INTX	4
157 
158 /*
159  * Reading from a device that doesn't respond typically returns ~0.  A
160  * successful read from a device may also return ~0, so you need additional
161  * information to reliably identify errors.
162  */
163 #define PCI_ERROR_RESPONSE		(~0ULL)
164 #define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
165 #define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
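
/*
 * Illustrative sketch: for a register that can never legitimately read as
 * all ones (the Vendor ID dword here), an all-ones value can be treated as
 * a failed read:
 *
 *	u32 id;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
 *	if (PCI_POSSIBLE_ERROR(id))
 *		return -ENODEV;
 */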
166 
167 /*
168  * pci_power_t values must match the bits in the Capabilities PME_Support
169  * and Control/Status PowerState fields in the Power Management capability.
170  */
171 typedef int __bitwise pci_power_t;
172 
173 #define PCI_D0		((pci_power_t __force) 0)
174 #define PCI_D1		((pci_power_t __force) 1)
175 #define PCI_D2		((pci_power_t __force) 2)
176 #define PCI_D3hot	((pci_power_t __force) 3)
177 #define PCI_D3cold	((pci_power_t __force) 4)
178 #define PCI_UNKNOWN	((pci_power_t __force) 5)
179 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
180 
181 /* Remember to update this when the list above changes! */
182 extern const char *pci_power_names[];
183 
184 static inline const char *pci_power_name(pci_power_t state)
185 {
186 	return pci_power_names[1 + (__force int) state];
187 }
188 
189 /**
190  * typedef pci_channel_state_t
191  *
192  * The pci_channel state describes connectivity between the CPU and
193  * the PCI device.  If some PCI bus between here and the PCI device
194  * has crashed or locked up, this info is reflected here.
195  */
196 typedef unsigned int __bitwise pci_channel_state_t;
197 
198 enum {
199 	/* I/O channel is in normal state */
200 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
201 
202 	/* I/O to channel is blocked */
203 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
204 
205 	/* PCI card is dead */
206 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
207 };
208 
209 typedef unsigned int __bitwise pcie_reset_state_t;
210 
211 enum pcie_reset_state {
212 	/* Reset is NOT asserted (Use to deassert reset) */
213 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
214 
215 	/* Use #PERST to reset PCIe device */
216 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
217 
218 	/* Use PCIe Hot Reset to reset device */
219 	pcie_hot_reset = (__force pcie_reset_state_t) 3
220 };
221 
222 typedef unsigned short __bitwise pci_dev_flags_t;
223 enum pci_dev_flags {
224 	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
225 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
226 	/* Device configuration is irrevocably lost if disabled into D3 */
227 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
228 	/* Provide indication device is assigned by a Virtual Machine Manager */
229 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
230 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
231 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
232 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
233 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
234 	/* Do not use bus resets for device */
235 	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
236 	/* Do not use PM reset even if device advertises NoSoftRst- */
237 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
238 	/* Get VPD from function 0 VPD */
239 	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
240 	/* A non-root bridge where translation occurs, stop alias search here */
241 	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
242 	/* Do not use FLR even if device advertises PCI_AF_CAP */
243 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
244 	/* Don't use Relaxed Ordering for TLPs directed at this device */
245 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
246 	/* Device does honor MSI masking despite saying otherwise */
247 	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
248 	/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
249 	PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
250 };
251 
252 enum pci_irq_reroute_variant {
253 	INTEL_IRQ_REROUTE_VARIANT = 1,
254 	MAX_IRQ_REROUTE_VARIANTS = 3
255 };
256 
257 typedef unsigned short __bitwise pci_bus_flags_t;
258 enum pci_bus_flags {
259 	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
260 	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
261 	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
262 	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
263 };
264 
265 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
266 enum pcie_link_width {
267 	PCIE_LNK_WIDTH_RESRV	= 0x00,
268 	PCIE_LNK_X1		= 0x01,
269 	PCIE_LNK_X2		= 0x02,
270 	PCIE_LNK_X4		= 0x04,
271 	PCIE_LNK_X8		= 0x08,
272 	PCIE_LNK_X12		= 0x0c,
273 	PCIE_LNK_X16		= 0x10,
274 	PCIE_LNK_X32		= 0x20,
275 	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
276 };
277 
278 /* See matching string table in pci_speed_string() */
279 enum pci_bus_speed {
280 	PCI_SPEED_33MHz			= 0x00,
281 	PCI_SPEED_66MHz			= 0x01,
282 	PCI_SPEED_66MHz_PCIX		= 0x02,
283 	PCI_SPEED_100MHz_PCIX		= 0x03,
284 	PCI_SPEED_133MHz_PCIX		= 0x04,
285 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
286 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
287 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
288 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
289 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
290 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
291 	AGP_UNKNOWN			= 0x0c,
292 	AGP_1X				= 0x0d,
293 	AGP_2X				= 0x0e,
294 	AGP_4X				= 0x0f,
295 	AGP_8X				= 0x10,
296 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
297 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
298 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
299 	PCIE_SPEED_2_5GT		= 0x14,
300 	PCIE_SPEED_5_0GT		= 0x15,
301 	PCIE_SPEED_8_0GT		= 0x16,
302 	PCIE_SPEED_16_0GT		= 0x17,
303 	PCIE_SPEED_32_0GT		= 0x18,
304 	PCIE_SPEED_64_0GT		= 0x19,
305 	PCI_SPEED_UNKNOWN		= 0xff,
306 };
307 
308 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
309 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
310 
311 struct pci_vpd {
312 	struct mutex	lock;
313 	unsigned int	len;
314 	u8		cap;
315 };
316 
317 struct irq_affinity;
318 struct pcie_bwctrl_data;
319 struct pcie_link_state;
320 struct pci_sriov;
321 struct pci_p2pdma;
322 struct rcec_ea;
323 
324 /**
 * struct pci_dev - describes a PCI device
325  *
326  * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
327  *			LSB). 0 when the supported speeds cannot be
328  *			determined (e.g., for Root Complex Integrated
329  *			Endpoints without the relevant Capability
330  *			Registers).
331  */
332 struct pci_dev {
333 	struct list_head bus_list;	/* Node in per-bus list */
334 	struct pci_bus	*bus;		/* Bus this device is on */
335 	struct pci_bus	*subordinate;	/* Bus this device bridges to */
336 
337 	void		*sysdata;	/* Hook for sys-specific extension */
338 	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
339 	struct pci_slot	*slot;		/* Physical slot this device is in */
340 
341 	unsigned int	devfn;		/* Encoded device & function index */
342 	unsigned short	vendor;
343 	unsigned short	device;
344 	unsigned short	subsystem_vendor;
345 	unsigned short	subsystem_device;
346 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
347 	u8		revision;	/* PCI revision, low byte of class word */
348 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
349 #ifdef CONFIG_PCIEAER
350 	u16		aer_cap;	/* AER capability offset */
351 	struct aer_stats *aer_stats;	/* AER stats for this device */
352 #endif
353 #ifdef CONFIG_PCIEPORTBUS
354 	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
355 	struct pci_dev  *rcec;          /* Associated RCEC device */
356 #endif
357 	u32		devcap;		/* PCIe Device Capabilities */
358 	u16		rebar_cap;	/* Resizable BAR capability offset */
359 	u8		pcie_cap;	/* PCIe capability offset */
360 	u8		msi_cap;	/* MSI capability offset */
361 	u8		msix_cap;	/* MSI-X capability offset */
362 	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
363 	u8		rom_base_reg;	/* Config register controlling ROM */
364 	u8		pin;		/* Interrupt pin this device uses */
365 	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
366 	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */
367 
368 	struct pci_driver *driver;	/* Driver bound to this device */
369 	u64		dma_mask;	/* Mask of the bits of bus address this
370 					   device implements.  Normally this is
371 					   0xffffffff.  You only need to change
372 					   this if your device has broken DMA
373 					   or supports 64-bit transfers.  */
374 
375 	struct device_dma_parameters dma_parms;
376 
377 	pci_power_t	current_state;	/* Current operating state. In ACPI,
378 					   this is D0-D3, D0 being fully
379 					   functional, and D3 being off. */
380 	u8		pm_cap;		/* PM capability offset */
381 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
382 					   can be generated */
383 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
384 	unsigned int	pinned:1;	/* Whether this dev is pinned */
385 	unsigned int	config_rrs_sv:1; /* Config RRS software visibility */
386 	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
387 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
388 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
389 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
390 	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
391 	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
392 	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
393 	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
394 						   decoding during BAR sizing */
395 	unsigned int	wakeup_prepared:1;
396 	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
397 	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
398 	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
399 						      controlled exclusively by
400 						      user sysfs */
401 	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
402 						   bit manually */
403 	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
404 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
405 
406 	u16		l1ss;		/* L1SS Capability pointer */
407 #ifdef CONFIG_PCIEASPM
408 	struct pcie_link_state	*link_state;	/* ASPM link state */
409 	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
410 					   supported from root to here */
411 #endif
412 	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
413 	unsigned int	eetlp_prefix_max:3;	/* Max # of End-End TLP Prefixes, 0=not supported */
414 
415 	pci_channel_state_t error_state;	/* Current connectivity state */
416 	struct device	dev;			/* Generic device interface */
417 
418 	int		cfg_size;		/* Size of config space */
419 
420 	/*
421 	 * Instead of touching interrupt line and base address registers
422 	 * directly, use the values stored here. They might be different!
423 	 */
424 	unsigned int	irq;
425 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
426 	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */
427 
428 	bool		match_driver;		/* Skip attaching driver */
429 
430 	unsigned int	transparent:1;		/* Subtractive decode bridge */
431 	unsigned int	io_window:1;		/* Bridge has I/O window */
432 	unsigned int	pref_window:1;		/* Bridge has pref mem window */
433 	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
434 	unsigned int	multifunction:1;	/* Multi-function device */
435 
436 	unsigned int	is_busmaster:1;		/* Is busmaster */
437 	unsigned int	no_msi:1;		/* May not use MSI */
438 	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
439 	unsigned int	block_cfg_access:1;	/* Config space access blocked */
440 	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
441 	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
442 	unsigned int	msi_enabled:1;
443 	unsigned int	msix_enabled:1;
444 	unsigned int	ari_enabled:1;		/* ARI forwarding */
445 	unsigned int	ats_enabled:1;		/* Address Translation Svc */
446 	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
447 	unsigned int	pri_enabled:1;		/* Page Request Interface */
448 	unsigned int	tph_enabled:1;		/* TLP Processing Hints */
449 	unsigned int	is_managed:1;		/* Managed via devres */
450 	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
451 	unsigned int	needs_freset:1;		/* Requires fundamental reset */
452 	unsigned int	state_saved:1;
453 	unsigned int	is_physfn:1;
454 	unsigned int	is_virtfn:1;
455 	unsigned int	is_hotplug_bridge:1;
456 	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
457 	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
458 	/*
459 	 * Devices marked as untrusted are those that can potentially execute
460 	 * DMA attacks and similar.  They are typically connected through
461 	 * external ports such as Thunderbolt, but are not limited to that.
462 	 * When an IOMMU is enabled, they should get full IOMMU mappings to
463 	 * make sure they cannot access arbitrary memory.
464 	 */
465 	unsigned int	untrusted:1;
466 	/*
467 	 * Info from the platform, e.g., ACPI or device tree, may mark a
468 	 * device as "external-facing".  An external-facing device is
469 	 * itself internal but devices downstream from it are external.
470 	 */
471 	unsigned int	external_facing:1;
472 	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
473 	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
474 	unsigned int	irq_managed:1;
475 	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
476 	unsigned int	is_probed:1;		/* Device probing in progress */
477 	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
478 	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
479 	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
480 	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
481 	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
482 	unsigned int	non_mappable_bars:1;	/* BARs can't be mapped to user-space  */
483 	pci_dev_flags_t dev_flags;
484 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
485 
486 	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
487 	u32		saved_config_space[16]; /* Config space saved at suspend time */
488 	struct hlist_head saved_cap_space;
489 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
490 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
491 
492 #ifdef CONFIG_HOTPLUG_PCI_PCIE
493 	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
494 #endif
495 #ifdef CONFIG_PCIE_PTM
496 	u16		ptm_cap;		/* PTM Capability */
497 	unsigned int	ptm_root:1;
498 	unsigned int	ptm_enabled:1;
499 	u8		ptm_granularity;
500 #endif
501 #ifdef CONFIG_PCI_MSI
502 	void __iomem	*msix_base;
503 	raw_spinlock_t	msi_lock;
504 #endif
505 	struct pci_vpd	vpd;
506 #ifdef CONFIG_PCIE_DPC
507 	u16		dpc_cap;
508 	unsigned int	dpc_rp_extensions:1;
509 	u8		dpc_rp_log_size;
510 #endif
511 	struct pcie_bwctrl_data		*link_bwctrl;
512 #ifdef CONFIG_PCI_ATS
513 	union {
514 		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
515 		struct pci_dev		*physfn;	/* VF: related PF */
516 	};
517 	u16		ats_cap;	/* ATS Capability offset */
518 	u8		ats_stu;	/* ATS Smallest Translation Unit */
519 #endif
520 #ifdef CONFIG_PCI_PRI
521 	u16		pri_cap;	/* PRI Capability offset */
522 	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
523 	unsigned int	pasid_required:1; /* PRG Response PASID Required */
524 #endif
525 #ifdef CONFIG_PCI_PASID
526 	u16		pasid_cap;	/* PASID Capability offset */
527 	u16		pasid_features;
528 #endif
529 #ifdef CONFIG_PCI_P2PDMA
530 	struct pci_p2pdma __rcu *p2pdma;
531 #endif
532 #ifdef CONFIG_PCI_DOE
533 	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
534 #endif
535 #ifdef CONFIG_PCI_NPEM
536 	struct npem	*npem;		/* Native PCIe Enclosure Management */
537 #endif
538 	u16		acs_cap;	/* ACS Capability offset */
539 	u8		supported_speeds; /* Supported Link Speeds Vector */
540 	phys_addr_t	rom;		/* Physical address if not from BAR */
541 	size_t		romlen;		/* Length if not from BAR */
542 	/*
543 	 * Driver name to force a match.  Do not set directly, because core
544 	 * frees it.  Use driver_set_override() to set or clear it.
545 	 */
546 	const char	*driver_override;
547 
548 	unsigned long	priv_flags;	/* Private flags for the PCI driver */
549 
550 	/* These methods index pci_reset_fn_methods[] */
551 	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
552 
553 #ifdef CONFIG_PCIE_TPH
554 	u16		tph_cap;	/* TPH capability offset */
555 	u8		tph_mode;	/* TPH mode */
556 	u8		tph_req_type;	/* TPH requester type */
557 #endif
558 };
559 
560 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
561 {
562 #ifdef CONFIG_PCI_IOV
563 	if (dev->is_virtfn)
564 		dev = dev->physfn;
565 #endif
566 	return dev;
567 }
568 
569 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
570 
571 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
572 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
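
/*
 * Sketch of walking every PCI device in the system.  The cursor must start
 * out NULL; pci_get_device() drops the reference on the previous device and
 * takes one on the next, so breaking out early leaves a reference held that
 * must eventually be dropped with pci_dev_put():
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev) {
 *		if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 *			dev_info(&pdev->dev, "found an Intel device\n");
 *	}
 */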
573 
574 static inline int pci_channel_offline(struct pci_dev *pdev)
575 {
576 	return (pdev->error_state != pci_channel_io_normal);
577 }
578 
579 /*
580  * Currently in ACPI spec, for each PCI host bridge, PCI Segment
581  * Group number is limited to a 16-bit value, therefore (int)-1 is
582  * not a valid PCI domain number, and can be used as a sentinel
583  * value indicating ->domain_nr is not set by the driver (and
584  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
585  * pci_bus_find_domain_nr()).
586  */
587 #define PCI_DOMAIN_NR_NOT_SET (-1)
588 
589 struct pci_host_bridge {
590 	struct device	dev;
591 	struct pci_bus	*bus;		/* Root bus */
592 	struct pci_ops	*ops;
593 	struct pci_ops	*child_ops;
594 	void		*sysdata;
595 	int		busnr;
596 	int		domain_nr;
597 	struct list_head windows;	/* resource_entry */
598 	struct list_head dma_ranges;	/* dma ranges resource list */
599 	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
600 	int (*map_irq)(const struct pci_dev *, u8, u8);
601 	void (*release_fn)(struct pci_host_bridge *);
602 	int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
603 	void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
604 	void		*release_data;
605 	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
606 	unsigned int	no_ext_tags:1;		/* No Extended Tags */
607 	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
608 	unsigned int	native_aer:1;		/* OS may use PCIe AER */
609 	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
610 	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
611 	unsigned int	native_pme:1;		/* OS may use PCIe PME */
612 	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
613 	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
614 	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
615 	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
616 	unsigned int	size_windows:1;		/* Enable root bus sizing */
617 	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */
618 
619 	/* Resource alignment requirements */
620 	resource_size_t (*align_resource)(struct pci_dev *dev,
621 			const struct resource *res,
622 			resource_size_t start,
623 			resource_size_t size,
624 			resource_size_t align);
625 	unsigned long	private[] ____cacheline_aligned;
626 };
627 
628 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
629 
630 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
631 {
632 	return (void *)bridge->private;
633 }
634 
635 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
636 {
637 	return container_of(priv, struct pci_host_bridge, private);
638 }
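
/*
 * Sketch of how a host controller driver might use the private area that
 * follows struct pci_host_bridge (struct my_pcie_port is hypothetical):
 *
 *	struct my_pcie_port {
 *		void __iomem *regs;
 *	};
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct my_pcie_port));
 *	if (!bridge)
 *		return -ENOMEM;
 *	port = pci_host_bridge_priv(bridge);
 */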
639 
640 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
641 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
642 						   size_t priv);
643 void pci_free_host_bridge(struct pci_host_bridge *bridge);
644 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
645 
646 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
647 				 void (*release_fn)(struct pci_host_bridge *),
648 				 void *release_data);
649 
650 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
651 
652 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
653 
654 struct pci_bus {
655 	struct list_head node;		/* Node in list of buses */
656 	struct pci_bus	*parent;	/* Parent bus this bridge is on */
657 	struct list_head children;	/* List of child buses */
658 	struct list_head devices;	/* List of devices on this bus */
659 	struct pci_dev	*self;		/* Bridge device as seen by parent */
660 	struct list_head slots;		/* List of slots on this bus;
661 					   protected by pci_slot_mutex */
662 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
663 	struct list_head resources;	/* Address space routed to this bus */
664 	struct resource busn_res;	/* Bus numbers routed to this bus */
665 
666 	struct pci_ops	*ops;		/* Configuration access functions */
667 	void		*sysdata;	/* Hook for sys-specific extension */
668 	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */
669 
670 	unsigned char	number;		/* Bus number */
671 	unsigned char	primary;	/* Number of primary bridge */
672 	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
673 	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
674 #ifdef CONFIG_PCI_DOMAINS_GENERIC
675 	int		domain_nr;
676 #endif
677 
678 	char		name[48];
679 
680 	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
681 	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
682 	struct device		*bridge;
683 	struct device		dev;
684 	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
685 	struct bin_attribute	*legacy_mem;	/* Legacy mem */
686 	unsigned int		is_added:1;
687 	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
688 	unsigned int		flit_mode:1;	/* Link in Flit mode */
689 };
690 
691 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
692 
693 static inline u16 pci_dev_id(struct pci_dev *dev)
694 {
695 	return PCI_DEVID(dev->bus->number, dev->devfn);
696 }
697 
698 /*
699  * Returns true if the PCI bus is root (behind host-PCI bridge),
700  * false otherwise
701  *
702  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
703  * This is incorrect because "virtual" buses added for SR-IOV (via
704  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
705  */
706 static inline bool pci_is_root_bus(struct pci_bus *pbus)
707 {
708 	return !(pbus->parent);
709 }
710 
711 /**
712  * pci_is_bridge - check if the PCI device is a bridge
713  * @dev: PCI device
714  *
715  * Return true if the PCI device is a bridge, whether or not it has a
716  * subordinate bus.
717  */
718 static inline bool pci_is_bridge(struct pci_dev *dev)
719 {
720 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
721 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
722 }
723 
724 /**
725  * pci_is_vga - check if the PCI device is a VGA device
726  * @pdev: PCI device
727  *
728  * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
729  * VGA Base Class and Sub-Classes:
730  *
731  *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
732  *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
733  *
734  * Return true if the PCI device is a VGA device and uses the legacy VGA
735  * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
736  * aliases).
737  */
738 static inline bool pci_is_vga(struct pci_dev *pdev)
739 {
740 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
741 		return true;
742 
743 	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
744 		return true;
745 
746 	return false;
747 }
748 
749 #define for_each_pci_bridge(dev, bus)				\
750 	list_for_each_entry(dev, &bus->devices, bus_list)	\
751 		if (!pci_is_bridge(dev)) {} else
752 
753 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
754 {
755 	dev = pci_physfn(dev);
756 	if (pci_is_root_bus(dev->bus))
757 		return NULL;
758 
759 	return dev->bus->self;
760 }
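
/*
 * Illustrative sketch: walking from a device up towards its Root Port.
 * pci_upstream_bridge() returns NULL once the root bus is reached, so the
 * loop terminates at the host bridge (pci_pcie_type() is declared elsewhere
 * in this header):
 *
 *	struct pci_dev *bridge;
 *
 *	for (bridge = pci_upstream_bridge(pdev); bridge;
 *	     bridge = pci_upstream_bridge(bridge))
 *		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
 *			break;
 */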
761 
762 #ifdef CONFIG_PCI_MSI
763 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
764 {
765 	return pci_dev->msi_enabled || pci_dev->msix_enabled;
766 }
767 #else
768 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
769 #endif
770 
771 /* Error values that may be returned by PCI functions */
772 #define PCIBIOS_SUCCESSFUL		0x00
773 #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
774 #define PCIBIOS_BAD_VENDOR_ID		0x83
775 #define PCIBIOS_DEVICE_NOT_FOUND	0x86
776 #define PCIBIOS_BAD_REGISTER_NUMBER	0x87
777 #define PCIBIOS_SET_FAILED		0x88
778 #define PCIBIOS_BUFFER_TOO_SMALL	0x89
779 
780 /* Translate above to generic errno for passing back through non-PCI code */
781 static inline int pcibios_err_to_errno(int err)
782 {
783 	if (err <= PCIBIOS_SUCCESSFUL)
784 		return err; /* Assume already errno */
785 
786 	switch (err) {
787 	case PCIBIOS_FUNC_NOT_SUPPORTED:
788 		return -ENOENT;
789 	case PCIBIOS_BAD_VENDOR_ID:
790 		return -ENOTTY;
791 	case PCIBIOS_DEVICE_NOT_FOUND:
792 		return -ENODEV;
793 	case PCIBIOS_BAD_REGISTER_NUMBER:
794 		return -EFAULT;
795 	case PCIBIOS_SET_FAILED:
796 		return -EIO;
797 	case PCIBIOS_BUFFER_TOO_SMALL:
798 		return -ENOSPC;
799 	}
800 
801 	return -ERANGE;
802 }
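
/*
 * Typical use: the config accessors return the PCIBIOS_* codes above, which
 * callers that speak errno convert at the boundary.  A minimal sketch:
 *
 *	u16 cmd;
 *	int ret;
 *
 *	ret = pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */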
803 
804 /* Low-level architecture-dependent routines */
805 
806 struct pci_ops {
807 	int (*add_bus)(struct pci_bus *bus);
808 	void (*remove_bus)(struct pci_bus *bus);
809 	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
810 	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
811 	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
812 };
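
/*
 * Sketch of a minimal ECAM-style implementation built on the generic
 * accessors declared below (struct my_pcie and its register layout are
 * hypothetical; real controllers may differ):
 *
 *	static void __iomem *my_map_bus(struct pci_bus *bus,
 *					unsigned int devfn, int where)
 *	{
 *		struct my_pcie *pcie = bus->sysdata;
 *
 *		return pcie->cfg_base + (bus->number << 20) +
 *		       (devfn << 12) + where;
 *	}
 *
 *	static struct pci_ops my_pci_ops = {
 *		.map_bus = my_map_bus,
 *		.read	 = pci_generic_config_read,
 *		.write	 = pci_generic_config_write,
 *	};
 */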
813 
814 /*
815  * ACPI needs to be able to access PCI config space before we've done a
816  * PCI bus scan and created pci_bus structures.
817  */
818 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
819 		 int reg, int len, u32 *val);
820 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
821 		  int reg, int len, u32 val);
822 
823 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
824 typedef u64 pci_bus_addr_t;
825 #else
826 typedef u32 pci_bus_addr_t;
827 #endif
828 
829 struct pci_bus_region {
830 	pci_bus_addr_t	start;
831 	pci_bus_addr_t	end;
832 };
833 
834 struct pci_dynids {
835 	spinlock_t		lock;	/* Protects list, index */
836 	struct list_head	list;	/* For IDs added at runtime */
837 };
838 
839 
840 /*
841  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
842  * a set of callbacks in struct pci_error_handlers, that device driver
843  * will be notified of PCI bus errors, and will be driven to recovery
844  * when an error occurs.
845  */
846 
847 typedef unsigned int __bitwise pci_ers_result_t;
848 
849 enum pci_ers_result {
850 	/* No result/none/not supported in device driver */
851 	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
852 
853 	/* Device driver can recover without slot reset */
854 	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
855 
856 	/* Device driver wants slot to be reset */
857 	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
858 
859 	/* Device has completely failed, is unrecoverable */
860 	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
861 
862 	/* Device driver is fully recovered and operational */
863 	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
864 
865 	/* No AER capabilities registered for the driver */
866 	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
867 };
868 
869 /* PCI bus error event callbacks */
870 struct pci_error_handlers {
871 	/* PCI bus error detected on this device */
872 	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
873 					   pci_channel_state_t error);
874 
875 	/* MMIO has been re-enabled, but not DMA */
876 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
877 
878 	/* PCI slot has been reset */
879 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
880 
881 	/* PCI function reset prepare or completed */
882 	void (*reset_prepare)(struct pci_dev *dev);
883 	void (*reset_done)(struct pci_dev *dev);
884 
885 	/* Device driver may resume normal operations */
886 	void (*resume)(struct pci_dev *dev);
887 
888 	/* Allow device driver to record more details of a correctable error */
889 	void (*cor_error_detected)(struct pci_dev *dev);
890 };
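
/*
 * Skeleton of a driver's error handlers (illustrative only; a real driver
 * would quiesce its hardware in error_detected() and restart it after the
 * reset):
 *
 *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
 *						  pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
 *	{
 *		pci_restore_state(pdev);
 *		return PCI_ERS_RESULT_RECOVERED;
 *	}
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *	};
 */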
891 
892 
893 struct module;
894 
895 /**
896  * struct pci_driver - PCI driver structure
897  * @name:	Driver name.
898  * @id_table:	Pointer to table of device IDs the driver is
899  *		interested in.  Most drivers should export this
900  *		table using MODULE_DEVICE_TABLE(pci,...).
901  * @probe:	This probing function gets called (during execution
902  *		of pci_register_driver() for already existing
903  *		devices or later if a new device gets inserted) for
904  *		all PCI devices which match the ID table and are not
905  *		"owned" by the other drivers yet. This function gets
906  *		passed a "struct pci_dev \*" for each device whose
907  *		entry in the ID table matches the device. The probe
908  *		function returns zero when the driver chooses to
909  *		take "ownership" of the device or an error code
910  *		(negative number) otherwise.
911  *		The probe function always gets called from process
912  *		context, so it can sleep.
913  * @remove:	The remove() function gets called whenever a device
914  *		being handled by this driver is removed (either during
915  *		deregistration of the driver or when it's manually
916  *		pulled out of a hot-pluggable slot).
917  *		The remove function always gets called from process
918  *		context, so it can sleep.
919  * @suspend:	Put device into low power state.
920  * @resume:	Wake device from low power state.
921  *		(Please see Documentation/power/pci.rst for descriptions
922  *		of PCI Power Management and the related functions.)
923  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
924  *		Intended to stop any idling DMA operations.
925  *		Useful for enabling wake-on-lan (NIC) or changing
926  *		the power state of a device before reboot.
927  *		e.g. drivers/net/e100.c.
928  * @sriov_configure: Optional driver callback to allow configuration of
929  *		number of VFs to enable via sysfs "sriov_numvfs" file.
930  * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
931  *              vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
932  *              This will change MSI-X Table Size in the VF Message Control
933  *              registers.
934  * @sriov_get_vf_total_msix: PF driver callback to get the total number of
935  *              MSI-X vectors available for distribution to the VFs.
936  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
937  * @groups:	Sysfs attribute groups.
938  * @dev_groups: Attributes attached to the device that will be
939  *              created once it is bound to the driver.
940  * @driver:	Driver model structure.
941  * @dynids:	List of dynamically added device IDs.
942  * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
943  *		For most device drivers, no need to care about this flag
944  *		as long as all DMAs are handled through the kernel DMA API.
945  *		For some special ones, for example VFIO drivers, they know
946  *		how to manage the DMA themselves and set this flag so that
947  *		the IOMMU layer will allow them to setup and manage their
948  *		own I/O address space.
949  */
950 struct pci_driver {
951 	const char		*name;
952 	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
953 	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
954 	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
955 	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
956 	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
957 	void (*shutdown)(struct pci_dev *dev);
958 	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
959 	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
960 	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
961 	const struct pci_error_handlers *err_handler;
962 	const struct attribute_group **groups;
963 	const struct attribute_group **dev_groups;
964 	struct device_driver	driver;
965 	struct pci_dynids	dynids;
966 	bool driver_managed_dma;
967 };
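
/*
 * Minimal driver skeleton tying the pieces together.  IDs, names and the
 * probe body are hypothetical; PCI_DEVICE() and module_pci_driver() are
 * defined further down in this header, MODULE_DEVICE_TABLE() comes from
 * <linux/module.h>:
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT, 0x0001) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_ids);
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int ret = pcim_enable_device(pdev);
 *
 *		if (ret)
 *			return ret;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *	};
 *	module_pci_driver(my_driver);
 */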
968 
969 #define to_pci_driver(__drv)	\
970 	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
971 
972 /**
973  * PCI_DEVICE - macro used to describe a specific PCI device
974  * @vend: the 16 bit PCI Vendor ID
975  * @dev: the 16 bit PCI Device ID
976  *
977  * This macro is used to create a struct pci_device_id that matches a
978  * specific device.  The subvendor and subdevice fields will be set to
979  * PCI_ANY_ID.
980  */
981 #define PCI_DEVICE(vend,dev) \
982 	.vendor = (vend), .device = (dev), \
983 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
984 
985 /**
986  * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
987  *                              override_only flags.
988  * @vend: the 16 bit PCI Vendor ID
989  * @dev: the 16 bit PCI Device ID
990  * @driver_override: the 32 bit PCI Device override_only
991  *
992  * This macro is used to create a struct pci_device_id that matches only a
993  * driver_override device. The subvendor and subdevice fields will be set to
994  * PCI_ANY_ID.
995  */
996 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
997 	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
998 	.subdevice = PCI_ANY_ID, .override_only = (driver_override)
999 
1000 /**
1001  * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1002  *                                   "driver_override" PCI device.
1003  * @vend: the 16 bit PCI Vendor ID
1004  * @dev: the 16 bit PCI Device ID
1005  *
1006  * This macro is used to create a struct pci_device_id that matches a
1007  * specific device. The subvendor and subdevice fields will be set to
1008  * PCI_ANY_ID and the driver_override will be set to
1009  * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1010  */
1011 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1012 	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1013 
1014 /**
1015  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1016  * @vend: the 16 bit PCI Vendor ID
1017  * @dev: the 16 bit PCI Device ID
1018  * @subvend: the 16 bit PCI Subvendor ID
1019  * @subdev: the 16 bit PCI Subdevice ID
1020  *
1021  * This macro is used to create a struct pci_device_id that matches a
1022  * specific device with subsystem information.
1023  */
1024 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1025 	.vendor = (vend), .device = (dev), \
1026 	.subvendor = (subvend), .subdevice = (subdev)
1027 
1028 /**
1029  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1030  * @dev_class: the class, subclass, prog-if triple for this device
1031  * @dev_class_mask: the class mask for this device
1032  *
1033  * This macro is used to create a struct pci_device_id that matches a
1034  * specific PCI class.  The vendor, device, subvendor, and subdevice
1035  * fields will be set to PCI_ANY_ID.
1036  */
1037 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1038 	.class = (dev_class), .class_mask = (dev_class_mask), \
1039 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1040 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1041 
1042 /**
1043  * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1044  * @vend: the vendor name
1045  * @dev: the 16 bit PCI Device ID
1046  *
1047  * This macro is used to create a struct pci_device_id that matches a
1048  * specific PCI device.  The subvendor, and subdevice fields will be set
1049  * to PCI_ANY_ID. The macro allows the next field to follow as the device
1050  * private data.
1051  */
1052 #define PCI_VDEVICE(vend, dev) \
1053 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1054 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1055 
1056 /**
1057  * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1058  * @vend: the vendor name
1059  * @dev: the 16 bit PCI Device ID
1060  * @subvend: the 16 bit PCI Subvendor ID
1061  * @subdev: the 16 bit PCI Subdevice ID
1062  *
1063  * Generate the pci_device_id struct layout for the specific PCI
1064  * device/subdevice. Private data may follow the output.
1065  */
1066 #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
1067 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1068 	.subvendor = (subvend), .subdevice = (subdev), 0, 0
1069 
1070 /**
1071  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1072  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1073  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1074  * @data: the driver data to be filled
1075  *
1076  * This macro is used to create a struct pci_device_id that matches a
1077  * specific PCI device.  The subvendor, and subdevice fields will be set
1078  * to PCI_ANY_ID.
1079  */
1080 #define PCI_DEVICE_DATA(vend, dev, data) \
1081 	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1082 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1083 	.driver_data = (kernel_ulong_t)(data)
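
/*
 * Sketch of an ID table mixing the helpers above, with per-entry driver
 * data picked up in probe() (device IDs and names are made up):
 *
 *	enum { MY_CHIP_A, MY_CHIP_B };
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_VDEVICE(INTEL, 0x1234), MY_CHIP_A },
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff),
 *		  .driver_data = MY_CHIP_B },
 *		{ }
 *	};
 *
 * and in probe():
 *
 *	unsigned long chip = id->driver_data;
 */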
1084 
1085 enum {
1086 	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
1087 	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
1088 	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
1089 	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
1090 	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
1091 	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
1092 	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
1093 };
1094 
1095 #define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
1096 #define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
1097 #define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
1098 #define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
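
/*
 * Typical use with pci_alloc_irq_vectors() (declared further down in this
 * header): request up to 8 vectors, preferring MSI-X but falling back to
 * MSI or INTx (the handler and cookie below are hypothetical):
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvec < 0)
 *		return nvec;
 *	ret = request_irq(pci_irq_vector(pdev, 0), my_handler, 0,
 *			  "my_driver", priv);
 */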
1099 
1100 /* These external functions are only available when PCI support is enabled */
1101 #ifdef CONFIG_PCI
1102 
1103 extern unsigned int pci_flags;
1104 
1105 static inline void pci_set_flags(int flags) { pci_flags = flags; }
1106 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
1107 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
1108 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1109 
1110 void pcie_bus_configure_settings(struct pci_bus *bus);
1111 
1112 enum pcie_bus_config_types {
1113 	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
1114 	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
1115 	PCIE_BUS_SAFE,		/* Use largest MPS that boot-time devices support */
1116 	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
1117 	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
1118 };
1119 
1120 extern enum pcie_bus_config_types pcie_bus_config;
1121 
1122 extern const struct bus_type pci_bus_type;
1123 
1124 /* Do NOT directly access these two variables, unless you are arch-specific PCI
1125  * code, or PCI core code. */
1126 extern struct list_head pci_root_buses;	/* List of all known PCI buses */
1127 /* Some device drivers need to know whether PCI has been initialized */
1128 int no_pci_devices(void);
1129 
1130 void pcibios_resource_survey_bus(struct pci_bus *bus);
1131 void pcibios_bus_add_device(struct pci_dev *pdev);
1132 void pcibios_add_bus(struct pci_bus *bus);
1133 void pcibios_remove_bus(struct pci_bus *bus);
1134 void pcibios_fixup_bus(struct pci_bus *);
1135 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1136 /* Architecture-specific versions may override this (weak) */
1137 char *pcibios_setup(char *str);
1138 
1139 /* Used only when drivers/pci/setup.c is used */
1140 resource_size_t pcibios_align_resource(void *, const struct resource *,
1141 				resource_size_t,
1142 				resource_size_t);
1143 
1144 /* Weak but can be overridden by arch */
1145 void pci_fixup_cardbus(struct pci_bus *);
1146 
1147 /* Generic PCI functions used internally */
1148 
1149 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1150 			     struct resource *res);
1151 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1152 			     struct pci_bus_region *region);
1153 void pcibios_scan_specific_bus(int busn);
1154 struct pci_bus *pci_find_bus(int domain, int busnr);
1155 void pci_bus_add_devices(const struct pci_bus *bus);
1156 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1157 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1158 				    struct pci_ops *ops, void *sysdata,
1159 				    struct list_head *resources);
1160 int pci_host_probe(struct pci_host_bridge *bridge);
1161 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1162 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1163 void pci_bus_release_busn_res(struct pci_bus *b);
1164 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1165 				  struct pci_ops *ops, void *sysdata,
1166 				  struct list_head *resources);
1167 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1168 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1169 				int busnr);
1170 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1171 				 const char *name,
1172 				 struct hotplug_slot *hotplug);
1173 void pci_destroy_slot(struct pci_slot *slot);
1174 #ifdef CONFIG_SYSFS
1175 void pci_dev_assign_slot(struct pci_dev *dev);
1176 #else
1177 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1178 #endif
1179 int pci_scan_slot(struct pci_bus *bus, int devfn);
1180 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1181 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1182 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1183 void pci_bus_add_device(struct pci_dev *dev);
1184 void pci_read_bridge_bases(struct pci_bus *child);
1185 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1186 					  struct resource *res);
1187 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1188 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1189 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1190 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1191 void pci_dev_put(struct pci_dev *dev);
1192 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1193 void pci_remove_bus(struct pci_bus *b);
1194 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1195 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1196 void pci_stop_root_bus(struct pci_bus *bus);
1197 void pci_remove_root_bus(struct pci_bus *bus);
1198 void pci_setup_cardbus(struct pci_bus *bus);
1199 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1200 void pci_sort_breadthfirst(void);
1201 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1202 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1203 
1204 /* Generic PCI functions exported to card drivers */
1205 
1206 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1207 u8 pci_find_capability(struct pci_dev *dev, int cap);
1208 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1209 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1210 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1211 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1212 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1213 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1214 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1215 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1216 
1217 u64 pci_get_dsn(struct pci_dev *dev);
1218 
1219 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1220 			       struct pci_dev *from);
1221 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1222 			       unsigned int ss_vendor, unsigned int ss_device,
1223 			       struct pci_dev *from);
1224 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1225 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1226 					    unsigned int devfn);
1227 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1228 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1229 
1230 int pci_dev_present(const struct pci_device_id *ids);
1231 
1232 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1233 			     int where, u8 *val);
1234 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1235 			     int where, u16 *val);
1236 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1237 			      int where, u32 *val);
1238 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1239 			      int where, u8 val);
1240 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1241 			      int where, u16 val);
1242 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1243 			       int where, u32 val);
1244 
1245 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1246 			    int where, int size, u32 *val);
1247 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1248 			    int where, int size, u32 val);
1249 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1250 			      int where, int size, u32 *val);
1251 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1252 			       int where, int size, u32 val);
1253 
1254 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1255 
1256 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1257 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1258 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1259 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1260 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1261 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1262 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1263 				    u32 clear, u32 set);
1264 
1265 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1266 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1267 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1268 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1269 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1270 						u16 clear, u16 set);
1271 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1272 					      u16 clear, u16 set);
1273 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1274 					u32 clear, u32 set);
1275 
1276 /**
1277  * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1278  * @dev:	PCI device structure of the PCI Express device
1279  * @pos:	PCI Express Capability Register
1280  * @clear:	Clear bitmask
1281  * @set:	Set bitmask
1282  *
1283  * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1284  * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1285  * Capability Registers are accessed concurrently in RMW fashion, hence
1286  * require locking which is handled transparently to the caller.
1287  */
1288 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1289 						     int pos,
1290 						     u16 clear, u16 set)
1291 {
1292 	switch (pos) {
1293 	case PCI_EXP_LNKCTL:
1294 	case PCI_EXP_LNKCTL2:
1295 	case PCI_EXP_RTCTL:
1296 		return pcie_capability_clear_and_set_word_locked(dev, pos,
1297 								 clear, set);
1298 	default:
1299 		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1300 								   clear, set);
1301 	}
1302 }
1303 
1304 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1305 					   u16 set)
1306 {
1307 	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1308 }
1309 
1310 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1311 					    u32 set)
1312 {
1313 	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1314 }
1315 
1316 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1317 					     u16 clear)
1318 {
1319 	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1320 }
1321 
1322 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1323 					      u32 clear)
1324 {
1325 	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1326 }
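
/*
 * Illustrative use of the RMW helpers above: clear the ASPM Control field
 * or set the Common Clock Configuration bit in Link Control, one of the
 * registers for which the locked variant is used automatically:
 *
 *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC);
 *	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CCC);
 */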
1327 
1328 /* User-space driven config access */
1329 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1330 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1331 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1332 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1333 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1334 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1335 
1336 int __must_check pci_enable_device(struct pci_dev *dev);
1337 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1338 int __must_check pci_reenable_device(struct pci_dev *);
1339 int __must_check pcim_enable_device(struct pci_dev *pdev);
1340 void pcim_pin_device(struct pci_dev *pdev);
1341 
pci_intx_mask_supported(struct pci_dev * pdev)1342 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1343 {
1344 	/*
1345 	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1346 	 * writable and no quirk has marked the feature broken.
1347 	 */
1348 	return !pdev->broken_intx_masking;
1349 }
1350 
pci_is_enabled(struct pci_dev * pdev)1351 static inline int pci_is_enabled(struct pci_dev *pdev)
1352 {
1353 	return (atomic_read(&pdev->enable_cnt) > 0);
1354 }
1355 
pci_is_managed(struct pci_dev * pdev)1356 static inline int pci_is_managed(struct pci_dev *pdev)
1357 {
1358 	return pdev->is_managed;
1359 }
1360 
1361 void pci_disable_device(struct pci_dev *dev);
1362 
1363 extern unsigned int pcibios_max_latency;
1364 void pci_set_master(struct pci_dev *dev);
1365 void pci_clear_master(struct pci_dev *dev);
1366 
1367 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1368 int pci_set_cacheline_size(struct pci_dev *dev);
1369 int __must_check pci_set_mwi(struct pci_dev *dev);
1370 int __must_check pcim_set_mwi(struct pci_dev *dev);
1371 int pci_try_set_mwi(struct pci_dev *dev);
1372 void pci_clear_mwi(struct pci_dev *dev);
1373 void pci_disable_parity(struct pci_dev *dev);
1374 void pci_intx(struct pci_dev *dev, int enable);
1375 bool pci_check_and_mask_intx(struct pci_dev *dev);
1376 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1377 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1378 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1379 int pcix_get_max_mmrbc(struct pci_dev *dev);
1380 int pcix_get_mmrbc(struct pci_dev *dev);
1381 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1382 int pcie_get_readrq(struct pci_dev *dev);
1383 int pcie_set_readrq(struct pci_dev *dev, int rq);
1384 int pcie_get_mps(struct pci_dev *dev);
1385 int pcie_set_mps(struct pci_dev *dev, int mps);
1386 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1387 			     enum pci_bus_speed *speed,
1388 			     enum pcie_link_width *width);
1389 int pcie_link_speed_mbps(struct pci_dev *pdev);
1390 void pcie_print_link_status(struct pci_dev *dev);
1391 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1392 int pcie_flr(struct pci_dev *dev);
1393 int __pci_reset_function_locked(struct pci_dev *dev);
1394 int pci_reset_function(struct pci_dev *dev);
1395 int pci_reset_function_locked(struct pci_dev *dev);
1396 int pci_try_reset_function(struct pci_dev *dev);
1397 int pci_probe_reset_slot(struct pci_slot *slot);
1398 int pci_probe_reset_bus(struct pci_bus *bus);
1399 int pci_reset_bus(struct pci_dev *dev);
1400 void pci_reset_secondary_bus(struct pci_dev *dev);
1401 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1402 void pci_update_resource(struct pci_dev *dev, int resno);
1403 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1404 void pci_release_resource(struct pci_dev *dev, int resno);
pci_rebar_bytes_to_size(u64 bytes)1405 static inline int pci_rebar_bytes_to_size(u64 bytes)
1406 {
1407 	bytes = roundup_pow_of_two(bytes);
1408 
1409 	/* Return BAR size as defined in the resizable BAR specification */
1410 	return max(ilog2(bytes), 20) - 20;
1411 }
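
/*
 * Worked examples of the encoding above (BAR size = 2^(value + 20) bytes,
 * as defined by the Resizable BAR capability): 1 MB maps to size 0,
 * 256 MB maps to size 8, and a request of 300 MB is first rounded up to
 * 512 MB and therefore maps to size 9.
 */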
1412 
1413 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1414 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1415 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1416 bool pci_device_is_present(struct pci_dev *pdev);
1417 void pci_ignore_hotplug(struct pci_dev *dev);
1418 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1419 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1420 
1421 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1422 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1423 		const char *fmt, ...);
1424 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1425 
1426 /* ROM control related routines */
1427 int pci_enable_rom(struct pci_dev *pdev);
1428 void pci_disable_rom(struct pci_dev *pdev);
1429 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1430 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1431 
1432 /* Power management related routines */
1433 int pci_save_state(struct pci_dev *dev);
1434 void pci_restore_state(struct pci_dev *dev);
1435 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1436 int pci_load_saved_state(struct pci_dev *dev,
1437 			 struct pci_saved_state *state);
1438 int pci_load_and_free_saved_state(struct pci_dev *dev,
1439 				  struct pci_saved_state **state);
1440 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1441 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1442 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1443 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1444 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1445 void pci_pme_active(struct pci_dev *dev, bool enable);
1446 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1447 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1448 int pci_prepare_to_sleep(struct pci_dev *dev);
1449 int pci_back_from_sleep(struct pci_dev *dev);
1450 bool pci_dev_run_wake(struct pci_dev *dev);
1451 void pci_d3cold_enable(struct pci_dev *dev);
1452 void pci_d3cold_disable(struct pci_dev *dev);
1453 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1454 void pci_resume_bus(struct pci_bus *bus);
1455 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1456 
1457 /* For use by arch with custom probe code */
1458 void set_pcie_port_type(struct pci_dev *pdev);
1459 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1460 
1461 /* Functions for PCI Hotplug drivers to use */
1462 unsigned int pci_rescan_bus(struct pci_bus *bus);
1463 void pci_lock_rescan_remove(void);
1464 void pci_unlock_rescan_remove(void);
1465 
1466 /* Vital Product Data routines */
1467 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1468 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1469 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1470 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1471 
1472 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1473 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1474 void pci_bus_assign_resources(const struct pci_bus *bus);
1475 void pci_bus_claim_resources(struct pci_bus *bus);
1476 void pci_bus_size_bridges(struct pci_bus *bus);
1477 int pci_claim_resource(struct pci_dev *, int);
1478 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1479 void pci_assign_unassigned_resources(void);
1480 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1481 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1482 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1483 int pci_enable_resources(struct pci_dev *, int mask);
1484 void pci_assign_irq(struct pci_dev *dev);
1485 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1486 #define HAVE_PCI_REQ_REGIONS	2
1487 int __must_check pci_request_regions(struct pci_dev *, const char *);
1488 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1489 void pci_release_regions(struct pci_dev *);
1490 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1491 void pci_release_region(struct pci_dev *, int);
1492 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1493 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1494 void pci_release_selected_regions(struct pci_dev *, int);
1495 
1496 static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev * pdev,unsigned int offset,unsigned int len,const char * name)1497 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1498 				    unsigned int len, const char *name)
1499 {
1500 	return __request_region(&pdev->driver_exclusive_resource, offset, len,
1501 				name, IORESOURCE_EXCLUSIVE);
1502 }
1503 
pci_release_config_region(struct pci_dev * pdev,unsigned int offset,unsigned int len)1504 static inline void pci_release_config_region(struct pci_dev *pdev,
1505 					     unsigned int offset,
1506 					     unsigned int len)
1507 {
1508 	__release_region(&pdev->driver_exclusive_resource, offset, len);
1509 }
1510 
1511 /* drivers/pci/bus.c */
1512 void pci_add_resource(struct list_head *resources, struct resource *res);
1513 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1514 			     resource_size_t offset);
1515 void pci_free_resource_list(struct list_head *resources);
1516 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1517 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1518 void pci_bus_remove_resources(struct pci_bus *bus);
1519 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1520 int devm_request_pci_bus_resources(struct device *dev,
1521 				   struct list_head *resources);
1522 
1523 /* Temporary until new and working PCI SBR API in place */
1524 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1525 
1526 #define __pci_bus_for_each_res0(bus, res, ...)				\
1527 	for (unsigned int __b = 0;					\
1528 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1529 	     __b++)
1530 
1531 #define __pci_bus_for_each_res1(bus, res, __b)				\
1532 	for (__b = 0;							\
1533 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1534 	     __b++)
1535 
1536 /**
1537  * pci_bus_for_each_resource - iterate over PCI bus resources
1538  * @bus: the PCI bus
1539  * @res: pointer to the current resource
1540  * @...: optional index of the current resource
1541  *
1542  * Iterate over PCI bus resources. The iteration first walks the PCI bus
1543  * resource array, which has at most %PCI_BRIDGE_RESOURCE_NUM entries, and
1544  * then continues with the separate list of additional resources, if that
1545  * list is not empty. This is why a logical OR is used in the loop condition.
1546  *
1547  * Possible usage:
1548  *
1549  *	struct pci_bus *bus = ...;
1550  *	struct resource *res;
1551  *	unsigned int i;
1552  *
1553  * 	// With optional index
1554  * 	pci_bus_for_each_resource(bus, res, i)
1555  * 		pr_info("PCI bus resource[%u]: %pR\n", i, res);
1556  *
1557  * 	// Without index
1558  * 	pci_bus_for_each_resource(bus, res)
1559  * 		_do_something_(res);
1560  */
1561 #define pci_bus_for_each_resource(bus, res, ...)			\
1562 	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
1563 		    (bus, res, __VA_ARGS__)
1564 
1565 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1566 			struct resource *res, resource_size_t size,
1567 			resource_size_t align, resource_size_t min,
1568 			unsigned long type_mask,
1569 			resource_alignf alignf,
1570 			void *alignf_data);
1571 
1572 
1573 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1574 			resource_size_t size);
1575 unsigned long pci_address_to_pio(phys_addr_t addr);
1576 phys_addr_t pci_pio_to_address(unsigned long pio);
1577 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1578 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1579 			   phys_addr_t phys_addr);
1580 void pci_unmap_iospace(struct resource *res);
1581 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1582 				      resource_size_t offset,
1583 				      resource_size_t size);
1584 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1585 					  struct resource *res);
1586 
pci_bus_address(struct pci_dev * pdev,int bar)1587 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1588 {
1589 	struct pci_bus_region region;
1590 
1591 	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1592 	return region.start;
1593 }
1594 
1595 /* Proper probing supporting hot-pluggable devices */
1596 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1597 				       const char *mod_name);
1598 
1599 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1600 #define pci_register_driver(driver)		\
1601 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1602 
1603 void pci_unregister_driver(struct pci_driver *dev);
1604 
1605 /**
1606  * module_pci_driver() - Helper macro for registering a PCI driver
1607  * @__pci_driver: pci_driver struct
1608  *
1609  * Helper macro for PCI drivers which do not do anything special in module
1610  * init/exit. This eliminates a lot of boilerplate. Each module may only
1611  * use this macro once, and calling it replaces module_init() and module_exit().
1612  */
1613 #define module_pci_driver(__pci_driver) \
1614 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
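
/*
 * A minimal usage sketch, assuming a hypothetical foo driver; the vendor/
 * device IDs and the foo_probe()/foo_remove() callbacks are placeholders:
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT, 0x0001) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 *
 * Drivers that can only be built in can use builtin_pci_driver() below in
 * the same way.
 */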
1615 
1616 /**
1617  * builtin_pci_driver() - Helper macro for registering a PCI driver
1618  * @__pci_driver: pci_driver struct
1619  *
1620  * Helper macro for PCI drivers which do not do anything special in their
1621  * init code. This eliminates a lot of boilerplate. Each driver may only
1622  * use this macro once, and calling it replaces device_initcall(...).
1623  */
1624 #define builtin_pci_driver(__pci_driver) \
1625 	builtin_driver(__pci_driver, pci_register_driver)
1626 
1627 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1628 int pci_add_dynid(struct pci_driver *drv,
1629 		  unsigned int vendor, unsigned int device,
1630 		  unsigned int subvendor, unsigned int subdevice,
1631 		  unsigned int class, unsigned int class_mask,
1632 		  unsigned long driver_data);
1633 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1634 					 struct pci_dev *dev);
1635 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1636 		    int pass);
1637 
1638 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1639 		  void *userdata);
1640 int pci_cfg_space_size(struct pci_dev *dev);
1641 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1642 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1643 					 unsigned long type);
1644 
1645 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1646 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1647 
1648 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1649 		      unsigned int command_bits, u32 flags);
1650 
1651 /*
1652  * Virtual interrupts allow more interrupts to be allocated
1653  * than the device has hardware vectors for. These are not programmed
1654  * into the device's MSI-X table and must be handled by the driver
1655  * through some other means.
1656  */
1657 #define PCI_IRQ_VIRTUAL		(1 << 4)
1658 
1659 #define PCI_IRQ_ALL_TYPES	(PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
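
/*
 * A minimal allocation sketch, assuming a hypothetical driver probe path;
 * passing PCI_IRQ_ALL_TYPES lets the request fall back from MSI-X through
 * MSI to INTx:
 *
 *	int nvec, irq;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *
 *	irq = pci_irq_vector(pdev, 0);
 *
 * pci_irq_vector() returns the Linux IRQ number of a given vector, and
 * pci_free_irq_vectors() releases the vectors again on teardown.
 */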
1660 
1661 #include <linux/dmapool.h>
1662 
1663 struct msix_entry {
1664 	u32	vector;	/* Kernel fills in the allocated IRQ vector */
1665 	u16	entry;	/* Driver specifies the MSI-X table entry index */
1666 };
1667 
1668 #ifdef CONFIG_PCI_MSI
1669 int pci_msi_vec_count(struct pci_dev *dev);
1670 void pci_disable_msi(struct pci_dev *dev);
1671 int pci_msix_vec_count(struct pci_dev *dev);
1672 void pci_disable_msix(struct pci_dev *dev);
1673 void pci_restore_msi_state(struct pci_dev *dev);
1674 int pci_msi_enabled(void);
1675 int pci_enable_msi(struct pci_dev *dev);
1676 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1677 			  int minvec, int maxvec);
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1678 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1679 					struct msix_entry *entries, int nvec)
1680 {
1681 	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1682 	if (rc < 0)
1683 		return rc;
1684 	return 0;
1685 }
1686 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1687 			  unsigned int max_vecs, unsigned int flags);
1688 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1689 				   unsigned int max_vecs, unsigned int flags,
1690 				   struct irq_affinity *affd);
1691 
1692 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1693 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1694 				     const struct irq_affinity_desc *affdesc);
1695 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1696 
1697 void pci_free_irq_vectors(struct pci_dev *dev);
1698 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1699 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1700 
1701 #else
pci_msi_vec_count(struct pci_dev * dev)1702 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msi(struct pci_dev * dev)1703 static inline void pci_disable_msi(struct pci_dev *dev) { }
pci_msix_vec_count(struct pci_dev * dev)1704 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msix(struct pci_dev * dev)1705 static inline void pci_disable_msix(struct pci_dev *dev) { }
pci_restore_msi_state(struct pci_dev * dev)1706 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
pci_msi_enabled(void)1707 static inline int pci_msi_enabled(void) { return 0; }
pci_enable_msi(struct pci_dev * dev)1708 static inline int pci_enable_msi(struct pci_dev *dev)
1709 { return -ENOSYS; }
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)1710 static inline int pci_enable_msix_range(struct pci_dev *dev,
1711 			struct msix_entry *entries, int minvec, int maxvec)
1712 { return -ENOSYS; }
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1713 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1714 			struct msix_entry *entries, int nvec)
1715 { return -ENOSYS; }
1716 
1717 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1718 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1719 			       unsigned int max_vecs, unsigned int flags,
1720 			       struct irq_affinity *aff_desc)
1721 {
1722 	if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
1723 		return 1;
1724 	return -ENOSPC;
1725 }
1726 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)1727 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1728 		      unsigned int max_vecs, unsigned int flags)
1729 {
1730 	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1731 					      flags, NULL);
1732 }
1733 
pci_msix_can_alloc_dyn(struct pci_dev * dev)1734 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1735 { return false; }
pci_msix_alloc_irq_at(struct pci_dev * dev,unsigned int index,const struct irq_affinity_desc * affdesc)1736 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1737 						   const struct irq_affinity_desc *affdesc)
1738 {
1739 	struct msi_map map = { .index = -ENOSYS, };
1740 
1741 	return map;
1742 }
1743 
pci_msix_free_irq(struct pci_dev * pdev,struct msi_map map)1744 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1745 {
1746 }
1747 
pci_free_irq_vectors(struct pci_dev * dev)1748 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1749 {
1750 }
1751 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)1752 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1753 {
1754 	if (WARN_ON_ONCE(nr > 0))
1755 		return -EINVAL;
1756 	return dev->irq;
1757 }
pci_irq_get_affinity(struct pci_dev * pdev,int vec)1758 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1759 		int vec)
1760 {
1761 	return cpu_possible_mask;
1762 }
1763 #endif
1764 
1765 /**
1766  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1767  * @d: the INTx IRQ domain
1768  * @node: the DT node for the device whose interrupt we're translating
1769  * @intspec: the interrupt specifier data from the DT
1770  * @intsize: the number of entries in @intspec
1771  * @out_hwirq: pointer at which to write the hwirq number
1772  * @out_type: pointer at which to write the interrupt type
1773  *
1774  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1775  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1776  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1777  * INTx value to obtain the hwirq number.
1778  *
1779  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1780  */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1781 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1782 				      struct device_node *node,
1783 				      const u32 *intspec,
1784 				      unsigned int intsize,
1785 				      unsigned long *out_hwirq,
1786 				      unsigned int *out_type)
1787 {
1788 	const u32 intx = intspec[0];
1789 
1790 	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1791 		return -EINVAL;
1792 
1793 	*out_hwirq = intx - PCI_INTERRUPT_INTA;
1794 	return 0;
1795 }
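
/*
 * A minimal sketch of how a host bridge driver might wire this helper into
 * the irq_domain used for its INTx interrupts; "foo_intx_map" is a
 * placeholder callback name:
 *
 *	static const struct irq_domain_ops foo_intx_domain_ops = {
 *		.map	= foo_intx_map,
 *		.xlate	= pci_irqd_intx_xlate,
 *	};
 */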
1796 
1797 #ifdef CONFIG_PCIEPORTBUS
1798 extern bool pcie_ports_disabled;
1799 extern bool pcie_ports_native;
1800 
1801 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1802 			  bool use_lt);
1803 #else
1804 #define pcie_ports_disabled	true
1805 #define pcie_ports_native	false
1806 
pcie_set_target_speed(struct pci_dev * port,enum pci_bus_speed speed_req,bool use_lt)1807 static inline int pcie_set_target_speed(struct pci_dev *port,
1808 					enum pci_bus_speed speed_req,
1809 					bool use_lt)
1810 {
1811 	return -EOPNOTSUPP;
1812 }
1813 #endif
1814 
1815 #define PCIE_LINK_STATE_L0S		(BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1816 #define PCIE_LINK_STATE_L1		BIT(2)	/* L1 state */
1817 #define PCIE_LINK_STATE_L1_1		BIT(3)	/* ASPM L1.1 state */
1818 #define PCIE_LINK_STATE_L1_2		BIT(4)	/* ASPM L1.2 state */
1819 #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)	/* PCI-PM L1.1 state */
1820 #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)	/* PCI-PM L1.2 state */
1821 #define PCIE_LINK_STATE_ASPM_ALL	(PCIE_LINK_STATE_L0S		|\
1822 					 PCIE_LINK_STATE_L1		|\
1823 					 PCIE_LINK_STATE_L1_1		|\
1824 					 PCIE_LINK_STATE_L1_2		|\
1825 					 PCIE_LINK_STATE_L1_1_PCIPM	|\
1826 					 PCIE_LINK_STATE_L1_2_PCIPM)
1827 #define PCIE_LINK_STATE_CLKPM		BIT(7)
1828 #define PCIE_LINK_STATE_ALL		(PCIE_LINK_STATE_ASPM_ALL	|\
1829 					 PCIE_LINK_STATE_CLKPM)
1830 
1831 #ifdef CONFIG_PCIEASPM
1832 int pci_disable_link_state(struct pci_dev *pdev, int state);
1833 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1834 int pci_enable_link_state(struct pci_dev *pdev, int state);
1835 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1836 void pcie_no_aspm(void);
1837 bool pcie_aspm_support_enabled(void);
1838 bool pcie_aspm_enabled(struct pci_dev *pdev);
1839 #else
pci_disable_link_state(struct pci_dev * pdev,int state)1840 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1841 { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1842 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1843 { return 0; }
pci_enable_link_state(struct pci_dev * pdev,int state)1844 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1845 { return 0; }
pci_enable_link_state_locked(struct pci_dev * pdev,int state)1846 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1847 { return 0; }
pcie_no_aspm(void)1848 static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1849 static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1850 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1851 #endif
1852 
1853 #ifdef CONFIG_PCIEAER
1854 bool pci_aer_available(void);
1855 #else
pci_aer_available(void)1856 static inline bool pci_aer_available(void) { return false; }
1857 #endif
1858 
1859 bool pci_ats_disabled(void);
1860 
1861 #ifdef CONFIG_PCIE_PTM
1862 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1863 void pci_disable_ptm(struct pci_dev *dev);
1864 bool pcie_ptm_enabled(struct pci_dev *dev);
1865 #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1866 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1867 { return -EINVAL; }
pci_disable_ptm(struct pci_dev * dev)1868 static inline void pci_disable_ptm(struct pci_dev *dev) { }
pcie_ptm_enabled(struct pci_dev * dev)1869 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1870 { return false; }
1871 #endif
1872 
1873 void pci_cfg_access_lock(struct pci_dev *dev);
1874 bool pci_cfg_access_trylock(struct pci_dev *dev);
1875 void pci_cfg_access_unlock(struct pci_dev *dev);
1876 
1877 void pci_dev_lock(struct pci_dev *dev);
1878 int pci_dev_trylock(struct pci_dev *dev);
1879 void pci_dev_unlock(struct pci_dev *dev);
1880 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1881 
1882 /*
1883  * PCI domain support.  Sometimes called a PCI segment (e.g. by ACPI),
1884  * a PCI domain is defined to be a set of PCI buses which share
1885  * configuration space.
1886  */
1887 #ifdef CONFIG_PCI_DOMAINS
1888 extern int pci_domains_supported;
1889 #else
1890 enum { pci_domains_supported = 0 };
1891 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1892 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1893 #endif /* CONFIG_PCI_DOMAINS */
1894 
1895 /*
1896  * Generic implementation for PCI domain support. If your
1897  * architecture does not need custom management of PCI
1898  * domains, then this implementation will be used.
1899  */
1900 #ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_domain_nr(struct pci_bus * bus)1901 static inline int pci_domain_nr(struct pci_bus *bus)
1902 {
1903 	return bus->domain_nr;
1904 }
1905 #ifdef CONFIG_ACPI
1906 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1907 #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)1908 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1909 { return 0; }
1910 #endif
1911 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1912 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
1913 #endif
1914 
1915 /* Some architectures require additional setup to direct VGA traffic */
1916 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1917 				    unsigned int command_bits, u32 flags);
1918 void pci_register_set_vga_state(arch_set_vga_state_t func);
1919 
1920 static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)1921 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1922 {
1923 	return pci_request_selected_regions(pdev,
1924 			    pci_select_bars(pdev, IORESOURCE_IO), name);
1925 }
1926 
1927 static inline void
pci_release_io_regions(struct pci_dev * pdev)1928 pci_release_io_regions(struct pci_dev *pdev)
1929 {
1930 	return pci_release_selected_regions(pdev,
1931 			    pci_select_bars(pdev, IORESOURCE_IO));
1932 }
1933 
1934 static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)1935 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1936 {
1937 	return pci_request_selected_regions(pdev,
1938 			    pci_select_bars(pdev, IORESOURCE_MEM), name);
1939 }
1940 
1941 static inline void
pci_release_mem_regions(struct pci_dev * pdev)1942 pci_release_mem_regions(struct pci_dev *pdev)
1943 {
1944 	return pci_release_selected_regions(pdev,
1945 			    pci_select_bars(pdev, IORESOURCE_MEM));
1946 }
1947 
1948 #else /* CONFIG_PCI is not enabled */
1949 
pci_set_flags(int flags)1950 static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)1951 static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)1952 static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)1953 static inline int pci_has_flag(int flag) { return 0; }
1954 
1955 /*
1956  * If the system does not have PCI, clearly these return errors.  Define
1957  * these as simple inline functions to avoid hair in drivers.
1958  */
1959 #define _PCI_NOP(o, s, t) \
1960 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1961 						int where, t val) \
1962 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
1963 
1964 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
1965 				_PCI_NOP(o, word, u16 x) \
1966 				_PCI_NOP(o, dword, u32 x)
1967 _PCI_NOP_ALL(read, *)
1968 _PCI_NOP_ALL(write,)
1969 
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)1970 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1971 					     unsigned int device,
1972 					     struct pci_dev *from)
1973 { return NULL; }
1974 
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)1975 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1976 					     unsigned int device,
1977 					     unsigned int ss_vendor,
1978 					     unsigned int ss_device,
1979 					     struct pci_dev *from)
1980 { return NULL; }
1981 
pci_get_class(unsigned int class,struct pci_dev * from)1982 static inline struct pci_dev *pci_get_class(unsigned int class,
1983 					    struct pci_dev *from)
1984 { return NULL; }
1985 
pci_get_base_class(unsigned int class,struct pci_dev * from)1986 static inline struct pci_dev *pci_get_base_class(unsigned int class,
1987 						 struct pci_dev *from)
1988 { return NULL; }
1989 
pci_dev_present(const struct pci_device_id * ids)1990 static inline int pci_dev_present(const struct pci_device_id *ids)
1991 { return 0; }
1992 
1993 #define no_pci_devices()	(1)
1994 #define pci_dev_put(dev)	do { } while (0)
1995 
pci_set_master(struct pci_dev * dev)1996 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)1997 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)1998 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)1999 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)2000 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)2001 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2002 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)2003 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2004 						     struct module *owner,
2005 						     const char *mod_name)
2006 { return 0; }
pci_register_driver(struct pci_driver * drv)2007 static inline int pci_register_driver(struct pci_driver *drv)
2008 { return 0; }
pci_unregister_driver(struct pci_driver * drv)2009 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)2010 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2011 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)2012 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2013 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)2014 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2015 { return 0; }
2016 
pci_get_dsn(struct pci_dev * dev)2017 static inline u64 pci_get_dsn(struct pci_dev *dev)
2018 { return 0; }
2019 
2020 /* Power management related routines */
pci_save_state(struct pci_dev * dev)2021 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
pci_restore_state(struct pci_dev * dev)2022 static inline void pci_restore_state(struct pci_dev *dev) { }
pci_set_power_state(struct pci_dev * dev,pci_power_t state)2023 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2024 { return 0; }
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)2025 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2026 { return 0; }
pci_wake_from_d3(struct pci_dev * dev,bool enable)2027 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2028 { return 0; }
pci_choose_state(struct pci_dev * dev,pm_message_t state)2029 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2030 					   pm_message_t state)
2031 { return PCI_D0; }
pci_enable_wake(struct pci_dev * dev,pci_power_t state,int enable)2032 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2033 				  int enable)
2034 { return 0; }
2035 
pci_find_resource(struct pci_dev * dev,struct resource * res)2036 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2037 						 struct resource *res)
2038 { return NULL; }
pci_request_regions(struct pci_dev * dev,const char * res_name)2039 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2040 { return -EIO; }
pci_release_regions(struct pci_dev * dev)2041 static inline void pci_release_regions(struct pci_dev *dev) { }
2042 
pci_register_io_range(const struct fwnode_handle * fwnode,phys_addr_t addr,resource_size_t size)2043 static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
2044 					phys_addr_t addr, resource_size_t size)
2045 { return -EINVAL; }
2046 
pci_address_to_pio(phys_addr_t addr)2047 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2048 
pci_find_next_bus(const struct pci_bus * from)2049 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2050 { return NULL; }
pci_get_slot(struct pci_bus * bus,unsigned int devfn)2051 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2052 						unsigned int devfn)
2053 { return NULL; }
pci_get_domain_bus_and_slot(int domain,unsigned int bus,unsigned int devfn)2054 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2055 					unsigned int bus, unsigned int devfn)
2056 { return NULL; }
2057 
pci_domain_nr(struct pci_bus * bus)2058 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_dev_get(struct pci_dev * dev)2059 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2060 
2061 #define dev_is_pci(d) (false)
2062 #define dev_is_pf(d) (false)
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)2063 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2064 { return false; }
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)2065 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2066 				      struct device_node *node,
2067 				      const u32 *intspec,
2068 				      unsigned int intsize,
2069 				      unsigned long *out_hwirq,
2070 				      unsigned int *out_type)
2071 { return -EINVAL; }
2072 
pci_match_id(const struct pci_device_id * ids,struct pci_dev * dev)2073 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2074 							 struct pci_dev *dev)
2075 { return NULL; }
pci_ats_disabled(void)2076 static inline bool pci_ats_disabled(void) { return true; }
2077 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)2078 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2079 {
2080 	return -EINVAL;
2081 }
2082 
2083 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)2084 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2085 			       unsigned int max_vecs, unsigned int flags,
2086 			       struct irq_affinity *aff_desc)
2087 {
2088 	return -ENOSPC;
2089 }
2090 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)2091 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2092 		      unsigned int max_vecs, unsigned int flags)
2093 {
2094 	return -ENOSPC;
2095 }
2096 #endif /* CONFIG_PCI */
2097 
2098 /* Include architecture-dependent settings and functions */
2099 
2100 #include <asm/pci.h>
2101 
2102 /*
2103  * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2104  * is expected to be an offset within that region.
2105  *
2106  */
2107 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2108 			    struct vm_area_struct *vma,
2109 			    enum pci_mmap_state mmap_state, int write_combine);
2110 
2111 #ifndef arch_can_pci_mmap_wc
2112 #define arch_can_pci_mmap_wc()		0
2113 #endif
2114 
2115 #ifndef arch_can_pci_mmap_io
2116 #define arch_can_pci_mmap_io()		0
2117 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2118 #else
2119 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2120 #endif
2121 
2122 #ifndef pci_root_bus_fwnode
2123 #define pci_root_bus_fwnode(bus)	NULL
2124 #endif
2125 
2126 /*
2127  * These helpers provide forward and backward compatibility
2128  * for accessing the BAR (resource) information of a PCI device.
2129  */
2130 #define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])
2131 #define pci_resource_start(dev, bar)	(pci_resource_n(dev, bar)->start)
2132 #define pci_resource_end(dev, bar)	(pci_resource_n(dev, bar)->end)
2133 #define pci_resource_flags(dev, bar)	(pci_resource_n(dev, bar)->flags)
2134 #define pci_resource_len(dev,bar)					\
2135 	(pci_resource_end((dev), (bar)) ? 				\
2136 	 resource_size(pci_resource_n((dev), (bar))) : 0)
2137 
2138 #define __pci_dev_for_each_res0(dev, res, ...)				  \
2139 	for (unsigned int __b = 0;					  \
2140 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2141 	     __b++)
2142 
2143 #define __pci_dev_for_each_res1(dev, res, __b)				  \
2144 	for (__b = 0;							  \
2145 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2146 	     __b++)
2147 
2148 #define pci_dev_for_each_resource(dev, res, ...)			\
2149 	CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) 	\
2150 		    (dev, res, __VA_ARGS__)
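
/*
 * A minimal usage sketch, assuming a hypothetical driver with a memory
 * BAR 0; "regs" is a placeholder local name:
 *
 *	struct resource *res;
 *	unsigned int i;
 *	void __iomem *regs;
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
 *		return -ENODEV;
 *	regs = pci_ioremap_bar(pdev, 0);
 *
 *	pci_dev_for_each_resource(pdev, res, i)
 *		pci_info(pdev, "resource[%u]: %pR\n", i, res);
 */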
2151 
2152 /*
2153  * Similar to the helpers above, these manipulate per-pci_dev
2154  * driver-specific data.  They are just thin wrappers around the
2155  * corresponding functions that operate on the generic struct device.
2156  */
pci_get_drvdata(struct pci_dev * pdev)2157 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2158 {
2159 	return dev_get_drvdata(&pdev->dev);
2160 }
2161 
pci_set_drvdata(struct pci_dev * pdev,void * data)2162 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2163 {
2164 	dev_set_drvdata(&pdev->dev, data);
2165 }
2166 
pci_name(const struct pci_dev * pdev)2167 static inline const char *pci_name(const struct pci_dev *pdev)
2168 {
2169 	return dev_name(&pdev->dev);
2170 }
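
/*
 * A minimal usage sketch, assuming a hypothetical foo driver with a private
 * "struct foo"; the state is stashed in probe and retrieved in remove:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct foo *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *
 *		if (!priv)
 *			return -ENOMEM;
 *		pci_set_drvdata(pdev, priv);
 *		pr_info("bound %s\n", pci_name(pdev));
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		struct foo *priv = pci_get_drvdata(pdev);
 *
 *		foo_teardown(priv);
 *	}
 */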
2171 
2172 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2173 			  const struct resource *rsrc,
2174 			  resource_size_t *start, resource_size_t *end);
2175 
2176 /*
2177  * The world is not perfect and supplies us with broken PCI devices.
2178  * At least some of these bugs need a workaround, so both
2179  * generic (drivers/pci/quirks.c) and per-architecture code can define
2180  * fixup hooks to be called for particular buggy devices.
2181  */
2182 
2183 struct pci_fixup {
2184 	u16 vendor;			/* Or PCI_ANY_ID */
2185 	u16 device;			/* Or PCI_ANY_ID */
2186 	u32 class;			/* Or PCI_ANY_ID */
2187 	unsigned int class_shift;	/* should be 0, 8, 16 */
2188 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2189 	int hook_offset;
2190 #else
2191 	void (*hook)(struct pci_dev *dev);
2192 #endif
2193 };
2194 
2195 enum pci_fixup_pass {
2196 	pci_fixup_early,	/* Before probing BARs */
2197 	pci_fixup_header,	/* After reading configuration header */
2198 	pci_fixup_final,	/* Final phase of device fixups */
2199 	pci_fixup_enable,	/* pci_enable_device() time */
2200 	pci_fixup_resume,	/* pci_device_resume() */
2201 	pci_fixup_suspend,	/* pci_device_suspend() */
2202 	pci_fixup_resume_early, /* pci_device_resume_early() */
2203 	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
2204 };
2205 
2206 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2207 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2208 				    class_shift, hook)			\
2209 	__ADDRESSABLE(hook)						\
2210 	asm(".section "	#sec ", \"a\"				\n"	\
2211 	    ".balign	16					\n"	\
2212 	    ".short "	#vendor ", " #device "			\n"	\
2213 	    ".long "	#class ", " #class_shift "		\n"	\
2214 	    ".long "	#hook " - .				\n"	\
2215 	    ".previous						\n");
2216 
2217 /*
2218  * Clang's LTO may rename static functions in C, but has no way to
2219  * handle such renamings when referenced from inline asm. To work
2220  * around this, create global C stubs for these cases.
2221  */
2222 #ifdef CONFIG_LTO_CLANG
2223 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2224 				  class_shift, hook, stub)		\
2225 	void stub(struct pci_dev *dev);					\
2226 	void stub(struct pci_dev *dev)					\
2227 	{ 								\
2228 		hook(dev); 						\
2229 	}								\
2230 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2231 				  class_shift, stub)
2232 #else
2233 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2234 				  class_shift, hook, stub)		\
2235 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2236 				  class_shift, hook)
2237 #endif
2238 
2239 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2240 				  class_shift, hook)			\
2241 	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2242 				  class_shift, hook, __UNIQUE_ID(hook))
2243 #else
2244 /* Anonymous variables would be nice... */
2245 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
2246 				  class_shift, hook)			\
2247 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
2248 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
2249 		= { vendor, device, class, class_shift, hook };
2250 #endif
2251 
2252 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
2253 					 class_shift, hook)		\
2254 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2255 		hook, vendor, device, class, class_shift, hook)
2256 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
2257 					 class_shift, hook)		\
2258 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2259 		hook, vendor, device, class, class_shift, hook)
2260 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
2261 					 class_shift, hook)		\
2262 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2263 		hook, vendor, device, class, class_shift, hook)
2264 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
2265 					 class_shift, hook)		\
2266 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2267 		hook, vendor, device, class, class_shift, hook)
2268 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
2269 					 class_shift, hook)		\
2270 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2271 		resume##hook, vendor, device, class, class_shift, hook)
2272 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
2273 					 class_shift, hook)		\
2274 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2275 		resume_early##hook, vendor, device, class, class_shift, hook)
2276 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
2277 					 class_shift, hook)		\
2278 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2279 		suspend##hook, vendor, device, class, class_shift, hook)
2280 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
2281 					 class_shift, hook)		\
2282 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2283 		suspend_late##hook, vendor, device, class, class_shift, hook)
2284 
2285 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
2286 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2287 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2288 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
2289 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2290 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2291 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
2292 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2293 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2294 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
2295 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2296 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2297 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
2298 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2299 		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2300 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
2301 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2302 		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2303 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
2304 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2305 		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2306 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
2307 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2308 		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
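
/*
 * A minimal quirk sketch; the fixup pass, vendor/device IDs and flag below
 * are placeholders chosen only for illustration:
 *
 *	static void quirk_foo_no_bus_reset(struct pci_dev *dev)
 *	{
 *		dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
 *	}
 *	DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_REDHAT, 0x0001,
 *				quirk_foo_no_bus_reset);
 */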
2309 
2310 #ifdef CONFIG_PCI_QUIRKS
2311 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2312 #else
pci_fixup_device(enum pci_fixup_pass pass,struct pci_dev * dev)2313 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2314 				    struct pci_dev *dev) { }
2315 #endif
2316 
2317 int pcim_intx(struct pci_dev *pdev, int enabled);
2318 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2319 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2320 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2321 				const char *name);
2322 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2323 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2324 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2325 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2326 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2327 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
2328 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2329 				unsigned long offset, unsigned long len);
2330 
2331 extern int pci_pci_problems;
2332 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
2333 #define PCIPCI_TRITON		2
2334 #define PCIPCI_NATOMA		4
2335 #define PCIPCI_VIAETBF		8
2336 #define PCIPCI_VSFX		16
2337 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
2338 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
2339 
2340 extern u8 pci_dfl_cache_line_size;
2341 extern u8 pci_cache_line_size;
2342 
2343 /* Architecture-specific versions may override these (weak) */
2344 void pcibios_disable_device(struct pci_dev *dev);
2345 void pcibios_set_master(struct pci_dev *dev);
2346 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2347 				 enum pcie_reset_state state);
2348 int pcibios_device_add(struct pci_dev *dev);
2349 void pcibios_release_device(struct pci_dev *dev);
2350 #ifdef CONFIG_PCI
2351 void pcibios_penalize_isa_irq(int irq, int active);
2352 #else
pcibios_penalize_isa_irq(int irq,int active)2353 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2354 #endif
2355 int pcibios_alloc_irq(struct pci_dev *dev);
2356 void pcibios_free_irq(struct pci_dev *dev);
2357 resource_size_t pcibios_default_alignment(void);
2358 
2359 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2360 extern int pci_create_resource_files(struct pci_dev *dev);
2361 extern void pci_remove_resource_files(struct pci_dev *dev);
2362 #endif
2363 
2364 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2365 void __init pci_mmcfg_early_init(void);
2366 void __init pci_mmcfg_late_init(void);
2367 #else
pci_mmcfg_early_init(void)2368 static inline void pci_mmcfg_early_init(void) { }
pci_mmcfg_late_init(void)2369 static inline void pci_mmcfg_late_init(void) { }
2370 #endif
2371 
2372 int pci_ext_cfg_avail(void);
2373 
2374 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2375 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2376 
2377 #ifdef CONFIG_PCI_IOV
2378 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2379 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2380 int pci_iov_vf_id(struct pci_dev *dev);
2381 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2382 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2383 void pci_disable_sriov(struct pci_dev *dev);
2384 
2385 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2386 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2387 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2388 int pci_num_vf(struct pci_dev *dev);
2389 int pci_vfs_assigned(struct pci_dev *dev);
2390 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2391 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2392 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2393 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2394 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2395 
2396 /* Arch may override these (weak) */
2397 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2398 int pcibios_sriov_disable(struct pci_dev *pdev);
2399 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2400 #else
pci_iov_virtfn_bus(struct pci_dev * dev,int id)2401 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2402 {
2403 	return -ENOSYS;
2404 }
pci_iov_virtfn_devfn(struct pci_dev * dev,int id)2405 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2406 {
2407 	return -ENOSYS;
2408 }
2409 
pci_iov_vf_id(struct pci_dev * dev)2410 static inline int pci_iov_vf_id(struct pci_dev *dev)
2411 {
2412 	return -ENOSYS;
2413 }
2414 
pci_iov_get_pf_drvdata(struct pci_dev * dev,struct pci_driver * pf_driver)2415 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2416 					   struct pci_driver *pf_driver)
2417 {
2418 	return ERR_PTR(-EINVAL);
2419 }
2420 
pci_enable_sriov(struct pci_dev * dev,int nr_virtfn)2421 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2422 { return -ENODEV; }
2423 
pci_iov_sysfs_link(struct pci_dev * dev,struct pci_dev * virtfn,int id)2424 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2425 				     struct pci_dev *virtfn, int id)
2426 {
2427 	return -ENODEV;
2428 }
pci_iov_add_virtfn(struct pci_dev * dev,int id)2429 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2430 {
2431 	return -ENOSYS;
2432 }
pci_iov_remove_virtfn(struct pci_dev * dev,int id)2433 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2434 					 int id) { }
pci_disable_sriov(struct pci_dev * dev)2435 static inline void pci_disable_sriov(struct pci_dev *dev) { }
pci_num_vf(struct pci_dev * dev)2436 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
pci_vfs_assigned(struct pci_dev * dev)2437 static inline int pci_vfs_assigned(struct pci_dev *dev)
2438 { return 0; }
pci_sriov_set_totalvfs(struct pci_dev * dev,u16 numvfs)2439 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2440 { return 0; }
pci_sriov_get_totalvfs(struct pci_dev * dev)2441 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2442 { return 0; }
2443 #define pci_sriov_configure_simple	NULL
pci_iov_resource_size(struct pci_dev * dev,int resno)2444 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2445 { return 0; }
pci_vf_drivers_autoprobe(struct pci_dev * dev,bool probe)2446 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2447 #endif
2448 
2449 /**
2450  * pci_pcie_cap - get the saved PCIe capability offset
2451  * @dev: PCI device
2452  *
2453  * The PCIe capability offset is calculated at PCI device initialization
2454  * time and saved in struct pci_dev. This function returns the saved
2455  * offset. Using it instead of pci_find_capability() avoids an
2456  * unnecessary search of the PCI configuration space. If you need to
2457  * recompute the PCIe capability offset from the device itself for some
2458  * reason, use pci_find_capability() instead.
2459  */
pci_pcie_cap(struct pci_dev * dev)2460 static inline int pci_pcie_cap(struct pci_dev *dev)
2461 {
2462 	return dev->pcie_cap;
2463 }
2464 
2465 /**
2466  * pci_is_pcie - check if the PCI device is PCI Express capable
2467  * @dev: PCI device
2468  *
2469  * Returns: true if the PCI device is PCI Express capable, false otherwise.
2470  */
pci_is_pcie(struct pci_dev * dev)2471 static inline bool pci_is_pcie(struct pci_dev *dev)
2472 {
2473 	return pci_pcie_cap(dev);
2474 }
2475 
2476 /**
2477  * pcie_caps_reg - get the PCIe Capabilities Register
2478  * @dev: PCI device
2479  */
pcie_caps_reg(const struct pci_dev * dev)2480 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2481 {
2482 	return dev->pcie_flags_reg;
2483 }
2484 
2485 /**
2486  * pci_pcie_type - get the PCIe device/port type
2487  * @dev: PCI device
2488  */
pci_pcie_type(const struct pci_dev * dev)2489 static inline int pci_pcie_type(const struct pci_dev *dev)
2490 {
2491 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2492 }
2493 
2494 /**
2495  * pcie_find_root_port - Get the PCIe root port device
2496  * @dev: PCI device
2497  *
2498  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2499  * for a given PCI/PCIe Device.
2500  */
pcie_find_root_port(struct pci_dev * dev)2501 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2502 {
2503 	while (dev) {
2504 		if (pci_is_pcie(dev) &&
2505 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2506 			return dev;
2507 		dev = pci_upstream_bridge(dev);
2508 	}
2509 
2510 	return NULL;
2511 }
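
/*
 * A minimal sketch combining the helpers above to act only on PCIe
 * endpoints and to reach their Root Port; "pdev" is a placeholder:
 *
 *	struct pci_dev *rp;
 *
 *	if (!pci_is_pcie(pdev) ||
 *	    pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
 *		return;
 *
 *	rp = pcie_find_root_port(pdev);
 *	if (rp)
 *		pci_info(rp, "root port for %s\n", pci_name(pdev));
 */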
2512 
pci_dev_is_disconnected(const struct pci_dev * dev)2513 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2514 {
2515 	/*
2516 	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2517 	 * and read without holding a common lock. READ_ONCE() ensures the
2518 	 * compiler cannot cache the value (e.g. inside the loop in pci_dev_wait()).
2519 	 */
2520 	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2521 }

void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags);
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);

#define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)

/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
#define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
#define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */

#define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
#define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)

#define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"

/**
 * pci_vpd_alloc - Allocate buffer and read VPD into it
 * @dev: PCI device
 * @size: pointer to field where VPD length is returned
 *
 * Returns a pointer to the allocated buffer or an ERR_PTR() in case of failure.
 */
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);

/**
 * pci_vpd_find_id_string - Locate id string in VPD
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @size: Pointer to field where length of id string is returned
 *
 * Returns the index of the id string or -ENOENT if not found.
 */
int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);

/**
 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @kw: The keyword to search for
 * @size: Pointer to field where length of found keyword data is returned
 *
 * Returns the index of the information field keyword data or -ENOENT if
 * not found.
 */
int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
				 const char *kw, unsigned int *size);

/**
 * pci_vpd_check_csum - Check VPD checksum
 * @buf: Pointer to buffered VPD data
 * @len: VPD size
 *
 * Returns 1 if the VPD has no checksum, otherwise 0 or a negative errno.
 */
int pci_vpd_check_csum(const void *buf, unsigned int len);
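/*
 * Illustrative sketch (not part of the upstream header): reading the
 * serial-number keyword from VPD with the helpers above.  "pdev" is a
 * hypothetical device and error handling is abbreviated.
 *
 *	unsigned int vpd_len, kw_len;
 *	void *vpd;
 *	int off;
 *
 *	vpd = pci_vpd_alloc(pdev, &vpd_len);
 *	if (IS_ERR(vpd))
 *		return PTR_ERR(vpd);
 *
 *	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *					   PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
 *	if (off >= 0)
 *		pci_info(pdev, "serial: %.*s\n", (int)kw_len, (char *)vpd + off);
 *	kfree(vpd);
 */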

/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
bool pci_host_of_has_msi_map(struct device *dev);

/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);

#else	/* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif  /* CONFIG_OF */

static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev)
{
	return pdev ? pdev->dev.of_node : NULL;
}

static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
{
	return bus ? bus->dev.of_node : NULL;
}
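/*
 * Illustrative sketch (not part of the upstream header): for a DT-described
 * PCI device, firmware properties can be read through the node returned by
 * pci_device_to_OF_node().  "pdev" and the "max-link-speed" property are
 * examples only.
 *
 *	struct device_node *np = pci_device_to_OF_node(pdev);
 *	u32 speed;
 *
 *	if (np && !of_property_read_u32(np, "max-link-speed", &speed))
 *		pci_info(pdev, "max-link-speed: %u\n", speed);
 */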

#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);

void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif

#if defined(CONFIG_X86) && defined(CONFIG_ACPI)
bool arch_pci_dev_is_removable(struct pci_dev *pdev);
#else
static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
#endif

#ifdef CONFIG_EEH
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
#endif

void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned int nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
			   int (*fn)(struct pci_dev *pdev,
				     u16 alias, void *data), void *data);
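/*
 * Illustrative sketch (not part of the upstream header): IOMMU code walks all
 * DMA aliases of a device with pci_for_each_dma_alias().  The callback below
 * is hypothetical and simply prints each requester ID.
 *
 *	static int print_alias(struct pci_dev *pdev, u16 alias, void *data)
 *	{
 *		pci_info(pdev, "alias %02x:%02x.%d\n", PCI_BUS_NUM(alias),
 *			 PCI_SLOT(alias & 0xff), PCI_FUNC(alias & 0xff));
 *		return 0;
 *	}
 *
 * and then, from setup code:
 *
 *	pci_for_each_dma_alias(pdev, print_alias, NULL);
 */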

/* Helpers for managing the PCI_DEV_FLAGS_ASSIGNED device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}

/**
 * pci_ari_enabled - query ARI forwarding status
 * @bus: the PCI bus
 *
 * Returns true if ARI forwarding is enabled.
 */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	return bus->self && bus->self->ari_enabled;
}

/**
 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
 * @pdev: PCI device to check
 *
 * Walk upwards from @pdev and check for each encountered bridge if it's part
 * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
 * Thunderbolt-attached (it is then usually soldered to the mainboard instead).
 */
static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev;

	if (pdev->is_thunderbolt)
		return true;

	while ((parent = pci_upstream_bridge(parent)))
		if (parent->is_thunderbolt)
			return true;

	return false;
}

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif

#include <linux/dma-mapping.h>

#define pci_printk(level, pdev, fmt, arg...) \
	dev_printk(level, &(pdev)->dev, fmt, ##arg)

#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
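/*
 * Illustrative sketch (not part of the upstream header): the pci_*() logging
 * wrappers above prefix messages with the driver and device name, so probe
 * paths can report errors without open-coding dev_err().  "pdev" is a
 * hypothetical device.
 *
 *	int ret = pci_enable_device(pdev);
 *
 *	if (ret) {
 *		pci_err(pdev, "enable failed: %d\n", ret);
 *		return ret;
 *	}
 *	pci_dbg(pdev, "device enabled\n");
 */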

#endif /* LINUX_PCI_H */