xref: /linux/include/linux/pci.h (revision 4793dae01f47754e288cdbb3a22581cac2317f2b) !
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *	pci.h
4  *
5  *	PCI defines and function prototypes
6  *	Copyright 1994, Drew Eckhardt
7  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8  *
9  *	PCI Express ASPM defines and function prototypes
10  *	Copyright (c) 2007 Intel Corp.
11  *		Zhang Yanmin (yanmin.zhang@intel.com)
12  *		Shaohua Li (shaohua.li@intel.com)
13  *
14  *	For more information, please consult the following manuals (look at
15  *	http://www.pcisig.com/ for how to get them):
16  *
17  *	PCI BIOS Specification
18  *	PCI Local Bus Specification
19  *	PCI to PCI Bridge Specification
20  *	PCI Express Specification
21  *	PCI System Design Guide
22  */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25 
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28 
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43 
44 #include <linux/pci_ids.h>
45 
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
47 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
48 			       PCI_STATUS_REC_MASTER_ABORT | \
49 			       PCI_STATUS_REC_TARGET_ABORT | \
50 			       PCI_STATUS_SIG_TARGET_ABORT | \
51 			       PCI_STATUS_PARITY)
52 
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55 
56 #define PCI_RESET_PROBE		true
57 #define PCI_RESET_DO_RESET	false
58 
59 /*
60  * The PCI interface treats multi-function devices as independent
61  * devices.  The slot/function address of each device is encoded
62  * in a single byte as follows:
63  *
64  *	7:3 = slot
65  *	2:0 = function
66  *
67  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68  * In the interest of not exposing interfaces to user-space unnecessarily,
69  * the following kernel-only defines are being added here.
70  */
/* Combine bus number and devfn into a single 16-bit device ID */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
74 
75 /* pci_slot represents a physical slot */
/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
	struct kobject		kobj;		/* Backs the slot's sysfs entry */
};
83 
pci_slot_name(const struct pci_slot * slot)84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 	return kobject_name(&slot->kobj);
87 }
88 
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
	pci_mmap_io,	/* Mapping of I/O port space */
	pci_mmap_mem	/* Mapping of memory space */
};
94 
95 /* For PCI devices, the region numbers are assigned this way: */
/*
 * For PCI devices, the region numbers are assigned this way.
 * NOTE: the #defines below are placed inside the enum body so they sit
 * next to the enumerators they relate to; order of enumerators is ABI
 * for the resource[] array indices and must not change.
 */
enum {
	/* #0-5: standard PCI resources */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_P2P_BRIDGE_RESOURCE_NUM	3
#define PCI_BRIDGE_RESOURCE_NUM		4

	/* Resources assigned to buses behind the bridge */
	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
136 
137 /**
138  * enum pci_interrupt_pin - PCI INTx interrupt values
139  * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
140  * @PCI_INTERRUPT_INTA: PCI INTA pin
141  * @PCI_INTERRUPT_INTB: PCI INTB pin
142  * @PCI_INTERRUPT_INTC: PCI INTC pin
143  * @PCI_INTERRUPT_INTD: PCI INTD pin
144  *
145  * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
146  * PCI_INTERRUPT_PIN register.
147  */
enum pci_interrupt_pin {
	PCI_INTERRUPT_UNKNOWN,	/* 0: unknown or unassigned */
	PCI_INTERRUPT_INTA,	/* 1..4 match the PCI_INTERRUPT_PIN register encoding */
	PCI_INTERRUPT_INTB,
	PCI_INTERRUPT_INTC,
	PCI_INTERRUPT_INTD,
};
155 
156 /* The number of legacy PCI INTx interrupts */
157 #define PCI_NUM_INTX	4
158 
159 /*
160  * Reading from a device that doesn't respond typically returns ~0.  A
161  * successful read from a device may also return ~0, so you need additional
162  * information to reliably identify errors.
163  */
164 #define PCI_ERROR_RESPONSE		(~0ULL)
165 #define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
166 #define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
167 
168 /*
169  * pci_power_t values must match the bits in the Capabilities PME_Support
170  * and Control/Status PowerState fields in the Power Management capability.
171  */
172 typedef int __bitwise pci_power_t;
173 
174 #define PCI_D0		((pci_power_t __force) 0)
175 #define PCI_D1		((pci_power_t __force) 1)
176 #define PCI_D2		((pci_power_t __force) 2)
177 #define PCI_D3hot	((pci_power_t __force) 3)
178 #define PCI_D3cold	((pci_power_t __force) 4)
179 #define PCI_UNKNOWN	((pci_power_t __force) 5)
180 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
181 
182 /* Remember to update this when the list above changes! */
183 extern const char *pci_power_names[];
184 
/* Return the human-readable name for a pci_power_t state. */
static inline const char *pci_power_name(pci_power_t state)
{
	/* Offset by one so PCI_POWER_ERROR (-1) maps to index 0. */
	int idx = 1 + (__force int) state;

	return pci_power_names[idx];
}
189 
190 /**
191  * typedef pci_channel_state_t
192  *
193  * The pci_channel state describes connectivity between the CPU and
194  * the PCI device.  If some PCI bus between here and the PCI device
195  * has crashed or locked up, this info is reflected here.
196  */
197 typedef unsigned int __bitwise pci_channel_state_t;
198 
/* Values for pci_channel_state_t; note these start at 1, not 0 */
enum {
	/* I/O channel is in normal state */
	pci_channel_io_normal = (__force pci_channel_state_t) 1,

	/* I/O to channel is blocked */
	pci_channel_io_frozen = (__force pci_channel_state_t) 2,

	/* PCI card is dead */
	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};
209 
210 typedef unsigned int __bitwise pcie_reset_state_t;
211 
enum pcie_reset_state {
	/* Reset is NOT asserted (Use to deassert reset) */
	pcie_deassert_reset = (__force pcie_reset_state_t) 1,

	/* Use #PERST to reset PCIe device */
	pcie_warm_reset = (__force pcie_reset_state_t) 2,

	/* Use PCIe Hot Reset to reset device */
	pcie_hot_reset = (__force pcie_reset_state_t) 3
};
222 
typedef unsigned short __bitwise pci_dev_flags_t;
/* Per-device quirk/behavior flags; note bit 4 (1 << 4) is currently unassigned */
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
	/* Device does honor MSI masking despite saying otherwise */
	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
	/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
	PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
	/*
	 * PCIe to PCI bridge does not create RID aliases because the bridge is
	 * integrated with the downstream devices and doesn't use real PCI.
	 */
	PCI_DEV_FLAGS_PCI_BRIDGE_NO_ALIAS = (__force pci_dev_flags_t) (1 << 14),
};
257 
/* IRQ rerouting variants; stored in the 2-bit irq_reroute_variant field */
enum pci_irq_reroute_variant {
	INTEL_IRQ_REROUTE_VARIANT = 1,
	MAX_IRQ_REROUTE_VARIANTS = 3	/* Upper bound, must fit in 2 bits */
};
262 
typedef unsigned short __bitwise pci_bus_flags_t;
/* Per-bus capability restrictions; inherited by child buses */
enum pci_bus_flags {
	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,	/* MSI not usable on this bus */
	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,	/* PCI-X max memory read byte count fixed */
	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,	/* AER source ID unreliable */
	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,	/* No extended config space access */
};
270 
271 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
};
283 
284 /* See matching string table in pci_speed_string() */
/* See matching string table in pci_speed_string(); note 0x08 is unassigned */
enum pci_bus_speed {
	PCI_SPEED_33MHz			= 0x00,
	PCI_SPEED_66MHz			= 0x01,
	PCI_SPEED_66MHz_PCIX		= 0x02,
	PCI_SPEED_100MHz_PCIX		= 0x03,
	PCI_SPEED_133MHz_PCIX		= 0x04,
	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
	PCI_SPEED_66MHz_PCIX_266	= 0x09,
	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
	AGP_UNKNOWN			= 0x0c,
	AGP_1X				= 0x0d,
	AGP_2X				= 0x0e,
	AGP_4X				= 0x0f,
	AGP_8X				= 0x10,
	PCI_SPEED_66MHz_PCIX_533	= 0x11,
	PCI_SPEED_100MHz_PCIX_533	= 0x12,
	PCI_SPEED_133MHz_PCIX_533	= 0x13,
	PCIE_SPEED_2_5GT		= 0x14,
	PCIE_SPEED_5_0GT		= 0x15,
	PCIE_SPEED_8_0GT		= 0x16,
	PCIE_SPEED_16_0GT		= 0x17,
	PCIE_SPEED_32_0GT		= 0x18,
	PCIE_SPEED_64_0GT		= 0x19,
	PCI_SPEED_UNKNOWN		= 0xff,
};
313 
314 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
315 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
316 
/* Vital Product Data (VPD) state for a device */
struct pci_vpd {
	struct mutex	lock;	/* Serializes VPD accesses */
	unsigned int	len;	/* Size of the VPD area */
	u8		cap;	/* VPD capability offset */
};
322 
323 struct irq_affinity;
324 struct pcie_bwctrl_data;
325 struct pcie_link_state;
326 struct pci_sriov;
327 struct pci_p2pdma;
328 struct rcec_ea;
329 
330 /* struct pci_dev - describes a PCI device
331  *
332  * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
333  *			LSB). 0 when the supported speeds cannot be
334  *			determined (e.g., for Root Complex Integrated
335  *			Endpoints without the relevant Capability
336  *			Registers).
337  * @is_hotplug_bridge:	Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
338  *			Conventional PCI Hot-Plug, ACPI slot).
339  *			Such bridges are allocated additional MMIO and bus
340  *			number resources to allow for hierarchy expansion.
341  * @is_pciehp:		PCIe Hot-Plug Capable bridge.
342  */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;		/* Vendor ID from config space */
	unsigned short	device;		/* Device ID from config space */
	unsigned short	subsystem_vendor;	/* Subsystem Vendor ID */
	unsigned short	subsystem_device;	/* Subsystem Device ID */
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_info	*aer_info;	/* AER info for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
	struct pci_dev  *rcec;          /* Associated RCEC device */
#endif
	u32		devcap;		/* PCIe Device Capabilities */
	u16		rebar_cap;	/* Resizable BAR capability offset */
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers.  */
	u64		msi_addr_mask;	/* Mask of the bits of bus address for
					   MSI that this device implements.
					   Normally set based on device
					   capabilities. You only need to
					   change this if your device claims
					   to support 64-bit MSI but implements
					   fewer than 64 address bits. */

	struct device_dma_parameters dma_parms;

	pci_power_t	current_state;	/* Current operating state. In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off. */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	pinned:1;	/* Whether this dev is pinned */
	unsigned int	config_rrs_sv:1; /* Config RRS software visibility */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;	/* Wakeup setup done by platform */
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	no_bw_notif:1;	/* BW notifications may cause issues */
	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

	u16		l1ss;		/* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	unsigned int	aspm_l0s_support:1;	/* ASPM L0s support */
	unsigned int	aspm_l1_support:1;	/* ASPM L1 support */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
#endif
	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
	unsigned int	eetlp_prefix_max:3;	/* Max # of End-End TLP Prefixes, 0=not supported */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here. They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;
	unsigned int	msix_enabled:1;
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	tph_enabled:1;		/* TLP Processing Hints */
	unsigned int	fm_enabled:1;		/* Flit Mode (segment captured) */
	unsigned int	is_managed:1;		/* Managed via devres */
	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;
	unsigned int	is_physfn:1;
	unsigned int	is_virtfn:1;
	unsigned int	is_hotplug_bridge:1;
	unsigned int	is_pciehp:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	unsigned int	is_cxl:1;               /* Compute Express Link (CXL) */
	/*
	 * Devices marked being untrusted are the ones that can potentially
	 * execute DMA attacks and similar. They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that. When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
	unsigned int	non_mappable_bars:1;	/* BARs can't be mapped to user-space  */
	pci_dev_flags_t dev_flags;
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	u16		ptm_cap;		/* PTM Capability */
	unsigned int	ptm_root:1;
	unsigned int	ptm_responder:1;
	unsigned int	ptm_requester:1;
	unsigned int	ptm_enabled:1;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	void __iomem	*msix_base;
	raw_spinlock_t	msi_lock;
#endif
	struct pci_vpd	vpd;
#ifdef CONFIG_PCIE_DPC
	u16		dpc_cap;
	unsigned int	dpc_rp_extensions:1;
	u8		dpc_rp_log_size;
#endif
	struct pcie_bwctrl_data		*link_bwctrl;
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
	u16		pri_cap;	/* PRI Capability offset */
	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
	unsigned int	pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_cap;	/* PASID Capability offset */
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma __rcu *p2pdma;
#endif
#ifdef CONFIG_PCI_DOE
	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
#endif
#ifdef CONFIG_PCI_NPEM
	struct npem	*npem;		/* Native PCIe Enclosure Management */
#endif
#ifdef CONFIG_PCI_IDE
	u16		ide_cap;	/* Link Integrity & Data Encryption */
	u8		nr_ide_mem;	/* Address association resources for streams */
	u8		nr_link_ide;	/* Link Stream count (Selective Stream offset) */
	u16		nr_sel_ide;	/* Selective Stream count (register block allocator) */
	struct ida	ide_stream_ida;
	unsigned int	ide_cfg:1;	/* Config cycles over IDE */
	unsigned int	ide_tee_limit:1; /* Disallow T=0 traffic over IDE */
#endif
#ifdef CONFIG_PCI_TSM
	struct pci_tsm *tsm;		/* TSM operation state */
#endif
	u16		acs_cap;	/* ACS Capability offset */
	u16		acs_capabilities; /* ACS Capabilities */
	u8		supported_speeds; /* Supported Link Speeds Vector */
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	unsigned long	priv_flags;	/* Private flags for the PCI driver */

	/* These methods index pci_reset_fn_methods[] */
	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */

#ifdef CONFIG_PCIE_TPH
	u16		tph_cap;	/* TPH capability offset */
	u8		tph_mode;	/* TPH mode */
	u8		tph_req_type;	/* TPH requester type */
#endif
};
589 
/*
 * Return the physical function backing @dev: for an SR-IOV virtual
 * function this is its parent PF; otherwise the device itself.
 */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn ? dev->physfn : dev;
#else
	return dev;
#endif
}
598 
599 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
600 
601 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
602 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
603 #define for_each_pci_dev_reverse(d) \
604 	while ((d = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
605 
pci_channel_offline(struct pci_dev * pdev)606 static inline int pci_channel_offline(struct pci_dev *pdev)
607 {
608 	return (pdev->error_state != pci_channel_io_normal);
609 }
610 
611 /*
612  * Currently in ACPI spec, for each PCI host bridge, PCI Segment
613  * Group number is limited to a 16-bit value, therefore (int)-1 is
614  * not a valid PCI domain number, and can be used as a sentinel
615  * value indicating ->domain_nr is not set by the driver (and
616  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
617  * pci_bus_find_domain_nr()).
618  */
619 #define PCI_DOMAIN_NR_NOT_SET (-1)
620 
struct pci_host_bridge {
	struct device	dev;
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;		/* Config access for the root bus */
	struct pci_ops	*child_ops;	/* Config access for child buses */
	void		*sysdata;	/* Hook for sys-specific extension */
	int		busnr;		/* Root bus number */
	int		domain_nr;	/* PCI_DOMAIN_NR_NOT_SET if unset by driver */
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
#ifdef CONFIG_PCI_IDE
	u16 nr_ide_streams; /* Max streams possibly active in @ide_stream_ida */
	struct ida ide_stream_ida;
	struct ida ide_stream_ids_ida; /* track unique ids per domain */
#endif
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8);
	void (*release_fn)(struct pci_host_bridge *);
	int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void		*release_data;	/* Passed to release_fn */
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
	unsigned int	size_windows:1;		/* Enable root bus sizing */
	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);
	/* Driver-private area; see pci_host_bridge_priv() */
	unsigned long	private[] ____cacheline_aligned;
};
664 
665 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
666 
pci_host_bridge_priv(struct pci_host_bridge * bridge)667 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
668 {
669 	return (void *)bridge->private;
670 }
671 
/* Map a pointer to the driver-private area back to its enclosing bridge. */
static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}
676 
677 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
678 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
679 						   size_t priv);
680 void pci_free_host_bridge(struct pci_host_bridge *bridge);
681 struct device *pci_get_host_bridge_device(struct pci_dev *dev);
682 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
683 
684 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
685 				 void (*release_fn)(struct pci_host_bridge *),
686 				 void *release_data);
687 
688 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
689 
690 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
691 
struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;	/* PCI domain (segment) number */
#endif

	char		name[48];

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device		*bridge;
	struct device		dev;
	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute	*legacy_mem;	/* Legacy mem */
	unsigned int		is_added:1;
	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
	unsigned int		flit_mode:1;	/* Link in Flit mode */
};
728 
729 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
730 
pci_dev_id(struct pci_dev * dev)731 static inline u16 pci_dev_id(struct pci_dev *dev)
732 {
733 	return PCI_DEVID(dev->bus->number, dev->devfn);
734 }
735 
736 /*
737  * Returns true if the PCI bus is root (behind host-PCI bridge),
738  * false otherwise
739  *
740  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
741  * This is incorrect because "virtual" buses added for SR-IOV (via
742  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
743  */
pci_is_root_bus(struct pci_bus * pbus)744 static inline bool pci_is_root_bus(struct pci_bus *pbus)
745 {
746 	return !(pbus->parent);
747 }
748 
749 /**
750  * pci_is_bridge - check if the PCI device is a bridge
751  * @dev: PCI device
752  *
753  * Return true if the PCI device is bridge whether it has subordinate
754  * or not.
755  */
pci_is_bridge(struct pci_dev * dev)756 static inline bool pci_is_bridge(struct pci_dev *dev)
757 {
758 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
759 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
760 }
761 
762 /**
763  * pci_is_vga - check if the PCI device is a VGA device
764  * @pdev: PCI device
765  *
766  * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
767  * VGA Base Class and Sub-Classes:
768  *
769  *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
770  *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
771  *
772  * Return true if the PCI device is a VGA device and uses the legacy VGA
773  * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
774  * aliases).
775  */
pci_is_vga(struct pci_dev * pdev)776 static inline bool pci_is_vga(struct pci_dev *pdev)
777 {
778 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
779 		return true;
780 
781 	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
782 		return true;
783 
784 	return false;
785 }
786 
787 /**
788  * pci_is_display - check if the PCI device is a display controller
789  * @pdev: PCI device
790  *
791  * Determine whether the given PCI device corresponds to a display
792  * controller. Display controllers are typically used for graphical output
793  * and are identified based on their class code.
794  *
795  * Return: true if the PCI device is a display controller, false otherwise.
796  */
pci_is_display(struct pci_dev * pdev)797 static inline bool pci_is_display(struct pci_dev *pdev)
798 {
799 	return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
800 }
801 
/**
 * pcie_is_cxl - check if the PCI device is CXL-capable
 * @pci_dev: PCI device
 *
 * Return: true if the device's &struct pci_dev.is_cxl flag has been set,
 * false otherwise.
 */
static inline bool pcie_is_cxl(struct pci_dev *pci_dev)
{
	return pci_dev->is_cxl;
}
806 
/*
 * Iterate over the bridge devices on @bus.  The "if (!bridge) {} else"
 * form skips non-bridges while keeping the macro safe to use as a single
 * statement (no dangling-else surprise for the caller's body).
 */
#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else
810 
pci_upstream_bridge(struct pci_dev * dev)811 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
812 {
813 	dev = pci_physfn(dev);
814 	if (pci_is_root_bus(dev->bus))
815 		return NULL;
816 
817 	return dev->bus->self;
818 }
819 
/*
 * pci_dev_msi_enabled() - true if either MSI or MSI-X is currently
 * enabled on @pci_dev.  Stubbed to false when the kernel is built
 * without CONFIG_PCI_MSI.
 */
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
828 
/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/*
 * Translate a PCIBIOS_* error code (above) into a generic negative errno
 * for passing back through non-PCI code.  Values at or below
 * PCIBIOS_SUCCESSFUL are assumed to already be 0 or a negative errno and
 * are returned unchanged; unrecognized positive codes map to -ERANGE.
 */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* 0 or already a negative errno */

	switch (err) {
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return -ENOENT;
	case PCIBIOS_BAD_VENDOR_ID:
		return -ENOTTY;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	case PCIBIOS_SET_FAILED:
		return -EIO;
	case PCIBIOS_BUFFER_TOO_SMALL:
		return -ENOSPC;
	default:
		return -ERANGE;
	}
}
861 
862 /* Low-level architecture-dependent routines */
863 
/* Config-space accessors and bus hooks supplied by host bridge / arch code */
struct pci_ops {
	/* Notifications that a bus was added / is being removed */
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	/*
	 * Map config space of @devfn so offset @where can be accessed
	 * directly; presumably consumed by the pci_generic_config_*()
	 * helpers declared below — confirm before relying on it.
	 */
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	/* Read/write @size bytes of config space at offset @where */
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};
871 
872 /*
873  * ACPI needs to be able to access PCI config space before we've done a
874  * PCI bus scan and created pci_bus structures.
875  */
876 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
877 		 int reg, int len, u32 *val);
878 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
879 		  int reg, int len, u32 val);
880 
881 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
882 typedef u64 pci_bus_addr_t;
883 #else
884 typedef u32 pci_bus_addr_t;
885 #endif
886 
/* A range of bus addresses; both endpoints are inclusive */
struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};

/**
 * pci_bus_region_size - number of bus addresses covered by @region
 * @region: region to measure
 *
 * Return: size of the region; "+ 1" because [start, end] is inclusive.
 */
static inline pci_bus_addr_t pci_bus_region_size(const struct pci_bus_region *region)
{
	return region->end - region->start + 1;
}
896 
/* Device IDs added to a driver at runtime (see struct pci_driver.dynids) */
struct pci_dynids {
	spinlock_t		lock;	/* Protects list, index */
	struct list_head	list;	/* For IDs added at runtime */
};
901 
902 
903 /*
904  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
905  * a set of callbacks in struct pci_error_handlers, that device driver
906  * will be notified of PCI bus errors, and will be driven to recovery
907  * when an error occurs.
908  */
909 
/*
 * Recovery status returned by the pci_error_handlers callbacks below.
 * The __bitwise annotation (with __force on each constant) lets sparse
 * flag accidental mixing of these values with plain integers.
 */
typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};
931 
/*
 * PCI bus error event callbacks (PCI-ERS).  A driver providing these is
 * notified of PCI bus errors on its devices and driven through recovery;
 * see Documentation/PCI/pci-error-recovery.rst for the state machine.
 */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   pci_channel_state_t error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);

	/* Allow device driver to record more details of a correctable error */
	void (*cor_error_detected)(struct pci_dev *dev);
};
954 
955 
956 struct module;
957 
958 /**
959  * struct pci_driver - PCI driver structure
960  * @name:	Driver name.
961  * @id_table:	Pointer to table of device IDs the driver is
962  *		interested in.  Most drivers should export this
963  *		table using MODULE_DEVICE_TABLE(pci,...).
964  * @probe:	This probing function gets called (during execution
965  *		of pci_register_driver() for already existing
966  *		devices or later if a new device gets inserted) for
967  *		all PCI devices which match the ID table and are not
968  *		"owned" by the other drivers yet. This function gets
969  *		passed a "struct pci_dev \*" for each device whose
970  *		entry in the ID table matches the device. The probe
971  *		function returns zero when the driver chooses to
972  *		take "ownership" of the device or an error code
973  *		(negative number) otherwise.
974  *		The probe function always gets called from process
975  *		context, so it can sleep.
976  * @remove:	The remove() function gets called whenever a device
977  *		being handled by this driver is removed (either during
978  *		deregistration of the driver or when it's manually
979  *		pulled out of a hot-pluggable slot).
980  *		The remove function always gets called from process
981  *		context, so it can sleep.
982  * @suspend:	Put device into low power state.
983  * @resume:	Wake device from low power state.
984  *		(Please see Documentation/power/pci.rst for descriptions
985  *		of PCI Power Management and the related functions.)
986  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
987  *		Intended to stop any idling DMA operations.
988  *		Useful for enabling wake-on-lan (NIC) or changing
989  *		the power state of a device before reboot.
990  *		e.g. drivers/net/e100.c.
991  * @sriov_configure: Optional driver callback to allow configuration of
992  *		number of VFs to enable via sysfs "sriov_numvfs" file.
993  * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
994  *              vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
995  *              This will change MSI-X Table Size in the VF Message Control
996  *              registers.
997  * @sriov_get_vf_total_msix: PF driver callback to get the total number of
998  *              MSI-X vectors available for distribution to the VFs.
999  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
1000  * @groups:	Sysfs attribute groups.
1001  * @dev_groups: Attributes attached to the device that will be
1002  *              created once it is bound to the driver.
1003  * @driver:	Driver model structure.
1004  * @dynids:	List of dynamically added device IDs.
1005  * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
1006  *		For most device drivers, no need to care about this flag
1007  *		as long as all DMAs are handled through the kernel DMA API.
1008  *		For some special ones, for example VFIO drivers, they know
1009  *		how to manage the DMA themselves and set this flag so that
1010  *		the IOMMU layer will allow them to setup and manage their
1011  *		own I/O address space.
1012  */
struct pci_driver {
	const char		*name;
	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
	void (*shutdown)(struct pci_dev *dev);
	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf); /* On PF */
	const struct pci_error_handlers *err_handler;
	const struct attribute_group **groups;
	const struct attribute_group **dev_groups;
	struct device_driver	driver;
	struct pci_dynids	dynids;
	bool driver_managed_dma;
};
1031 
/*
 * NULL-safe conversion from a struct device_driver pointer to the
 * enclosing struct pci_driver (returns NULL for a NULL input).
 */
#define to_pci_driver(__drv)	\
	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
1034 
1035 /**
1036  * PCI_DEVICE - macro used to describe a specific PCI device
1037  * @vend: the 16 bit PCI Vendor ID
1038  * @dev: the 16 bit PCI Device ID
1039  *
1040  * This macro is used to create a struct pci_device_id that matches a
1041  * specific device.  The subvendor and subdevice fields will be set to
1042  * PCI_ANY_ID.
1043  */
1044 #define PCI_DEVICE(vend,dev) \
1045 	.vendor = (vend), .device = (dev), \
1046 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1047 
1048 /**
1049  * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
1050  *                              override_only flags.
1051  * @vend: the 16 bit PCI Vendor ID
1052  * @dev: the 16 bit PCI Device ID
1053  * @driver_override: the 32 bit PCI Device override_only
1054  *
1055  * This macro is used to create a struct pci_device_id that matches only a
1056  * driver_override device. The subvendor and subdevice fields will be set to
1057  * PCI_ANY_ID.
1058  */
1059 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
1060 	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
1061 	.subdevice = PCI_ANY_ID, .override_only = (driver_override)
1062 
1063 /**
1064  * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1065  *                                   "driver_override" PCI device.
1066  * @vend: the 16 bit PCI Vendor ID
1067  * @dev: the 16 bit PCI Device ID
1068  *
1069  * This macro is used to create a struct pci_device_id that matches a
1070  * specific device. The subvendor and subdevice fields will be set to
1071  * PCI_ANY_ID and the driver_override will be set to
1072  * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1073  */
1074 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1075 	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1076 
1077 /**
1078  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1079  * @vend: the 16 bit PCI Vendor ID
1080  * @dev: the 16 bit PCI Device ID
1081  * @subvend: the 16 bit PCI Subvendor ID
1082  * @subdev: the 16 bit PCI Subdevice ID
1083  *
1084  * This macro is used to create a struct pci_device_id that matches a
1085  * specific device with subsystem information.
1086  */
1087 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1088 	.vendor = (vend), .device = (dev), \
1089 	.subvendor = (subvend), .subdevice = (subdev)
1090 
1091 /**
1092  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1093  * @dev_class: the class, subclass, prog-if triple for this device
1094  * @dev_class_mask: the class mask for this device
1095  *
1096  * This macro is used to create a struct pci_device_id that matches a
1097  * specific PCI class.  The vendor, device, subvendor, and subdevice
1098  * fields will be set to PCI_ANY_ID.
1099  */
1100 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1101 	.class = (dev_class), .class_mask = (dev_class_mask), \
1102 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1103 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1104 
/**
 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID. The macro allows the next field to follow as the device
 * private data.
 *
 * The trailing "0, 0" positionally zero the next two members of
 * struct pci_device_id (class and class_mask — see
 * <linux/mod_devicetable.h>) so that ".driver_data" may follow.
 */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1118 
1119 /**
1120  * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1121  * @vend: the vendor name
1122  * @dev: the 16 bit PCI Device ID
1123  * @subvend: the 16 bit PCI Subvendor ID
1124  * @subdev: the 16 bit PCI Subdevice ID
1125  *
1126  * Generate the pci_device_id struct layout for the specific PCI
1127  * device/subdevice. Private data may follow the output.
1128  */
1129 #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
1130 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1131 	.subvendor = (subvend), .subdevice = (subdev), 0, 0
1132 
1133 /**
1134  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1135  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1136  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1137  * @data: the driver data to be filled
1138  *
1139  * This macro is used to create a struct pci_device_id that matches a
1140  * specific PCI device.  The subvendor, and subdevice fields will be set
1141  * to PCI_ANY_ID.
1142  */
1143 #define PCI_DEVICE_DATA(vend, dev, data) \
1144 	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1145 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1146 	.driver_data = (kernel_ulong_t)(data)
1147 
1148 enum {
1149 	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
1150 	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
1151 	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
1152 	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
1153 	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
1154 	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
1155 	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
1156 };
1157 
1158 #define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
1159 #define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
1160 #define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
1161 #define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
1162 
1163 /* These external functions are only available when PCI support is enabled */
1164 #ifdef CONFIG_PCI
1165 
extern unsigned int pci_flags;

/*
 * Accessors for the global pci_flags word holding the PCI_* setup flags
 * (PCI_REASSIGN_ALL_RSRC, PCI_PROBE_ONLY, ... defined above).
 */
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1172 
1173 void pcie_bus_configure_settings(struct pci_bus *bus);
1174 
1175 enum pcie_bus_config_types {
1176 	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
1177 	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
1178 	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
1179 	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
1180 	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
1181 };
1182 
1183 extern enum pcie_bus_config_types pcie_bus_config;
1184 
1185 extern const struct bus_type pci_bus_type;
1186 
/* Do NOT directly access this variable, unless you are arch-specific PCI
 * code, or PCI core code. */
1189 extern struct list_head pci_root_buses;	/* List of all known PCI buses */
/* Some device drivers need to know if PCI is initialized */
1191 int no_pci_devices(void);
1192 
1193 void pcibios_resource_survey_bus(struct pci_bus *bus);
1194 void pcibios_bus_add_device(struct pci_dev *pdev);
1195 void pcibios_add_bus(struct pci_bus *bus);
1196 void pcibios_remove_bus(struct pci_bus *bus);
1197 void pcibios_fixup_bus(struct pci_bus *);
1198 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1199 /* Architecture-specific versions may override this (weak) */
1200 char *pcibios_setup(char *str);
1201 
1202 /* Used only when drivers/pci/setup.c is used */
1203 resource_size_t pcibios_align_resource(void *, const struct resource *,
1204 				resource_size_t,
1205 				resource_size_t);
1206 
1207 /* Generic PCI functions used internally */
1208 
1209 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1210 			     struct resource *res);
1211 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1212 			     struct pci_bus_region *region);
1213 void pcibios_scan_specific_bus(int busn);
1214 struct pci_bus *pci_find_bus(int domain, int busnr);
1215 void pci_bus_add_devices(const struct pci_bus *bus);
1216 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1217 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1218 				    struct pci_ops *ops, void *sysdata,
1219 				    struct list_head *resources);
1220 int pci_host_probe(struct pci_host_bridge *bridge);
1221 void pci_probe_flush_workqueue(void);
1222 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1223 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1224 void pci_bus_release_busn_res(struct pci_bus *b);
1225 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1226 				  struct pci_ops *ops, void *sysdata,
1227 				  struct list_head *resources);
1228 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1229 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1230 				int busnr);
1231 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1232 				 const char *name,
1233 				 struct hotplug_slot *hotplug);
1234 void pci_destroy_slot(struct pci_slot *slot);
1235 #ifdef CONFIG_SYSFS
1236 void pci_dev_assign_slot(struct pci_dev *dev);
1237 #else
pci_dev_assign_slot(struct pci_dev * dev)1238 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1239 #endif
1240 int pci_scan_slot(struct pci_bus *bus, int devfn);
1241 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1242 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1243 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1244 void pci_bus_add_device(struct pci_dev *dev);
1245 void pci_read_bridge_bases(struct pci_bus *child);
1246 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1247 					  struct resource *res);
1248 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1249 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1250 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1251 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1252 void pci_dev_put(struct pci_dev *dev);
1253 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1254 void pci_remove_bus(struct pci_bus *b);
1255 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1256 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1257 void pci_stop_root_bus(struct pci_bus *bus);
1258 void pci_remove_root_bus(struct pci_bus *bus);
1259 #ifdef CONFIG_CARDBUS
1260 void pci_setup_cardbus_bridge(struct pci_bus *bus);
1261 #else
pci_setup_cardbus_bridge(struct pci_bus * bus)1262 static inline void pci_setup_cardbus_bridge(struct pci_bus *bus) { }
1263 #endif
1264 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1265 void pci_sort_breadthfirst(void);
1266 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1267 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1268 
1269 /* Generic PCI functions exported to card drivers */
1270 
1271 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1272 u8 pci_find_capability(struct pci_dev *dev, int cap);
1273 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1274 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1275 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1276 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1277 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1278 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1279 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1280 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1281 
1282 u64 pci_get_dsn(struct pci_dev *dev);
1283 
1284 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1285 			       struct pci_dev *from);
1286 struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
1287 				       struct pci_dev *from);
1288 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1289 			       unsigned int ss_vendor, unsigned int ss_device,
1290 			       struct pci_dev *from);
1291 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1292 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1293 					    unsigned int devfn);
1294 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1295 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1296 
1297 int pci_dev_present(const struct pci_device_id *ids);
1298 
1299 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1300 			     int where, u8 *val);
1301 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1302 			     int where, u16 *val);
1303 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1304 			      int where, u32 *val);
1305 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1306 			      int where, u8 val);
1307 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1308 			      int where, u16 val);
1309 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1310 			       int where, u32 val);
1311 
1312 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1313 			    int where, int size, u32 *val);
1314 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1315 			    int where, int size, u32 val);
1316 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1317 			      int where, int size, u32 *val);
1318 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1319 			       int where, int size, u32 val);
1320 
1321 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1322 
1323 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1324 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1325 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1326 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1327 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1328 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1329 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1330 				    u32 clear, u32 set);
1331 
1332 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1333 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1334 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1335 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1336 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1337 						u16 clear, u16 set);
1338 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1339 					      u16 clear, u16 set);
1340 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1341 					u32 clear, u32 set);
1342 
1343 /**
1344  * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1345  * @dev:	PCI device structure of the PCI Express device
1346  * @pos:	PCI Express Capability Register
1347  * @clear:	Clear bitmask
1348  * @set:	Set bitmask
1349  *
1350  * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1351  * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1352  * Capability Registers are accessed concurrently in RMW fashion, hence
1353  * require locking which is handled transparently to the caller.
1354  */
pcie_capability_clear_and_set_word(struct pci_dev * dev,int pos,u16 clear,u16 set)1355 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1356 						     int pos,
1357 						     u16 clear, u16 set)
1358 {
1359 	switch (pos) {
1360 	case PCI_EXP_LNKCTL:
1361 	case PCI_EXP_LNKCTL2:
1362 	case PCI_EXP_RTCTL:
1363 		return pcie_capability_clear_and_set_word_locked(dev, pos,
1364 								 clear, set);
1365 	default:
1366 		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1367 								   clear, set);
1368 	}
1369 }
1370 
/* Set @set bits in the PCIe Capability word register at @pos (RMW) */
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

/* Set @set bits in the PCIe Capability dword register at @pos (RMW) */
static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

/* Clear @clear bits in the PCIe Capability word register at @pos (RMW) */
static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

/* Clear @clear bits in the PCIe Capability dword register at @pos (RMW) */
static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
1394 
1395 /* User-space driven config access */
1396 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1397 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1398 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1399 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1400 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1401 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1402 
1403 int __must_check pci_enable_device(struct pci_dev *dev);
1404 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1405 int __must_check pci_reenable_device(struct pci_dev *);
1406 int __must_check pcim_enable_device(struct pci_dev *pdev);
1407 void pcim_pin_device(struct pci_dev *pdev);
1408 
pci_intx_mask_supported(struct pci_dev * pdev)1409 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1410 {
1411 	/*
1412 	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1413 	 * writable and no quirk has marked the feature broken.
1414 	 */
1415 	return !pdev->broken_intx_masking;
1416 }
1417 
/* Non-zero while the device's enable count is positive */
static inline int pci_is_enabled(struct pci_dev *pdev)
{
	return (atomic_read(&pdev->enable_cnt) > 0);
}
1422 
/*
 * Non-zero if the device is marked as managed — presumably set by
 * pcim_enable_device() (devres-managed lifetime); confirm against
 * the PCI devres code before relying on that.
 */
static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}
1427 
1428 void pci_disable_device(struct pci_dev *dev);
1429 
1430 extern unsigned int pcibios_max_latency;
1431 void pci_set_master(struct pci_dev *dev);
1432 void pci_clear_master(struct pci_dev *dev);
1433 
1434 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1435 int pci_set_cacheline_size(struct pci_dev *dev);
1436 int __must_check pci_set_mwi(struct pci_dev *dev);
1437 int __must_check pcim_set_mwi(struct pci_dev *dev);
1438 int pci_try_set_mwi(struct pci_dev *dev);
1439 void pci_clear_mwi(struct pci_dev *dev);
1440 void pci_disable_parity(struct pci_dev *dev);
1441 void pci_intx(struct pci_dev *dev, int enable);
1442 bool pci_check_and_mask_intx(struct pci_dev *dev);
1443 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1444 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1445 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1446 int pcix_get_max_mmrbc(struct pci_dev *dev);
1447 int pcix_get_mmrbc(struct pci_dev *dev);
1448 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1449 int pcie_get_readrq(struct pci_dev *dev);
1450 int pcie_set_readrq(struct pci_dev *dev, int rq);
1451 int pcie_get_mps(struct pci_dev *dev);
1452 int pcie_set_mps(struct pci_dev *dev, int mps);
1453 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1454 			     enum pci_bus_speed *speed,
1455 			     enum pcie_link_width *width);
1456 int pcie_link_speed_mbps(struct pci_dev *pdev);
1457 void pcie_print_link_status(struct pci_dev *dev);
1458 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1459 int pcie_flr(struct pci_dev *dev);
1460 int __pci_reset_function_locked(struct pci_dev *dev);
1461 int pci_reset_function(struct pci_dev *dev);
1462 int pci_reset_function_locked(struct pci_dev *dev);
1463 int pci_try_reset_function(struct pci_dev *dev);
1464 int pci_probe_reset_slot(struct pci_slot *slot);
1465 int pci_probe_reset_bus(struct pci_bus *bus);
1466 int pci_reset_bus(struct pci_dev *dev);
1467 void pci_reset_secondary_bus(struct pci_dev *dev);
1468 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1469 void pci_update_resource(struct pci_dev *dev, int resno);
1470 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1471 int pci_release_resource(struct pci_dev *dev, int resno);
1472 
1473 /* Resizable BAR related routines */
1474 int pci_rebar_bytes_to_size(u64 bytes);
1475 resource_size_t pci_rebar_size_to_bytes(int size);
1476 u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1477 bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size);
1478 int pci_rebar_get_max_size(struct pci_dev *pdev, int bar);
1479 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size,
1480 				     int exclude_bars);
1481 
1482 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1483 bool pci_device_is_present(struct pci_dev *pdev);
1484 void pci_ignore_hotplug(struct pci_dev *dev);
1485 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1486 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1487 
1488 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1489 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1490 		const char *fmt, ...);
1491 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1492 
1493 /* ROM control related routines */
1494 int pci_enable_rom(struct pci_dev *pdev);
1495 void pci_disable_rom(struct pci_dev *pdev);
1496 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1497 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1498 
1499 /* Power management related routines */
1500 int pci_save_state(struct pci_dev *dev);
1501 void pci_restore_state(struct pci_dev *dev);
1502 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1503 int pci_load_saved_state(struct pci_dev *dev,
1504 			 struct pci_saved_state *state);
1505 int pci_load_and_free_saved_state(struct pci_dev *dev,
1506 				  struct pci_saved_state **state);
1507 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1508 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1509 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1510 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1511 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1512 void pci_pme_active(struct pci_dev *dev, bool enable);
1513 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1514 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1515 int pci_prepare_to_sleep(struct pci_dev *dev);
1516 int pci_back_from_sleep(struct pci_dev *dev);
1517 bool pci_dev_run_wake(struct pci_dev *dev);
1518 void pci_d3cold_enable(struct pci_dev *dev);
1519 void pci_d3cold_disable(struct pci_dev *dev);
1520 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1521 void pci_resume_bus(struct pci_bus *bus);
1522 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1523 
1524 /* For use by arch with custom probe code */
1525 void set_pcie_port_type(struct pci_dev *pdev);
1526 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1527 
1528 /* Functions for PCI Hotplug drivers to use */
1529 unsigned int pci_rescan_bus(struct pci_bus *bus);
1530 void pci_lock_rescan_remove(void);
1531 void pci_unlock_rescan_remove(void);
1532 
1533 /* Vital Product Data routines */
1534 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1535 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1536 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1537 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1538 
1539 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1540 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1541 void pci_bus_assign_resources(const struct pci_bus *bus);
1542 void pci_bus_claim_resources(struct pci_bus *bus);
1543 void pci_bus_size_bridges(struct pci_bus *bus);
1544 int pci_claim_resource(struct pci_dev *, int);
1545 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1546 void pci_assign_unassigned_resources(void);
1547 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1548 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1549 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1550 int pci_enable_resources(struct pci_dev *, int mask);
1551 void pci_assign_irq(struct pci_dev *dev);
1552 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1553 #define HAVE_PCI_REQ_REGIONS	2
1554 int __must_check pci_request_regions(struct pci_dev *, const char *);
1555 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1556 void pci_release_regions(struct pci_dev *);
1557 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1558 void pci_release_region(struct pci_dev *, int);
1559 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1560 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1561 void pci_release_selected_regions(struct pci_dev *, int);
1562 
/*
 * Request exclusive ownership of a range of the device's config space.
 * Claims [offset, offset + len) from pdev->driver_exclusive_resource with
 * IORESOURCE_EXCLUSIVE set.  Returns the new resource, or NULL if the range
 * is already claimed (standard __request_region() semantics).
 */
static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
				    unsigned int len, const char *name)
{
	return __request_region(&pdev->driver_exclusive_resource, offset, len,
				name, IORESOURCE_EXCLUSIVE);
}
1570 
/*
 * Release a config-space range previously claimed with
 * pci_request_config_region_exclusive().  @offset/@len must match the
 * original request.
 */
static inline void pci_release_config_region(struct pci_dev *pdev,
					     unsigned int offset,
					     unsigned int len)
{
	__release_region(&pdev->driver_exclusive_resource, offset, len);
}
1577 
1578 /* drivers/pci/bus.c */
1579 void pci_add_resource(struct list_head *resources, struct resource *res);
1580 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1581 			     resource_size_t offset);
1582 void pci_free_resource_list(struct list_head *resources);
1583 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1584 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1585 void pci_bus_remove_resources(struct pci_bus *bus);
1586 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1587 int devm_request_pci_bus_resources(struct device *dev,
1588 				   struct list_head *resources);
1589 
1590 /* Temporary until new and working PCI SBR API in place */
1591 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1592 
/*
 * Iterate bus resources: the first PCI_BRIDGE_RESOURCE_NUM indices come from
 * the fixed per-bus array, which may contain NULL holes -- the "|| __b <
 * PCI_BRIDGE_RESOURCE_NUM" clause keeps the loop going past them.  Beyond the
 * array, pci_bus_resource_n() walks the additional-resources list until it
 * returns NULL.  This variant declares its own index variable.
 */
#define __pci_bus_for_each_res0(bus, res, ...)				\
	for (unsigned int __b = 0;					\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)

/* Same iteration as above, but the caller supplies the index variable @__b. */
#define __pci_bus_for_each_res1(bus, res, __b)				\
	for (__b = 0;							\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)
1602 
1603 /**
1604  * pci_bus_for_each_resource - iterate over PCI bus resources
1605  * @bus: the PCI bus
1606  * @res: pointer to the current resource
1607  * @...: optional index of the current resource
1608  *
1609  * Iterate over PCI bus resources. The first part is to go over PCI bus
1610  * resource array, which has at most the %PCI_BRIDGE_RESOURCE_NUM entries.
1611  * After that continue with the separate list of the additional resources,
1612  * if not empty. That's why the Logical OR is being used.
1613  *
1614  * Possible usage:
1615  *
1616  *	struct pci_bus *bus = ...;
1617  *	struct resource *res;
1618  *	unsigned int i;
1619  *
1620  * 	// With optional index
1621  * 	pci_bus_for_each_resource(bus, res, i)
1622  * 		pr_info("PCI bus resource[%u]: %pR\n", i, res);
1623  *
1624  * 	// Without index
1625  * 	pci_bus_for_each_resource(bus, res)
1626  * 		_do_something_(res);
1627  */
/*
 * Dispatch to __pci_bus_for_each_res0 or ..._res1 depending on whether the
 * optional index argument was supplied (COUNT_ARGS of the trailing args).
 */
#define pci_bus_for_each_resource(bus, res, ...)			\
	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
		    (bus, res, __VA_ARGS__)
1631 
1632 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1633 			struct resource *res, resource_size_t size,
1634 			resource_size_t align, resource_size_t min,
1635 			unsigned long type_mask,
1636 			resource_alignf alignf,
1637 			void *alignf_data);
1638 
1639 
1640 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1641 			resource_size_t size);
1642 unsigned long pci_address_to_pio(phys_addr_t addr);
1643 phys_addr_t pci_pio_to_address(unsigned long pio);
1644 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1645 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1646 			   phys_addr_t phys_addr);
1647 void pci_unmap_iospace(struct resource *res);
1648 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1649 				      resource_size_t offset,
1650 				      resource_size_t size);
1651 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1652 					  struct resource *res);
1653 
/*
 * pci_bus_address - return the PCI bus address of a device BAR
 *
 * Translates the CPU-side resource of @bar into the address seen on the PCI
 * bus via pcibios_resource_to_bus(); the two differ when the host bridge
 * applies an address offset.
 */
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
	struct pci_bus_region region;

	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
	return region.start;
}
1661 
1662 /* Proper probing supporting hot-pluggable devices */
1663 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1664 				       const char *mod_name);
1665 
1666 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1667 #define pci_register_driver(driver)		\
1668 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1669 
1670 void pci_unregister_driver(struct pci_driver *dev);
1671 
1672 /**
1673  * module_pci_driver() - Helper macro for registering a PCI driver
1674  * @__pci_driver: pci_driver struct
1675  *
1676  * Helper macro for PCI drivers which do not do anything special in module
1677  * init/exit. This eliminates a lot of boilerplate. Each module may only
1678  * use this macro once, and calling it replaces module_init() and module_exit()
1679  */
1680 #define module_pci_driver(__pci_driver) \
1681 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1682 
1683 /**
1684  * builtin_pci_driver() - Helper macro for registering a PCI driver
1685  * @__pci_driver: pci_driver struct
1686  *
1687  * Helper macro for PCI drivers which do not do anything special in their
1688  * init code. This eliminates a lot of boilerplate. Each driver may only
1689  * use this macro once, and calling it replaces device_initcall(...)
1690  */
1691 #define builtin_pci_driver(__pci_driver) \
1692 	builtin_driver(__pci_driver, pci_register_driver)
1693 
1694 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1695 int pci_add_dynid(struct pci_driver *drv,
1696 		  unsigned int vendor, unsigned int device,
1697 		  unsigned int subvendor, unsigned int subdevice,
1698 		  unsigned int class, unsigned int class_mask,
1699 		  unsigned long driver_data);
1700 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1701 					 struct pci_dev *dev);
1702 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1703 		    int pass);
1704 
1705 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1706 		  void *userdata);
1707 void pci_walk_bus_reverse(struct pci_bus *top,
1708 			  int (*cb)(struct pci_dev *, void *), void *userdata);
1709 int pci_cfg_space_size(struct pci_dev *dev);
1710 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1711 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1712 					 unsigned long type);
1713 
1714 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1715 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1716 
1717 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1718 		      unsigned int command_bits, u32 flags);
1719 
1720 /*
1721  * Virtual interrupts allow for more interrupts to be allocated
1722  * than the device has interrupts for. These are not programmed
1723  * into the device's MSI-X table and must be handled by some
1724  * other driver means.
1725  */
1726 #define PCI_IRQ_VIRTUAL		(1 << 4)
1727 
1728 #define PCI_IRQ_ALL_TYPES	(PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1729 
1730 #include <linux/dmapool.h>
1731 
/* One MSI-X vector request, as passed to pci_enable_msix_range()/_exact(). */
struct msix_entry {
	u32	vector;	/* Kernel uses to write allocated vector */
	u16	entry;	/* Driver uses to specify entry, OS writes */
};
1736 
1737 #ifdef CONFIG_PCI_MSI
1738 int pci_msi_vec_count(struct pci_dev *dev);
1739 void pci_disable_msi(struct pci_dev *dev);
1740 int pci_msix_vec_count(struct pci_dev *dev);
1741 void pci_disable_msix(struct pci_dev *dev);
1742 void pci_restore_msi_state(struct pci_dev *dev);
1743 bool pci_msi_enabled(void);
1744 int pci_enable_msi(struct pci_dev *dev);
1745 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1746 			  int minvec, int maxvec);
/*
 * pci_enable_msix_exact - enable exactly @nvec MSI-X vectors, or fail
 *
 * Thin wrapper around pci_enable_msix_range() with minvec == maxvec == @nvec:
 * either all requested vectors are allocated (returns 0) or the negative
 * error from pci_enable_msix_range() is propagated.
 */
static inline int pci_enable_msix_exact(struct pci_dev *dev,
					struct msix_entry *entries, int nvec)
{
	int nr_vecs = pci_enable_msix_range(dev, entries, nvec, nvec);

	return nr_vecs < 0 ? nr_vecs : 0;
}
1755 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1756 			  unsigned int max_vecs, unsigned int flags);
1757 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1758 				   unsigned int max_vecs, unsigned int flags,
1759 				   struct irq_affinity *affd);
1760 
1761 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1762 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1763 				     const struct irq_affinity_desc *affdesc);
1764 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1765 
1766 void pci_free_irq_vectors(struct pci_dev *dev);
1767 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1768 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1769 
1770 #else
/*
 * CONFIG_PCI_MSI is not set: provide no-op stubs so callers still compile.
 * Counting/enabling report -ENOSYS, pci_msi_enabled() reports false, and the
 * disable/restore helpers do nothing.
 */
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline bool pci_msi_enabled(void) { return false; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
			struct msix_entry *entries, int minvec, int maxvec)
{ return -ENOSYS; }
static inline int pci_enable_msix_exact(struct pci_dev *dev,
			struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
1785 
static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
			       unsigned int max_vecs, unsigned int flags,
			       struct irq_affinity *aff_desc)
{
	/*
	 * Without MSI support the only interrupt available is the device's
	 * legacy INTx line: hand it out iff the caller allowed INTx, needs
	 * exactly one vector as the minimum, and the device has an IRQ.
	 */
	if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
		return 1;
	return -ENOSPC;
}
/* Convenience wrapper: no affinity descriptor. */
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		      unsigned int max_vecs, unsigned int flags)
{
	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
					      flags, NULL);
}
1802 
/* Dynamic MSI-X allocation is unavailable without CONFIG_PCI_MSI. */
static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
{ return false; }
/* Failure is reported through the returned map's .index field. */
static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
						   const struct irq_affinity_desc *affdesc)
{
	struct msi_map map = { .index = -ENOSYS, };

	return map;
}

static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
{
}

static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}

/* Only the single legacy INTx vector (nr == 0) can exist here. */
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	if (WARN_ON_ONCE(nr > 0))
		return -EINVAL;
	return dev->irq;
}
/* Legacy INTx has no per-vector affinity; any CPU may service it. */
static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
		int vec)
{
	return cpu_possible_mask;
}
1832 #endif
1833 
1834 /**
1835  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1836  * @d: the INTx IRQ domain
1837  * @node: the DT node for the device whose interrupt we're translating
1838  * @intspec: the interrupt specifier data from the DT
1839  * @intsize: the number of entries in @intspec
1840  * @out_hwirq: pointer at which to write the hwirq number
1841  * @out_type: pointer at which to write the interrupt type
1842  *
1843  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1844  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1845  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1846  * INTx value to obtain the hwirq number.
1847  *
1848  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1849  */
static inline int pci_irqd_intx_xlate(struct irq_domain *d,
				      struct device_node *node,
				      const u32 *intspec,
				      unsigned int intsize,
				      unsigned long *out_hwirq,
				      unsigned int *out_type)
{
	const u32 intx = intspec[0];

	/* Valid PCI_INTERRUPT_PIN values are 1 (INTA) through 4 (INTD). */
	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
		return -EINVAL;

	/* Shift the 1-4 range down to the 0-3 hwirq space of the domain. */
	*out_hwirq = intx - PCI_INTERRUPT_INTA;
	return 0;
}
1865 
1866 #ifdef CONFIG_PCIEPORTBUS
1867 extern bool pcie_ports_disabled;
1868 extern bool pcie_ports_native;
1869 
1870 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1871 			  bool use_lt);
1872 #else
1873 #define pcie_ports_disabled	true
1874 #define pcie_ports_native	false
1875 
/* Without CONFIG_PCIEPORTBUS, link speed changes are not supported. */
static inline int pcie_set_target_speed(struct pci_dev *port,
					enum pci_bus_speed speed_req,
					bool use_lt)
{
	return -EOPNOTSUPP;
}
1882 #endif
1883 
1884 #define PCIE_LINK_STATE_L0S		(BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1885 #define PCIE_LINK_STATE_L1		BIT(2)	/* L1 state */
1886 #define PCIE_LINK_STATE_L1_1		BIT(3)	/* ASPM L1.1 state */
1887 #define PCIE_LINK_STATE_L1_2		BIT(4)	/* ASPM L1.2 state */
1888 #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)	/* PCI-PM L1.1 state */
1889 #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)	/* PCI-PM L1.2 state */
1890 #define PCIE_LINK_STATE_ASPM_ALL	(PCIE_LINK_STATE_L0S		|\
1891 					 PCIE_LINK_STATE_L1		|\
1892 					 PCIE_LINK_STATE_L1_1		|\
1893 					 PCIE_LINK_STATE_L1_2		|\
1894 					 PCIE_LINK_STATE_L1_1_PCIPM	|\
1895 					 PCIE_LINK_STATE_L1_2_PCIPM)
1896 #define PCIE_LINK_STATE_CLKPM		BIT(7)
1897 #define PCIE_LINK_STATE_ALL		(PCIE_LINK_STATE_ASPM_ALL	|\
1898 					 PCIE_LINK_STATE_CLKPM)
1899 
1900 #ifdef CONFIG_PCIEASPM
1901 int pci_disable_link_state(struct pci_dev *pdev, int state);
1902 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1903 int pci_enable_link_state(struct pci_dev *pdev, int state);
1904 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1905 void pcie_no_aspm(void);
1906 bool pcie_aspm_support_enabled(void);
1907 bool pcie_aspm_enabled(struct pci_dev *pdev);
1908 #else
/*
 * CONFIG_PCIEASPM is off: link-state enable/disable requests succeed as
 * no-ops (return 0) and the query helpers report ASPM as unavailable.
 */
static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1920 #endif
1921 
1922 #ifdef CONFIG_HOTPLUG_PCI
1923 void pci_hp_ignore_link_change(struct pci_dev *pdev);
1924 void pci_hp_unignore_link_change(struct pci_dev *pdev);
1925 #else
/* No hotplug support: nothing to (un)ignore. */
static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
1928 #endif
1929 
1930 #ifdef CONFIG_PCIEAER
1931 bool pci_aer_available(void);
1932 #else
/* AER support compiled out. */
static inline bool pci_aer_available(void) { return false; }
1934 #endif
1935 
1936 bool pci_ats_disabled(void);
1937 
1938 #define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
1939 #define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
1940 
/*
 * Controller callbacks backing the PTM debugfs interface (see
 * pcie_ptm_create_debugfs()).  @drvdata is the opaque pointer the controller
 * registered.  t1-t4 presumably correspond to the four PTM dialog
 * timestamps -- confirm against the implementing drivers.
 */
struct pcie_ptm_ops {
	int (*check_capability)(void *drvdata);
	int (*context_update_write)(void *drvdata, u8 mode);
	int (*context_update_read)(void *drvdata, u8 *mode);
	int (*context_valid_write)(void *drvdata, bool valid);
	int (*context_valid_read)(void *drvdata, bool *valid);
	int (*local_clock_read)(void *drvdata, u64 *clock);
	int (*master_clock_read)(void *drvdata, u64 *clock);
	int (*t1_read)(void *drvdata, u64 *clock);
	int (*t2_read)(void *drvdata, u64 *clock);
	int (*t3_read)(void *drvdata, u64 *clock);
	int (*t4_read)(void *drvdata, u64 *clock);

	/* Per-attribute visibility predicates for the debugfs entries. */
	bool (*context_update_visible)(void *drvdata);
	bool (*context_valid_visible)(void *drvdata);
	bool (*local_clock_visible)(void *drvdata);
	bool (*master_clock_visible)(void *drvdata);
	bool (*t1_visible)(void *drvdata);
	bool (*t2_visible)(void *drvdata);
	bool (*t3_visible)(void *drvdata);
	bool (*t4_visible)(void *drvdata);
};
1963 
/* State handle returned by pcie_ptm_create_debugfs(). */
struct pci_ptm_debugfs {
	struct dentry *debugfs;		/* root of the PTM debugfs directory */
	const struct pcie_ptm_ops *ops;	/* controller-supplied accessors */
	struct mutex lock;		/* NOTE(review): presumably serializes ops calls -- confirm in ptm.c */
	void *pdata;			/* opaque drvdata handed back to ops */
};
1970 
1971 #ifdef CONFIG_PCIE_PTM
1972 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1973 void pci_disable_ptm(struct pci_dev *dev);
1974 bool pcie_ptm_enabled(struct pci_dev *dev);
1975 #else
/* CONFIG_PCIE_PTM off: PTM can never be enabled. */
static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{ return -EINVAL; }
static inline void pci_disable_ptm(struct pci_dev *dev) { }
static inline bool pcie_ptm_enabled(struct pci_dev *dev)
{ return false; }
1981 #endif
1982 
1983 #if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
1984 struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1985 						const struct pcie_ptm_ops *ops);
1986 void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
1987 #else
/* Debugfs or PTM disabled: creation yields NULL, destruction is a no-op. */
static inline struct pci_ptm_debugfs
*pcie_ptm_create_debugfs(struct device *dev, void *pdata,
			 const struct pcie_ptm_ops *ops) { return NULL; }
static inline void
pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
1993 #endif
1994 
1995 void pci_cfg_access_lock(struct pci_dev *dev);
1996 bool pci_cfg_access_trylock(struct pci_dev *dev);
1997 void pci_cfg_access_unlock(struct pci_dev *dev);
1998 
1999 void pci_dev_lock(struct pci_dev *dev);
2000 int pci_dev_trylock(struct pci_dev *dev);
2001 void pci_dev_unlock(struct pci_dev *dev);
2002 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
2003 
2004 /*
2005  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
2006  * a PCI domain is defined to be a set of PCI buses which share
2007  * configuration space.
2008  */
2009 #ifdef CONFIG_PCI_DOMAINS
2010 extern int pci_domains_supported;
2011 int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max);
2012 void pci_bus_release_emul_domain_nr(int domain_nr);
2013 #else
/* CONFIG_PCI_DOMAINS off: everything lives in a single domain, number 0. */
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
static inline int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
{
	return 0;
}
static inline void pci_bus_release_emul_domain_nr(int domain_nr) { }
2022 #endif /* CONFIG_PCI_DOMAINS */
2023 
2024 /*
2025  * Generic implementation for PCI domain support. If your
2026  * architecture does not need custom management of PCI
2027  * domains then this implementation will be used
2028  */
2029 #ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Generic implementation: the domain number is stored on the bus itself. */
static inline int pci_domain_nr(struct pci_bus *bus)
{
	return bus->domain_nr;
}
2034 #ifdef CONFIG_ACPI
2035 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
2036 #else
/* No ACPI: the only domain ACPI could report is 0. */
static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
2039 #endif
2040 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
2041 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
2042 #endif
2043 
2044 /* Some architectures require additional setup to direct VGA traffic */
2045 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
2046 				    unsigned int command_bits, u32 flags);
2047 void pci_register_set_vga_state(arch_set_vga_state_t func);
2048 
2049 static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)2050 pci_request_io_regions(struct pci_dev *pdev, const char *name)
2051 {
2052 	return pci_request_selected_regions(pdev,
2053 			    pci_select_bars(pdev, IORESOURCE_IO), name);
2054 }
2055 
2056 static inline void
pci_release_io_regions(struct pci_dev * pdev)2057 pci_release_io_regions(struct pci_dev *pdev)
2058 {
2059 	return pci_release_selected_regions(pdev,
2060 			    pci_select_bars(pdev, IORESOURCE_IO));
2061 }
2062 
2063 static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)2064 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
2065 {
2066 	return pci_request_selected_regions(pdev,
2067 			    pci_select_bars(pdev, IORESOURCE_MEM), name);
2068 }
2069 
2070 static inline void
pci_release_mem_regions(struct pci_dev * pdev)2071 pci_release_mem_regions(struct pci_dev *pdev)
2072 {
2073 	return pci_release_selected_regions(pdev,
2074 			    pci_select_bars(pdev, IORESOURCE_MEM));
2075 }
2076 
2077 #else /* CONFIG_PCI is not enabled */
2078 
/* CONFIG_PCI off: probe flags are ignored and never set. */
static inline void pci_set_flags(int flags) { }
static inline void pci_add_flags(int flags) { }
static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag) { return 0; }
2083 
2084 /*
2085  * If the system does not have PCI, clearly these return errors.  Define
2086  * these as simple inline functions to avoid hair in drivers.
2087  */
/*
 * Stamp out stub pci_{read,write}_config_{byte,word,dword}() definitions
 * that all fail with PCIBIOS_FUNC_NOT_SUPPORTED.  The extra "x" argument of
 * _PCI_NOP_ALL injects the "*" needed for the read variants' out-pointers.
 */
#define _PCI_NOP(o, s, t) \
	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
						int where, t val) \
		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }

#define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
				_PCI_NOP(o, word, u16 x) \
				_PCI_NOP(o, dword, u32 x)
_PCI_NOP_ALL(read, *)
_PCI_NOP_ALL(write,)
2098 
/* CONFIG_PCI off: device lookups find nothing; refcount macros are no-ops. */
static inline void pci_probe_flush_workqueue(void) { }

static inline struct pci_dev *pci_get_device(unsigned int vendor,
					     unsigned int device,
					     struct pci_dev *from)
{ return NULL; }

static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
						     unsigned int device,
						     struct pci_dev *from)
{ return NULL; }

static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
					     unsigned int device,
					     unsigned int ss_vendor,
					     unsigned int ss_device,
					     struct pci_dev *from)
{ return NULL; }

static inline struct pci_dev *pci_get_class(unsigned int class,
					    struct pci_dev *from)
{ return NULL; }

static inline struct pci_dev *pci_get_base_class(unsigned int class,
						 struct pci_dev *from)
{ return NULL; }

static inline int pci_dev_present(const struct pci_device_id *ids)
{ return 0; }

/* With no PCI there are trivially no devices, and nothing to put. */
#define no_pci_devices()	(1)
#define pci_dev_put(dev)	do { } while (0)
2131 
pci_set_master(struct pci_dev * dev)2132 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)2133 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)2134 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)2135 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)2136 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)2137 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2138 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)2139 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2140 						     struct module *owner,
2141 						     const char *mod_name)
2142 { return 0; }
pci_register_driver(struct pci_driver * drv)2143 static inline int pci_register_driver(struct pci_driver *drv)
2144 { return 0; }
pci_unregister_driver(struct pci_driver * drv)2145 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)2146 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2147 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)2148 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2149 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)2150 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2151 { return 0; }
2152 
pci_get_dsn(struct pci_dev * dev)2153 static inline u64 pci_get_dsn(struct pci_dev *dev)
2154 { return 0; }
2155 
/*
 * Power management related routines: with CONFIG_PCI=n there is no
 * state to save and no wake source to arm, so these succeed as no-ops
 * and pci_choose_state() always reports D0 (fully on).
 */
static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ return 0; }
static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
{ return 0; }
static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{ return 0; }
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
					   pm_message_t state)
{ return PCI_D0; }
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
				  int enable)
{ return 0; }
2171 
/* Resource and bus lookups: nothing exists without PCI support */
static inline struct resource *pci_find_resource(struct pci_dev *dev,
						 struct resource *res)
{ return NULL; }
static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }

static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
					phys_addr_t addr, resource_size_t size)
{ return -EINVAL; }

/* -1 cast to unsigned long is the "no PIO mapping" sentinel */
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }

static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
{ return NULL; }
static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
						unsigned int devfn)
{ return NULL; }
static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
					unsigned int bus, unsigned int devfn)
{ return NULL; }

static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }

/* No device can be a PCI device (or an SR-IOV PF) in this config */
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{ return false; }
static inline int pci_irqd_intx_xlate(struct irq_domain *d,
				      struct device_node *node,
				      const u32 *intspec,
				      unsigned int intsize,
				      unsigned long *out_hwirq,
				      unsigned int *out_type)
{ return -EINVAL; }

static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
							 struct pci_dev *dev)
{ return NULL; }
static inline bool pci_ats_disabled(void) { return true; }

static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	return -EINVAL;
}
2218 
/* IRQ vector allocation: no vectors can ever be allocated without PCI */
static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
			       unsigned int max_vecs, unsigned int flags,
			       struct irq_affinity *aff_desc)
{
	return -ENOSPC;
}
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		      unsigned int max_vecs, unsigned int flags)
{
	return -ENOSPC;
}

static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}
2236 #endif /* CONFIG_PCI */
2237 
2238 /* Include architecture-dependent settings and functions */
2239 
2240 #include <asm/pci.h>
2241 
/*
 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
 * is expected to be an offset within that region.
 */
2247 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2248 			    struct vm_area_struct *vma,
2249 			    enum pci_mmap_state mmap_state, int write_combine);
2250 
/*
 * Architectures that support write-combined BAR mappings or mmap of
 * I/O port BARs override these; everyone else gets "not supported".
 */
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc()		0
#endif

#ifndef arch_can_pci_mmap_io
#define arch_can_pci_mmap_io()		0
#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
#else
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
#endif

#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus)	NULL
#endif
2265 
/*
 * These helpers provide future and backwards compatibility
 * for accessing popular PCI BAR info
 */
#define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])
#define pci_resource_start(dev, bar)	(pci_resource_n(dev, bar)->start)
#define pci_resource_end(dev, bar)	(pci_resource_n(dev, bar)->end)
#define pci_resource_flags(dev, bar)	(pci_resource_n(dev, bar)->flags)
/* A BAR with end == 0 is unused: report length 0, not resource_size() */
#define pci_resource_len(dev,bar)					\
	(pci_resource_end((dev), (bar)) ? 				\
	 resource_size(pci_resource_n((dev), (bar))) : 0)

/* Variant without a caller index: iterator is scoped to the loop */
#define __pci_dev_for_each_res0(dev, res, ...)				  \
	for (unsigned int __b = 0;					  \
	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
	     __b++)

/* Variant where the caller supplies (and can inspect) the index __b */
#define __pci_dev_for_each_res1(dev, res, __b)				  \
	for (__b = 0;							  \
	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
	     __b++)

/*
 * pci_dev_for_each_resource(dev, res[, idx]): iterate over all of a
 * device's resources; dispatches on argument count to a variant above.
 */
#define pci_dev_for_each_resource(dev, res, ...)			\
	CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) 	\
		    (dev, res, __VA_ARGS__)
2291 
/*
 * Similar to the helpers above, these manipulate per-pci_dev
 * driver-specific data.  They are really just a wrapper around
 * the generic device structure functions of these calls.
 */
static inline void *pci_get_drvdata(struct pci_dev *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}

/* Canonical device name as registered on the bus (via dev_name()) */
static inline const char *pci_name(const struct pci_dev *pdev)
{
	return dev_name(&pdev->dev);
}
2311 
2312 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2313 			  const struct resource *rsrc,
2314 			  resource_size_t *start, resource_size_t *end);
2315 
2316 /*
2317  * The world is not perfect and supplies us with broken PCI devices.
2318  * For at least a part of these bugs we need a work-around, so both
2319  * generic (drivers/pci/quirks.c) and per-architecture code can define
2320  * fixup hooks to be called for particular buggy devices.
2321  */
2322 
struct pci_fixup {
	u16 vendor;			/* Or PCI_ANY_ID */
	u16 device;			/* Or PCI_ANY_ID */
	u32 class;			/* Or PCI_ANY_ID */
	unsigned int class_shift;	/* should be 0, 8, 16 */
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	int hook_offset;		/* 32-bit place-relative ref to the hook */
#else
	void (*hook)(struct pci_dev *dev);	/* quirk to run on a match */
#endif
};

/* Points during a device's lifetime at which each fixup section runs */
enum pci_fixup_pass {
	pci_fixup_early,	/* Before probing BARs */
	pci_fixup_header,	/* After reading configuration header */
	pci_fixup_final,	/* Final phase of device fixups */
	pci_fixup_enable,	/* pci_enable_device() time */
	pci_fixup_resume,	/* pci_device_resume() */
	pci_fixup_suspend,	/* pci_device_suspend() */
	pci_fixup_resume_early, /* pci_device_resume_early() */
	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
};
2345 
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
/*
 * Emit a struct pci_fixup entry into section @sec directly from asm,
 * storing the hook as a 32-bit place-relative offset ("#hook - .")
 * rather than an absolute pointer.  The emitted layout must match
 * struct pci_fixup above (hook_offset variant).
 */
#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				    class_shift, hook)			\
	__ADDRESSABLE(hook)						\
	asm(".section "	#sec ", \"a\"				\n"	\
	    ".balign	16					\n"	\
	    ".short "	#vendor ", " #device "			\n"	\
	    ".long "	#class ", " #class_shift "		\n"	\
	    ".long "	#hook " - .				\n"	\
	    ".previous						\n");

/*
 * Clang's LTO may rename static functions in C, but has no way to
 * handle such renamings when referenced from inline asm. To work
 * around this, create global C stubs for these cases.
 */
#ifdef CONFIG_LTO_CLANG
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, stub)		\
	void stub(struct pci_dev *dev);					\
	void stub(struct pci_dev *dev)					\
	{ 								\
		hook(dev); 						\
	}								\
	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, stub)
#else
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, stub)		\
	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook)
#endif

#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook)			\
	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, __UNIQUE_ID(hook))
#else
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
				  class_shift, hook)			\
	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
		= { vendor, device, class, class_shift, hook };
#endif
2391 
/*
 * Class-qualified variants: also match @class (how @class_shift is
 * applied is up to the fixup runner, pci_fixup_device()).
 */
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
		resume##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
		resume_early##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
		suspend##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
		suspend_late##hook, vendor, device, class, class_shift, hook)

/* Vendor/device-only variants: match any class (PCI_ANY_ID) */
#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
2449 
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
#else
/* Without quirk support there are no fixups to run */
static inline void pci_fixup_device(enum pci_fixup_pass pass,
				    struct pci_dev *dev) { }
#endif
2456 
2457 int pcim_intx(struct pci_dev *pdev, int enabled);
2458 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2459 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2460 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2461 				const char *name);
2462 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2463 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2464 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2465 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2466 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2467 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2468 				unsigned long offset, unsigned long len);
2469 
2470 extern int pci_pci_problems;
2471 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
2472 #define PCIPCI_TRITON		2
2473 #define PCIPCI_NATOMA		4
2474 #define PCIPCI_VIAETBF		8
2475 #define PCIPCI_VSFX		16
2476 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
2477 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
2478 
2479 extern u8 pci_dfl_cache_line_size;
2480 extern u8 pci_cache_line_size;
2481 
2482 /* Architecture-specific versions may override these (weak) */
2483 void pcibios_disable_device(struct pci_dev *dev);
2484 void pcibios_set_master(struct pci_dev *dev);
2485 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2486 				 enum pcie_reset_state state);
2487 int pcibios_device_add(struct pci_dev *dev);
2488 void pcibios_release_device(struct pci_dev *dev);
2489 #ifdef CONFIG_PCI
2490 void pcibios_penalize_isa_irq(int irq, int active);
2491 #else
pcibios_penalize_isa_irq(int irq,int active)2492 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2493 #endif
2494 int pcibios_alloc_irq(struct pci_dev *dev);
2495 void pcibios_free_irq(struct pci_dev *dev);
2496 resource_size_t pcibios_default_alignment(void);
2497 
2498 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2499 extern int pci_create_resource_files(struct pci_dev *dev);
2500 extern void pci_remove_resource_files(struct pci_dev *dev);
2501 #endif
2502 
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
#else
/* No MMCONFIG/MCFG support: nothing to initialize */
static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
#endif
2510 
2511 int pci_ext_cfg_avail(void);
2512 
2513 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2514 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2515 
2516 #ifdef CONFIG_PCI_IOV
2517 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2518 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2519 int pci_iov_vf_id(struct pci_dev *dev);
2520 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2521 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2522 void pci_disable_sriov(struct pci_dev *dev);
2523 
2524 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2525 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2526 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2527 int pci_num_vf(struct pci_dev *dev);
2528 int pci_vfs_assigned(struct pci_dev *dev);
2529 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2530 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2531 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2532 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2533 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
2534 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
2535 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2536 
2537 /* Arch may override these (weak) */
2538 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2539 int pcibios_sriov_disable(struct pci_dev *pdev);
2540 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2541 #else
/*
 * CONFIG_PCI_IOV=n stubs: SR-IOV queries report "no VFs" (0) and
 * operations fail with -ENOSYS/-ENODEV.
 */
static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}

static inline int pci_iov_vf_id(struct pci_dev *dev)
{
	return -ENOSYS;
}

static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
					   struct pci_driver *pf_driver)
{
	return ERR_PTR(-EINVAL);
}

static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }

static inline int pci_iov_sysfs_link(struct pci_dev *dev,
				     struct pci_dev *virtfn, int id)
{
	return -ENODEV;
}
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
					 int id) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
{ return 0; }
static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
/* NULL (not a stub fn) so drivers can reference it unconditionally */
#define pci_sriov_configure_simple	NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
{ return -ENODEV; }
static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2592 #endif
2593 
/**
 * pci_pcie_cap - get the saved PCIe capability offset
 * @dev: PCI device
 *
 * PCIe capability offset is calculated at PCI device initialization
 * time and saved in the data structure. This function returns saved
 * PCIe capability offset. Using this instead of pci_find_capability()
 * reduces unnecessary search in the PCI configuration space. If you
 * need to calculate PCIe capability offset from raw device for some
 * reasons, please use pci_find_capability() instead.
 *
 * Returns: the cached capability offset, or 0 if @dev is not PCIe.
 */
static inline int pci_pcie_cap(struct pci_dev *dev)
{
	return dev->pcie_cap;
}
2609 
/**
 * pci_is_pcie - check if the PCI device is PCI Express capable
 * @dev: PCI device
 *
 * Returns: true if the PCI device is PCI Express capable, false otherwise.
 */
static inline bool pci_is_pcie(struct pci_dev *dev)
{
	/* A nonzero cached capability offset means the device is PCIe */
	return pci_pcie_cap(dev);
}
2620 
/**
 * pcie_caps_reg - get the PCIe Capabilities Register
 * @dev: PCI device
 *
 * Returns: the cached PCI Express Capabilities Register value.
 */
static inline u16 pcie_caps_reg(const struct pci_dev *dev)
{
	return dev->pcie_flags_reg;
}

/**
 * pci_pcie_type - get the PCIe device/port type
 * @dev: PCI device
 *
 * Returns: one of the PCI_EXP_TYPE_* values.
 */
static inline int pci_pcie_type(const struct pci_dev *dev)
{
	/* Shift the masked Device/Port Type field down to bit 0 */
	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
2638 
2639 /**
2640  * pcie_find_root_port - Get the PCIe root port device
2641  * @dev: PCI device
2642  *
2643  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2644  * for a given PCI/PCIe Device.
2645  */
pcie_find_root_port(struct pci_dev * dev)2646 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2647 {
2648 	while (dev) {
2649 		if (pci_is_pcie(dev) &&
2650 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2651 			return dev;
2652 		dev = pci_upstream_bridge(dev);
2653 	}
2654 
2655 	return NULL;
2656 }
2657 
/* Whether @dev's channel is in the permanent-failure (gone) state */
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
	/*
	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
	 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
	 * the value (e.g. inside the loop in pci_dev_wait()).
	 */
	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
}
2667 
2668 void pci_request_acs(void);
2669 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2670 bool pci_acs_path_enabled(struct pci_dev *start,
2671 			  struct pci_dev *end, u16 acs_flags);
2672 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2673 
2674 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
2675 #define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)
2676 
2677 /* Large Resource Data Type Tag Item Names */
2678 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
2679 #define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
2680 #define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
2681 
2682 #define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2683 #define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2684 #define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2685 
2686 #define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
2687 #define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
2688 #define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
2689 #define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
2690 #define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
2691 
2692 /**
2693  * pci_vpd_alloc - Allocate buffer and read VPD into it
2694  * @dev: PCI device
2695  * @size: pointer to field where VPD length is returned
2696  *
2697  * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2698  */
2699 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2700 
2701 /**
2702  * pci_vpd_find_id_string - Locate id string in VPD
2703  * @buf: Pointer to buffered VPD data
2704  * @len: The length of the buffer area in which to search
2705  * @size: Pointer to field where length of id string is returned
2706  *
2707  * Returns the index of the id string or -ENOENT if not found.
2708  */
2709 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2710 
2711 /**
2712  * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2713  * @buf: Pointer to buffered VPD data
2714  * @len: The length of the buffer area in which to search
2715  * @kw: The keyword to search for
2716  * @size: Pointer to field where length of found keyword data is returned
2717  *
2718  * Returns the index of the information field keyword data or -ENOENT if
2719  * not found.
2720  */
2721 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2722 				 const char *kw, unsigned int *size);
2723 
2724 /**
2725  * pci_vpd_check_csum - Check VPD checksum
2726  * @buf: Pointer to buffered VPD data
2727  * @len: VPD size
2728  *
2729  * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2730  */
2731 int pci_vpd_check_csum(const void *buf, unsigned int len);
2732 
2733 /* PCI <-> OF binding helpers */
2734 #ifdef CONFIG_OF
2735 struct device_node;
2736 struct irq_domain;
2737 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2738 bool pci_host_of_has_msi_map(struct device *dev);
2739 
2740 /* Arch may override this (weak) */
2741 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2742 
2743 #else	/* CONFIG_OF */
/* CONFIG_OF=n: no OF-described MSI domain or msi-map is available */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2747 #endif  /* CONFIG_OF */
2748 
2749 static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev * pdev)2750 pci_device_to_OF_node(const struct pci_dev *pdev)
2751 {
2752 	return pdev ? pdev->dev.of_node : NULL;
2753 }
2754 
pci_bus_to_OF_node(struct pci_bus * bus)2755 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2756 {
2757 	return bus ? bus->dev.of_node : NULL;
2758 }
2759 
2760 #ifdef CONFIG_ACPI
2761 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2762 
2763 void
2764 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2765 bool pci_pr3_present(struct pci_dev *pdev);
2766 #else
/* CONFIG_ACPI=n: no ACPI MSI domain, and no _PR3 to report */
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2770 #endif
2771 
2772 #if defined(CONFIG_X86) && defined(CONFIG_ACPI)
2773 bool arch_pci_dev_is_removable(struct pci_dev *pdev);
2774 #else
/* Non-x86/ACPI configs: report "not removable" */
static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
2776 #endif
2777 
2778 #ifdef CONFIG_EEH
/* EEH device state hung off the arch-specific device data */
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
2783 #endif
2784 
2785 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2786 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2787 int pci_for_each_dma_alias(struct pci_dev *pdev,
2788 			   int (*fn)(struct pci_dev *pdev,
2789 				     u16 alias, void *data), void *data);
2790 
/* Helper functions for operation of device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
/* True only if all bits of PCI_DEV_FLAGS_ASSIGNED are set */
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
2804 
/**
 * pci_ari_enabled - query ARI forwarding status
 * @bus: the PCI bus
 *
 * Returns true if ARI forwarding is enabled.
 */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	/* A bus without an upstream bridge (self == NULL) cannot do ARI */
	return bus->self && bus->self->ari_enabled;
}
2815 
2816 /**
2817  * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2818  * @pdev: PCI device to check
2819  *
2820  * Walk upwards from @pdev and check for each encountered bridge if it's part
2821  * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
2822  * Thunderbolt-attached.  (But rather soldered to the mainboard usually.)
2823  */
pci_is_thunderbolt_attached(struct pci_dev * pdev)2824 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2825 {
2826 	struct pci_dev *parent = pdev;
2827 
2828 	if (pdev->is_thunderbolt)
2829 		return true;
2830 
2831 	while ((parent = pci_upstream_bridge(parent)))
2832 		if (parent->is_thunderbolt)
2833 			return true;
2834 
2835 	return false;
2836 }
2837 
2838 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
2839 void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
2840 #endif
2841 
2842 #include <linux/dma-mapping.h>
2843 
/* Device-prefixed logging: pci_*(pdev, ...) wrap the dev_*() helpers */
#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

/* WARN()/WARN_ONCE() with driver name and device name prepended */
#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2867 
2868 #endif /* LINUX_PCI_H */
2869