1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
4 * (C) Copyright 2002-2004 IBM Corp.
5 * (C) Copyright 2003 Matthew Wilcox
6 * (C) Copyright 2003 Hewlett-Packard
7 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
8 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
9 *
10 * File attributes for PCI devices
11 *
12 * Modeled after usb's driverfs.c
13 */
14
15 #include <linux/bitfield.h>
16 #include <linux/cleanup.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
19 #include <linux/pci.h>
20 #include <linux/stat.h>
21 #include <linux/export.h>
22 #include <linux/topology.h>
23 #include <linux/mm.h>
24 #include <linux/fs.h>
25 #include <linux/capability.h>
26 #include <linux/security.h>
27 #include <linux/slab.h>
28 #include <linux/vgaarb.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/msi.h>
31 #include <linux/of.h>
32 #include <linux/aperture.h>
33 #include <linux/unaligned.h>
34 #include "pci.h"
35
/* Hook allowing architectures to append extra device attribute groups */
#ifndef ARCH_PCI_DEV_GROUPS
#define ARCH_PCI_DEV_GROUPS
#endif

/* Nonzero once PCI sysfs setup is done; gates late file creation below */
static int sysfs_initialized;	/* = 0 */
41
/*
 * show configuration fields
 *
 * Generates a read-only sysfs attribute named after @field that prints
 * the corresponding struct pci_dev member using @format_string.
 */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sysfs_emit(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
60
irq_show(struct device * dev,struct device_attribute * attr,char * buf)61 static ssize_t irq_show(struct device *dev,
62 struct device_attribute *attr,
63 char *buf)
64 {
65 struct pci_dev *pdev = to_pci_dev(dev);
66
67 #ifdef CONFIG_PCI_MSI
68 /*
69 * For MSI, show the first MSI IRQ; for all other cases including
70 * MSI-X, show the legacy INTx IRQ.
71 */
72 if (pdev->msi_enabled)
73 return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
74 #endif
75
76 return sysfs_emit(buf, "%u\n", pdev->irq);
77 }
78 static DEVICE_ATTR_RO(irq);
79
broken_parity_status_show(struct device * dev,struct device_attribute * attr,char * buf)80 static ssize_t broken_parity_status_show(struct device *dev,
81 struct device_attribute *attr,
82 char *buf)
83 {
84 struct pci_dev *pdev = to_pci_dev(dev);
85 return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
86 }
87
broken_parity_status_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)88 static ssize_t broken_parity_status_store(struct device *dev,
89 struct device_attribute *attr,
90 const char *buf, size_t count)
91 {
92 struct pci_dev *pdev = to_pci_dev(dev);
93 unsigned long val;
94
95 if (kstrtoul(buf, 0, &val) < 0)
96 return -EINVAL;
97
98 pdev->broken_parity_status = !!val;
99
100 return count;
101 }
102 static DEVICE_ATTR_RW(broken_parity_status);
103
pci_dev_show_local_cpu(struct device * dev,bool list,struct device_attribute * attr,char * buf)104 static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
105 struct device_attribute *attr, char *buf)
106 {
107 const struct cpumask *mask;
108
109 #ifdef CONFIG_NUMA
110 if (dev_to_node(dev) == NUMA_NO_NODE)
111 mask = cpu_online_mask;
112 else
113 mask = cpumask_of_node(dev_to_node(dev));
114 #else
115 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
116 #endif
117 return cpumap_print_to_pagebuf(list, buf, mask);
118 }
119
local_cpus_show(struct device * dev,struct device_attribute * attr,char * buf)120 static ssize_t local_cpus_show(struct device *dev,
121 struct device_attribute *attr, char *buf)
122 {
123 return pci_dev_show_local_cpu(dev, false, attr, buf);
124 }
125 static DEVICE_ATTR_RO(local_cpus);
126
local_cpulist_show(struct device * dev,struct device_attribute * attr,char * buf)127 static ssize_t local_cpulist_show(struct device *dev,
128 struct device_attribute *attr, char *buf)
129 {
130 return pci_dev_show_local_cpu(dev, true, attr, buf);
131 }
132 static DEVICE_ATTR_RO(local_cpulist);
133
134 /*
135 * PCI Bus Class Devices
136 */
cpuaffinity_show(struct device * dev,struct device_attribute * attr,char * buf)137 static ssize_t cpuaffinity_show(struct device *dev,
138 struct device_attribute *attr, char *buf)
139 {
140 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
141
142 return cpumap_print_to_pagebuf(false, buf, cpumask);
143 }
144 static DEVICE_ATTR_RO(cpuaffinity);
145
cpulistaffinity_show(struct device * dev,struct device_attribute * attr,char * buf)146 static ssize_t cpulistaffinity_show(struct device *dev,
147 struct device_attribute *attr, char *buf)
148 {
149 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
150
151 return cpumap_print_to_pagebuf(true, buf, cpumask);
152 }
153 static DEVICE_ATTR_RO(cpulistaffinity);
154
power_state_show(struct device * dev,struct device_attribute * attr,char * buf)155 static ssize_t power_state_show(struct device *dev,
156 struct device_attribute *attr, char *buf)
157 {
158 struct pci_dev *pdev = to_pci_dev(dev);
159
160 return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
161 }
162 static DEVICE_ATTR_RO(power_state);
163
/*
 * show resources
 *
 * One line per resource: "start end flags", all 64-bit hex in the
 * user-visible encoding produced by pci_resource_to_user().
 */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;

	/* Only bridges (devices with a subordinate bus) list window resources */
	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		struct resource zerores = {};

		/*
		 * For backwards compatibility, unset/disabled bridge
		 * windows are reported as an all-zero line rather than
		 * leaking their internal placeholder values.
		 */
		if (pci_resource_is_bridge_win(i) &&
		    res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
			res = &zerores;

		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);
197
max_link_speed_show(struct device * dev,struct device_attribute * attr,char * buf)198 static ssize_t max_link_speed_show(struct device *dev,
199 struct device_attribute *attr, char *buf)
200 {
201 struct pci_dev *pdev = to_pci_dev(dev);
202
203 return sysfs_emit(buf, "%s\n",
204 pci_speed_string(pcie_get_speed_cap(pdev)));
205 }
206 static DEVICE_ATTR_RO(max_link_speed);
207
max_link_width_show(struct device * dev,struct device_attribute * attr,char * buf)208 static ssize_t max_link_width_show(struct device *dev,
209 struct device_attribute *attr, char *buf)
210 {
211 struct pci_dev *pdev = to_pci_dev(dev);
212 ssize_t ret;
213
214 /* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
215 pci_config_pm_runtime_get(pdev);
216 ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
217 pci_config_pm_runtime_put(pdev);
218
219 return ret;
220 }
221 static DEVICE_ATTR_RO(max_link_width);
222
current_link_speed_show(struct device * dev,struct device_attribute * attr,char * buf)223 static ssize_t current_link_speed_show(struct device *dev,
224 struct device_attribute *attr, char *buf)
225 {
226 struct pci_dev *pci_dev = to_pci_dev(dev);
227 u16 linkstat;
228 int err;
229 enum pci_bus_speed speed;
230
231 pci_config_pm_runtime_get(pci_dev);
232 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
233 pci_config_pm_runtime_put(pci_dev);
234
235 if (err)
236 return -EINVAL;
237
238 speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
239
240 return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
241 }
242 static DEVICE_ATTR_RO(current_link_speed);
243
current_link_width_show(struct device * dev,struct device_attribute * attr,char * buf)244 static ssize_t current_link_width_show(struct device *dev,
245 struct device_attribute *attr, char *buf)
246 {
247 struct pci_dev *pci_dev = to_pci_dev(dev);
248 u16 linkstat;
249 int err;
250
251 pci_config_pm_runtime_get(pci_dev);
252 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
253 pci_config_pm_runtime_put(pci_dev);
254
255 if (err)
256 return -EINVAL;
257
258 return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
259 }
260 static DEVICE_ATTR_RO(current_link_width);
261
secondary_bus_number_show(struct device * dev,struct device_attribute * attr,char * buf)262 static ssize_t secondary_bus_number_show(struct device *dev,
263 struct device_attribute *attr,
264 char *buf)
265 {
266 struct pci_dev *pci_dev = to_pci_dev(dev);
267 u8 sec_bus;
268 int err;
269
270 pci_config_pm_runtime_get(pci_dev);
271 err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
272 pci_config_pm_runtime_put(pci_dev);
273
274 if (err)
275 return -EINVAL;
276
277 return sysfs_emit(buf, "%u\n", sec_bus);
278 }
279 static DEVICE_ATTR_RO(secondary_bus_number);
280
subordinate_bus_number_show(struct device * dev,struct device_attribute * attr,char * buf)281 static ssize_t subordinate_bus_number_show(struct device *dev,
282 struct device_attribute *attr,
283 char *buf)
284 {
285 struct pci_dev *pci_dev = to_pci_dev(dev);
286 u8 sub_bus;
287 int err;
288
289 pci_config_pm_runtime_get(pci_dev);
290 err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
291 pci_config_pm_runtime_put(pci_dev);
292
293 if (err)
294 return -EINVAL;
295
296 return sysfs_emit(buf, "%u\n", sub_bus);
297 }
298 static DEVICE_ATTR_RO(subordinate_bus_number);
299
ari_enabled_show(struct device * dev,struct device_attribute * attr,char * buf)300 static ssize_t ari_enabled_show(struct device *dev,
301 struct device_attribute *attr,
302 char *buf)
303 {
304 struct pci_dev *pci_dev = to_pci_dev(dev);
305
306 return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
307 }
308 static DEVICE_ATTR_RO(ari_enabled);
309
modalias_show(struct device * dev,struct device_attribute * attr,char * buf)310 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
311 char *buf)
312 {
313 struct pci_dev *pci_dev = to_pci_dev(dev);
314
315 return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
316 pci_dev->vendor, pci_dev->device,
317 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
318 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
319 (u8)(pci_dev->class));
320 }
321 static DEVICE_ATTR_RO(modalias);
322
static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = 0;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/* Hold the device lock so a driver cannot bind/unbind underneath us */
	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;	/* a bound driver owns the enable state */
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;		/* disabling an already-disabled device */
	device_unlock(dev);

	return result < 0 ? result : count;
}
350
enable_show(struct device * dev,struct device_attribute * attr,char * buf)351 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
352 char *buf)
353 {
354 struct pci_dev *pdev;
355
356 pdev = to_pci_dev(dev);
357 return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
358 }
359 static DEVICE_ATTR_RW(enable);
360
361 #ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoint(buf, 0, &node) < 0)
		return -EINVAL;

	/* NUMA_NO_NODE is the only valid negative value */
	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	/*
	 * Overriding the firmware-assigned node is a workaround for buggy
	 * firmware, so taint the kernel and complain loudly.
	 */
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d.  Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}
388
numa_node_show(struct device * dev,struct device_attribute * attr,char * buf)389 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
390 char *buf)
391 {
392 return sysfs_emit(buf, "%d\n", dev->numa_node);
393 }
394 static DEVICE_ATTR_RW(numa_node);
395 #endif
396
dma_mask_bits_show(struct device * dev,struct device_attribute * attr,char * buf)397 static ssize_t dma_mask_bits_show(struct device *dev,
398 struct device_attribute *attr, char *buf)
399 {
400 struct pci_dev *pdev = to_pci_dev(dev);
401
402 return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
403 }
404 static DEVICE_ATTR_RO(dma_mask_bits);
405
consistent_dma_mask_bits_show(struct device * dev,struct device_attribute * attr,char * buf)406 static ssize_t consistent_dma_mask_bits_show(struct device *dev,
407 struct device_attribute *attr,
408 char *buf)
409 {
410 return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
411 }
412 static DEVICE_ATTR_RO(consistent_dma_mask_bits);
413
msi_bus_show(struct device * dev,struct device_attribute * attr,char * buf)414 static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
415 char *buf)
416 {
417 struct pci_dev *pdev = to_pci_dev(dev);
418 struct pci_bus *subordinate = pdev->subordinate;
419
420 return sysfs_emit(buf, "%u\n", subordinate ?
421 !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
422 : !pdev->no_msi);
423 }
424
static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		/* Endpoint: toggle the per-device flag */
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	/* Bridge: toggle the flag on the whole subordinate bus */
	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);
460
rescan_store(const struct bus_type * bus,const char * buf,size_t count)461 static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
462 {
463 unsigned long val;
464 struct pci_bus *b = NULL;
465
466 if (kstrtoul(buf, 0, &val) < 0)
467 return -EINVAL;
468
469 if (val) {
470 pci_lock_rescan_remove();
471 while ((b = pci_find_next_bus(b)) != NULL)
472 pci_rescan_bus(b);
473 pci_unlock_rescan_remove();
474 }
475 return count;
476 }
477 static BUS_ATTR_WO(rescan);
478
/* Bus-type level attributes (/sys/bus/pci/rescan) */
static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};
492
dev_rescan_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)493 static ssize_t dev_rescan_store(struct device *dev,
494 struct device_attribute *attr, const char *buf,
495 size_t count)
496 {
497 unsigned long val;
498 struct pci_dev *pdev = to_pci_dev(dev);
499
500 if (kstrtoul(buf, 0, &val) < 0)
501 return -EINVAL;
502
503 if (val) {
504 pci_lock_rescan_remove();
505 pci_rescan_bus(pdev->bus);
506 pci_unlock_rescan_remove();
507 }
508 return count;
509 }
510 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
511 dev_rescan_store);
512
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * device_remove_file_self() deletes this very attribute before we
	 * tear the device down, so removal can't deadlock on the open
	 * sysfs file; it returns false if a concurrent writer already
	 * claimed the removal.
	 */
	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
				  remove_store);
527
bus_rescan_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)528 static ssize_t bus_rescan_store(struct device *dev,
529 struct device_attribute *attr,
530 const char *buf, size_t count)
531 {
532 unsigned long val;
533 struct pci_bus *bus = to_pci_bus(dev);
534
535 if (kstrtoul(buf, 0, &val) < 0)
536 return -EINVAL;
537
538 if (val) {
539 pci_lock_rescan_remove();
540 if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
541 pci_rescan_bus_bridge_resize(bus->self);
542 else
543 pci_rescan_bus(bus);
544 pci_unlock_rescan_remove();
545 }
546 return count;
547 }
548 static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
549 bus_rescan_store);
550
static ssize_t reset_subordinate_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	/*
	 * NOTE(review): assumes ->subordinate is non-NULL; this attribute
	 * is listed in pci_bridge_attrs, so visibility gating elsewhere
	 * should restrict it to bridges -- confirm.
	 */
	struct pci_bus *bus = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/* Any nonzero value triggers a reset of the whole secondary bus */
	if (val) {
		int ret = __pci_reset_bus(bus);

		if (ret)
			return ret;
	}

	return count;
}
static DEVICE_ATTR_WO(reset_subordinate);
575
576 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/* Changing the flag may alter whether an upstream bridge can use D3 */
	pdev->d3cold_allowed = !!val;
	pci_bridge_d3_update(pdev);

	/* Wake the device so the new constraint takes effect immediately */
	pm_runtime_resume(dev);

	return count;
}
594
d3cold_allowed_show(struct device * dev,struct device_attribute * attr,char * buf)595 static ssize_t d3cold_allowed_show(struct device *dev,
596 struct device_attribute *attr, char *buf)
597 {
598 struct pci_dev *pdev = to_pci_dev(dev);
599 return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
600 }
601 static DEVICE_ATTR_RW(d3cold_allowed);
602 #endif
603
604 #ifdef CONFIG_OF
devspec_show(struct device * dev,struct device_attribute * attr,char * buf)605 static ssize_t devspec_show(struct device *dev,
606 struct device_attribute *attr, char *buf)
607 {
608 struct pci_dev *pdev = to_pci_dev(dev);
609 struct device_node *np = pci_device_to_OF_node(pdev);
610
611 if (np == NULL)
612 return 0;
613 return sysfs_emit(buf, "%pOF\n", np);
614 }
615 static DEVICE_ATTR_RO(devspec);
616 #endif
617
/* Attributes present on every PCI device */
static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_ari_enabled.attr,
	NULL,
};
648
/*
 * Extra attributes for bridges; presumably gated by an is_visible
 * callback outside this chunk -- confirm against the group definition.
 */
static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	&dev_attr_reset_subordinate.attr,
	NULL,
};
655
/* Link speed/width attributes, meaningful only for PCIe devices */
static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};
663
/* Attributes attached to every PCI bus device */
static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};
679
boot_vga_show(struct device * dev,struct device_attribute * attr,char * buf)680 static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
681 char *buf)
682 {
683 struct pci_dev *pdev = to_pci_dev(dev);
684 struct pci_dev *vga_dev = vga_default_device();
685
686 if (vga_dev)
687 return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
688
689 return sysfs_emit(buf, "%u\n",
690 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
691 IORESOURCE_ROM_SHADOW));
692 }
693 static DEVICE_ATTR_RO(boot_vga);
694
serial_number_show(struct device * dev,struct device_attribute * attr,char * buf)695 static ssize_t serial_number_show(struct device *dev,
696 struct device_attribute *attr, char *buf)
697 {
698 struct pci_dev *pci_dev = to_pci_dev(dev);
699 u64 dsn;
700 u8 bytes[8];
701
702 dsn = pci_get_dsn(pci_dev);
703 if (!dsn)
704 return -EIO;
705
706 put_unaligned_be64(dsn, bytes);
707 return sysfs_emit(buf, "%8phD\n", bytes);
708 }
709 static DEVICE_ATTR_ADMIN_RO(serial_number);
710
/*
 * Read from the device's config space, splitting the transfer into
 * byte/word/dword accesses so each config read is naturally aligned.
 */
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       const struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;	/* privileged: full config space */
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;
	/* else: unprivileged readers see only the first 64 bytes */

	/* Clamp the transfer to the visible window */
	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	/* Leading byte to reach 2-byte alignment */
	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	/* Leading word to reach 4-byte alignment */
	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	/* Bulk of the transfer as aligned dwords */
	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		cond_resched();	/* large reads shouldn't hog the CPU */
	}

	/* Trailing word, then trailing byte */
	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}
785
/*
 * Write to the device's config space with the same aligned
 * byte/word/dword splitting as pci_read_config().
 */
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	/* Config writes may be forbidden under kernel lockdown */
	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	/*
	 * Writes into a region a driver claimed exclusively are allowed
	 * but taint the kernel and warn once, since they can confuse the
	 * driver.
	 */
	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
				  count)) {
		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
			      current->comm, off);
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	}

	/* Clamp the transfer to the device's config space size */
	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	/* Leading byte to reach 2-byte alignment */
	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	/* Leading word to reach 4-byte alignment */
	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	/* Bulk of the transfer as aligned dwords */
	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	/* Trailing word, then trailing byte */
	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size)
		pci_user_write_config_byte(dev, off, data[off - init_off]);

	pci_config_pm_runtime_put(dev);

	return count;
}
static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
856
static const struct bin_attribute *const pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};

/*
 * Size the "config" file: 4096 bytes when the device has extended
 * config space, otherwise the standard 256 bytes.
 */
static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
					   const struct bin_attribute *a,
					   int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		return PCI_CFG_SPACE_EXP_SIZE;
	return PCI_CFG_SPACE_SIZE;
}

static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.bin_size = pci_dev_config_attr_bin_size,
};
877
878 /*
879 * llseek operation for mmappable PCI resources.
880 * May be left unused if the arch doesn't provide them.
881 */
882 static __maybe_unused loff_t
pci_llseek_resource(struct file * filep,struct kobject * kobj __always_unused,const struct bin_attribute * attr,loff_t offset,int whence)883 pci_llseek_resource(struct file *filep,
884 struct kobject *kobj __always_unused,
885 const struct bin_attribute *attr,
886 loff_t offset, int whence)
887 {
888 return fixed_size_llseek(filep, offset, whence, attr->size);
889 }
890
891 #ifdef HAVE_PCI_LEGACY
892 /**
893 * pci_read_legacy_io - read byte(s) from legacy I/O port space
894 * @filp: open sysfs file
895 * @kobj: kobject corresponding to file to read from
896 * @bin_attr: struct bin_attribute for this file
897 * @buf: buffer to store results
898 * @off: offset into legacy I/O port space
899 * @count: number of bytes to read
900 *
901 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
902 * callback routine (pci_legacy_read).
903 */
pci_read_legacy_io(struct file * filp,struct kobject * kobj,const struct bin_attribute * bin_attr,char * buf,loff_t off,size_t count)904 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
905 const struct bin_attribute *bin_attr,
906 char *buf, loff_t off, size_t count)
907 {
908 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
909
910 /* Only support 1, 2 or 4 byte accesses */
911 if (count != 1 && count != 2 && count != 4)
912 return -EINVAL;
913
914 return pci_legacy_read(bus, off, (u32 *)buf, count);
915 }
916
917 /**
918 * pci_write_legacy_io - write byte(s) to legacy I/O port space
919 * @filp: open sysfs file
920 * @kobj: kobject corresponding to file to read from
921 * @bin_attr: struct bin_attribute for this file
922 * @buf: buffer containing value to be written
923 * @off: offset into legacy I/O port space
924 * @count: number of bytes to write
925 *
926 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
927 * callback routine (pci_legacy_write).
928 */
pci_write_legacy_io(struct file * filp,struct kobject * kobj,const struct bin_attribute * bin_attr,char * buf,loff_t off,size_t count)929 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
930 const struct bin_attribute *bin_attr,
931 char *buf, loff_t off, size_t count)
932 {
933 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
934
935 /* Only support 1, 2 or 4 byte accesses */
936 if (count != 1 && count != 2 && count != 4)
937 return -EINVAL;
938
939 return pci_legacy_write(bus, off, *(u32 *)buf, count);
940 }
941
942 /**
943 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
944 * @filp: open sysfs file
945 * @kobj: kobject corresponding to device to be mapped
946 * @attr: struct bin_attribute for this file
947 * @vma: struct vm_area_struct passed to mmap
948 *
949 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
950 * legacy memory space (first meg of bus space) into application virtual
951 * memory space.
952 */
pci_mmap_legacy_mem(struct file * filp,struct kobject * kobj,const struct bin_attribute * attr,struct vm_area_struct * vma)953 static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
954 const struct bin_attribute *attr,
955 struct vm_area_struct *vma)
956 {
957 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
958
959 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
960 }
961
962 /**
963 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
964 * @filp: open sysfs file
965 * @kobj: kobject corresponding to device to be mapped
966 * @attr: struct bin_attribute for this file
967 * @vma: struct vm_area_struct passed to mmap
968 *
969 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
970 * legacy IO space (first meg of bus space) into application virtual
971 * memory space. Returns -ENOSYS if the operation isn't supported
972 */
pci_mmap_legacy_io(struct file * filp,struct kobject * kobj,const struct bin_attribute * attr,struct vm_area_struct * vma)973 static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
974 const struct bin_attribute *attr,
975 struct vm_area_struct *vma)
976 {
977 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
978
979 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
980 }
981
982 /**
983 * pci_adjust_legacy_attr - adjustment of legacy file attributes
984 * @b: bus to create files under
985 * @mmap_type: I/O port or memory
986 *
987 * Stub implementation. Can be overridden by arch if necessary.
988 */
pci_adjust_legacy_attr(struct pci_bus * b,enum pci_mmap_state mmap_type)989 void __weak pci_adjust_legacy_attr(struct pci_bus *b,
990 enum pci_mmap_state mmap_type)
991 {
992 }
993
994 /**
995 * pci_create_legacy_files - create legacy I/O port and memory files
996 * @b: bus to create files under
997 *
998 * Some platforms allow access to legacy I/O port and ISA memory space on
999 * a per-bus basis. This routine creates the files and ties them into
1000 * their associated read, write and mmap files from pci-sysfs.c
1001 *
1002 * On error unwind, but don't propagate the error to the caller
1003 * as it is ok to set up the PCI bus without these files.
1004 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	/* Nothing to attach to before pci_sysfs_init() has run. */
	if (!sysfs_initialized)
		return;

	/*
	 * A single allocation backs both attributes: legacy_io at index 0
	 * and legacy_mem at index 1 (see "b->legacy_io + 1" below), so one
	 * kfree() on the error/removal path releases both.
	 */
	b->legacy_io = kzalloc_objs(struct bin_attribute, 2, GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;	/* 64K legacy I/O port space */
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	/* See pci_create_attr() for motivation */
	b->legacy_io->llseek = pci_llseek_resource;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;	/* first meg of ISA memory */
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	/* See pci_create_attr() for motivation */
	b->legacy_mem->llseek = pci_llseek_resource;
	b->legacy_mem->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	/* Not fatal: the bus works without these files (see kernel-doc). */
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}
1056
pci_remove_legacy_files(struct pci_bus * b)1057 void pci_remove_legacy_files(struct pci_bus *b)
1058 {
1059 if (b->legacy_io) {
1060 device_remove_bin_file(&b->dev, b->legacy_io);
1061 device_remove_bin_file(&b->dev, b->legacy_mem);
1062 kfree(b->legacy_io); /* both are allocated here */
1063 }
1064 }
1065 #endif /* HAVE_PCI_LEGACY */
1066
1067 #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
1068 /**
1069 * pci_mmap_resource - map a PCI resource into user memory space
1070 * @kobj: kobject for mapping
1071 * @attr: struct bin_attribute for the file being mapped
1072 * @vma: struct vm_area_struct passed into the mmap
1073 * @write_combine: 1 for write_combine mapping
1074 *
1075 * Use the regular PCI mapping routines to map a PCI resource into userspace.
1076 */
static int pci_mmap_resource(struct kobject *kobj, const struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;	/* BAR number stashed by pci_create_attr() */
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	/* Raw device access is forbidden when the kernel is locked down. */
	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	/* Memory marked exclusive for the kernel must not be user-mapped. */
	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	/* Reject mappings that do not fit within the BAR. */
	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}
1100
/* mmap entry point for the plain "resourceN" file: uncached mapping. */
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}
1107
/* mmap entry point for the "resourceN_wc" file: write-combining mapping. */
static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}
1114
/*
 * Common read/write backend for "resourceN" files that live in I/O port
 * space.  @off is the offset into the BAR; only naturally sized 1-, 2- and
 * 4-byte accesses are forwarded to the port accessors.
 */
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       const struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;	/* BAR number from pci_create_attr() */
	unsigned long port = off;

	/* Translate the file offset into an absolute port number. */
	port += pci_resource_start(pdev, bar);

	/* Starting past the end of the BAR reads/writes nothing (EOF). */
	if (port > pci_resource_end(pdev, bar))
		return 0;

	/* The whole access must fit inside the BAR. */
	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	/* Any other access size is unsupported. */
	return -EINVAL;
#else
	/* No port I/O on this configuration. */
	return -ENXIO;
#endif
}
1157
/* sysfs read handler for I/O-port-backed "resourceN" files. */
static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
1164
pci_write_resource_io(struct file * filp,struct kobject * kobj,const struct bin_attribute * attr,char * buf,loff_t off,size_t count)1165 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
1166 const struct bin_attribute *attr, char *buf,
1167 loff_t off, size_t count)
1168 {
1169 int ret;
1170
1171 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1172 if (ret)
1173 return ret;
1174
1175 return pci_resource_io(filp, kobj, attr, buf, off, count, true);
1176 }
1177
1178 /**
1179 * pci_remove_resource_files - cleanup resource files
1180 * @pdev: dev to cleanup
1181 *
1182 * If we created resource files for @pdev, remove them from sysfs and
1183 * free their resources.
1184 */
pci_remove_resource_files(struct pci_dev * pdev)1185 static void pci_remove_resource_files(struct pci_dev *pdev)
1186 {
1187 int i;
1188
1189 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1190 struct bin_attribute *res_attr;
1191
1192 res_attr = pdev->res_attr[i];
1193 if (res_attr) {
1194 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1195 kfree(res_attr);
1196 }
1197
1198 res_attr = pdev->res_attr_wc[i];
1199 if (res_attr) {
1200 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1201 kfree(res_attr);
1202 }
1203 }
1204 }
1205
/*
 * Create one "resourceN" (or "resourceN_wc") sysfs file for BAR @num.
 * The attribute name string is carved out of the same allocation as the
 * bin_attribute, so a single kfree() releases both.
 */
static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;	/* "resourceN_wc"/"resourceN" + NUL */
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	/* The name lives immediately after the struct. */
	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			/* I/O BARs get read/write; mmap only if arch allows. */
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	if (res_attr->mmap) {
		res_attr->f_mapping = iomem_get_mapping;
		/*
		 * generic_file_llseek() consults f_mapping->host to determine
		 * the file size. As iomem_inode knows nothing about the
		 * attribute, it's not going to work, so override it as well.
		 */
		res_attr->llseek = pci_llseek_resource;
	}
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = 0600;
	res_attr->size = pci_resource_len(pdev, num);
	/* Stash the BAR number for the read/write/mmap callbacks. */
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval) {
		kfree(res_attr);
		return retval;
	}

	/* Remember the attribute so pci_remove_resource_files() can free it. */
	if (write_combine)
		pdev->res_attr_wc[num] = res_attr;
	else
		pdev->res_attr[num] = res_attr;

	return 0;
}
1261
1262 /**
1263 * pci_create_resource_files - create resource files in sysfs for @dev
1264 * @pdev: dev in question
1265 *
1266 * Walk the resources in @pdev creating files for each resource available.
1267 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Skip devices with non-mappable BARs */
	if (pdev->non_mappable_bars)
		return 0;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		/* On any failure, remove whatever was created so far. */
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
1296 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
/* Stubs for configs without HAVE_PCI_MMAP / ARCH_GENERIC_PCI_MMAP_RESOURCE. */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
1299 #endif
1300
1301 /**
1302 * pci_write_rom - used to enable access to the PCI ROM display
1303 * @filp: sysfs file
1304 * @kobj: kernel object handle
1305 * @bin_attr: struct bin_attribute for this file
1306 * @buf: user input
1307 * @off: file offset
1308 * @count: number of byte in input
1309 *
1310 * writing anything except 0 enables it
1311 */
pci_write_rom(struct file * filp,struct kobject * kobj,const struct bin_attribute * bin_attr,char * buf,loff_t off,size_t count)1312 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
1313 const struct bin_attribute *bin_attr, char *buf,
1314 loff_t off, size_t count)
1315 {
1316 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1317
1318 if ((off == 0) && (*buf == '0') && (count == 2))
1319 pdev->rom_attr_enabled = 0;
1320 else
1321 pdev->rom_attr_enabled = 1;
1322
1323 return count;
1324 }
1325
1326 /**
1327 * pci_read_rom - read a PCI ROM
1328 * @filp: sysfs file
1329 * @kobj: kernel object handle
1330 * @bin_attr: struct bin_attribute for this file
1331 * @buf: where to put the data we read from the ROM
1332 * @off: file offset
1333 * @count: number of bytes to read
1334 *
1335 * Put @count bytes starting at @off into @buf from the ROM in the PCI
1336 * device corresponding to @kobj.
1337 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	/* Reads fail until userspace enables access via pci_write_rom(). */
	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	/* Clamp the read to the ROM size; past-the-end reads return 0. */
	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	/* Always balance the mapping before returning. */
	pci_unmap_rom(pdev, rom);

	return count;
}
/* "rom": owner read/write; writes toggle access, reads dump the ROM. */
static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static const struct bin_attribute *const pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};
1371
pci_dev_rom_attr_is_visible(struct kobject * kobj,const struct bin_attribute * a,int n)1372 static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
1373 const struct bin_attribute *a, int n)
1374 {
1375 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1376
1377 /* If the device has a ROM, try to expose it in sysfs. */
1378 if (!pci_resource_end(pdev, PCI_ROM_RESOURCE))
1379 return 0;
1380
1381 return a->attr.mode;
1382 }
1383
pci_dev_rom_attr_bin_size(struct kobject * kobj,const struct bin_attribute * a,int n)1384 static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
1385 const struct bin_attribute *a, int n)
1386 {
1387 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1388
1389 return pci_resource_len(pdev, PCI_ROM_RESOURCE);
1390 }
1391
/* Bundles the "rom" binary attribute with its visibility/size callbacks. */
static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
	.bin_size = pci_dev_rom_attr_bin_size,
};
1397
reset_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1398 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1399 const char *buf, size_t count)
1400 {
1401 struct pci_dev *pdev = to_pci_dev(dev);
1402 unsigned long val;
1403 ssize_t result;
1404
1405 if (kstrtoul(buf, 0, &val) < 0)
1406 return -EINVAL;
1407
1408 if (val != 1)
1409 return -EINVAL;
1410
1411 pm_runtime_get_sync(dev);
1412 result = pci_reset_function(pdev);
1413 pm_runtime_put(dev);
1414 if (result < 0)
1415 return result;
1416
1417 return count;
1418 }
1419 static DEVICE_ATTR_WO(reset);
1420
/* NULL-terminated list backing pci_dev_reset_attr_group. */
static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};
1425
pci_dev_reset_attr_is_visible(struct kobject * kobj,struct attribute * a,int n)1426 static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
1427 struct attribute *a, int n)
1428 {
1429 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1430
1431 if (!pci_reset_supported(pdev))
1432 return 0;
1433
1434 return a->mode;
1435 }
1436
/* "reset" attribute, shown only when a reset method is supported. */
static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};
1441
/* Emit the device's reset methods, space-separated, in priority order. */
static ssize_t reset_method_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len = 0;
	int i, m;

	/* reset_methods[] is a 0-terminated list of method indices. */
	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
		m = pdev->reset_methods[i];
		if (!m)
			break;

		/* Prefix a space before every entry but the first. */
		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
				     pci_reset_fn_methods[m].name);
	}

	/* Add the trailing newline only if anything was printed. */
	if (len)
		len += sysfs_emit_at(buf, len, "\n");

	return len;
}
1463
reset_method_lookup(const char * name)1464 static int reset_method_lookup(const char *name)
1465 {
1466 int m;
1467
1468 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
1469 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
1470 return m;
1471 }
1472
1473 return 0; /* not found */
1474 }
1475
/*
 * Set the device's ordered list of reset methods from a space-separated
 * list of names.  The previous list is kept intact unless every name in
 * the input validates.  "" disables all methods; "default" restores the
 * probe-time list.
 */
static ssize_t reset_method_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char *tmp_options, *name;
	int m, n;
	u8 reset_methods[PCI_NUM_RESET_METHODS] = {};

	/* Empty write: terminate the list at slot 0, disabling all resets. */
	if (sysfs_streq(buf, "")) {
		pdev->reset_methods[0] = 0;
		pci_warn(pdev, "All device reset methods disabled by user");
		return count;
	}

	/*
	 * Scoped runtime-PM guard -- presumably released automatically on
	 * every return below (cleanup-based macro); probing reset methods
	 * may touch the device, so it must be powered up.
	 */
	PM_RUNTIME_ACQUIRE(dev, pm);
	if (PM_RUNTIME_ACQUIRE_ERR(&pm))
		return -ENXIO;

	/* "default" re-runs the probe-time method discovery. */
	if (sysfs_streq(buf, "default")) {
		pci_init_reset_methods(pdev);
		return count;
	}

	/* Writable copy for strsep(); freed automatically via __free(). */
	char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	n = 0;
	tmp_options = options;
	while ((name = strsep(&tmp_options, " ")) != NULL) {
		if (sysfs_streq(name, ""))
			continue;

		name = strim(name);

		/* Leave previous methods unchanged if input is invalid */
		m = reset_method_lookup(name);
		if (!m) {
			pci_err(pdev, "Invalid reset method '%s'", name);
			return -EINVAL;
		}

		/* Probe that the named method works on this device. */
		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
			pci_err(pdev, "Unsupported reset method '%s'", name);
			return -EINVAL;
		}

		/* Keep one slot free for the terminating 0 entry. */
		if (n == PCI_NUM_RESET_METHODS - 1) {
			pci_err(pdev, "Too many reset methods\n");
			return -EINVAL;
		}

		reset_methods[n++] = m;
	}

	reset_methods[n] = 0;

	/* Warn if dev-specific supported but not highest priority */
	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
	    reset_methods[0] != 1)
		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
	/* Commit the fully validated list only at the very end. */
	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
	return count;
}
static DEVICE_ATTR_RW(reset_method);
1542
static struct attribute *pci_dev_reset_method_attrs[] = {
	&dev_attr_reset_method.attr,
	NULL,
};

/* Shares the "reset supported" visibility check with the reset group. */
static const struct attribute_group pci_dev_reset_method_attr_group = {
	.attrs = pci_dev_reset_method_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};
1552
__resource_resize_show(struct device * dev,int n,char * buf)1553 static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
1554 {
1555 struct pci_dev *pdev = to_pci_dev(dev);
1556 ssize_t ret;
1557
1558 pci_config_pm_runtime_get(pdev);
1559
1560 ret = sysfs_emit(buf, "%016llx\n",
1561 pci_rebar_get_possible_sizes(pdev, n));
1562
1563 pci_config_pm_runtime_put(pdev);
1564
1565 return ret;
1566 }
1567
/*
 * Resize BAR @n to 2^@size MB (value written by userspace).  The device
 * must be unbound and have no VFs; memory decode is disabled around the
 * resize and the bus resources are reassigned afterwards.
 */
static ssize_t __resource_resize_store(struct device *dev, int n,
				       const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;
	unsigned long size;
	int ret;
	u16 cmd;

	if (kstrtoul(buf, 0, &size) < 0)
		return -EINVAL;

	/* Refuse while a driver is bound or SR-IOV VFs exist. */
	device_lock(dev);
	if (dev->driver || pci_num_vf(pdev)) {
		ret = -EBUSY;
		goto unlock;
	}

	pci_config_pm_runtime_get(pdev);

	/* VGA devices may be in use by firmware framebuffers; evict them. */
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
		ret = aperture_remove_conflicting_pci_devices(pdev,
						"resourceN_resize");
		if (ret)
			goto pm_put;
	}

	/* Disable memory decode while the BAR moves/resizes. */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* The resourceN files describe the old size; drop and recreate. */
	pci_remove_resource_files(pdev);

	ret = pci_resize_resource(pdev, n, size, 0);

	pci_assign_unassigned_bus_resources(bus);

	if (pci_create_resource_files(pdev))
		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");

	/* Restore the original command register (re-enables decode). */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
pm_put:
	pci_config_pm_runtime_put(pdev);
unlock:
	device_unlock(dev);

	return ret ? ret : count;
}
1616
/*
 * Generate resource<n>_resize show/store wrappers that forward to the
 * shared __resource_resize_show()/__resource_resize_store() helpers.
 */
#define pci_dev_resource_resize_attr(n)					\
static ssize_t resource##n##_resize_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	return __resource_resize_show(dev, n, buf);			\
}									\
static ssize_t resource##n##_resize_store(struct device *dev,		\
					  struct device_attribute *attr,\
					  const char *buf, size_t count)\
{									\
	return __resource_resize_store(dev, n, buf, count);		\
}									\
static DEVICE_ATTR_RW(resource##n##_resize)

/* One attribute per standard BAR (0-5). */
pci_dev_resource_resize_attr(0);
pci_dev_resource_resize_attr(1);
pci_dev_resource_resize_attr(2);
pci_dev_resource_resize_attr(3);
pci_dev_resource_resize_attr(4);
pci_dev_resource_resize_attr(5);

static struct attribute *resource_resize_attrs[] = {
	&dev_attr_resource0_resize.attr,
	&dev_attr_resource1_resize.attr,
	&dev_attr_resource2_resize.attr,
	&dev_attr_resource3_resize.attr,
	&dev_attr_resource4_resize.attr,
	&dev_attr_resource5_resize.attr,
	NULL,
};
1648
resource_resize_is_visible(struct kobject * kobj,struct attribute * a,int n)1649 static umode_t resource_resize_is_visible(struct kobject *kobj,
1650 struct attribute *a, int n)
1651 {
1652 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1653
1654 return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
1655 }
1656
/* resource<n>_resize attributes, visible only on resizable BARs. */
static const struct attribute_group pci_dev_resource_resize_group = {
	.attrs = resource_resize_attrs,
	.is_visible = resource_resize_is_visible,
};
1661
pci_create_sysfs_dev_files(struct pci_dev * pdev)1662 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1663 {
1664 if (!sysfs_initialized)
1665 return -EACCES;
1666
1667 return pci_create_resource_files(pdev);
1668 }
1669
1670 /**
1671 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
1672 * @pdev: device whose entries we should free
1673 *
1674 * Cleanup when @pdev is removed from sysfs.
1675 */
pci_remove_sysfs_dev_files(struct pci_dev * pdev)1676 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
1677 {
1678 if (!sysfs_initialized)
1679 return;
1680
1681 pci_remove_resource_files(pdev);
1682 }
1683
/*
 * Create resource and legacy files for every device/bus that was
 * enumerated before sysfs support became available.
 */
static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	struct pci_bus *pbus = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			/* Drop the reference the iterator holds on pdev. */
			pci_dev_put(pdev);
			return retval;
		}
	}

	while ((pbus = pci_find_next_bus(pbus)))
		pci_create_legacy_files(pbus);

	return 0;
}
late_initcall(pci_sysfs_init);
1705
/* Conditionally visible device attributes (see pci_dev_attrs_are_visible). */
static struct attribute *pci_dev_dev_attrs[] = {
	&dev_attr_boot_vga.attr,
	&dev_attr_serial_number.attr,
	NULL,
};
1711
pci_dev_attrs_are_visible(struct kobject * kobj,struct attribute * a,int n)1712 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1713 struct attribute *a, int n)
1714 {
1715 struct device *dev = kobj_to_dev(kobj);
1716 struct pci_dev *pdev = to_pci_dev(dev);
1717
1718 if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
1719 return a->mode;
1720
1721 if (a == &dev_attr_serial_number.attr && pci_get_dsn(pdev))
1722 return a->mode;
1723
1724 return 0;
1725 }
1726
/* Hotplug control files: "remove" and "dev_rescan". */
static struct attribute *pci_dev_hp_attrs[] = {
	&dev_attr_remove.attr,
	&dev_attr_dev_rescan.attr,
	NULL,
};
1732
pci_dev_hp_attrs_are_visible(struct kobject * kobj,struct attribute * a,int n)1733 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1734 struct attribute *a, int n)
1735 {
1736 struct device *dev = kobj_to_dev(kobj);
1737 struct pci_dev *pdev = to_pci_dev(dev);
1738
1739 if (pdev->is_virtfn)
1740 return 0;
1741
1742 return a->mode;
1743 }
1744
pci_bridge_attrs_are_visible(struct kobject * kobj,struct attribute * a,int n)1745 static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
1746 struct attribute *a, int n)
1747 {
1748 struct device *dev = kobj_to_dev(kobj);
1749 struct pci_dev *pdev = to_pci_dev(dev);
1750
1751 if (pci_is_bridge(pdev))
1752 return a->mode;
1753
1754 return 0;
1755 }
1756
pcie_dev_attrs_are_visible(struct kobject * kobj,struct attribute * a,int n)1757 static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
1758 struct attribute *a, int n)
1759 {
1760 struct device *dev = kobj_to_dev(kobj);
1761 struct pci_dev *pdev = to_pci_dev(dev);
1762
1763 if (pci_is_pcie(pdev))
1764 return a->mode;
1765
1766 return 0;
1767 }
1768
/* Unconditionally visible attributes defined earlier in this file. */
static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

/* Attribute groups registered via the pci_dev device type. */
const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	&pci_dev_config_attr_group,
	&pci_dev_rom_attr_group,
	&pci_dev_reset_attr_group,
	&pci_dev_reset_method_attr_group,
	&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
	&pci_dev_smbios_attr_group,
#endif
#ifdef CONFIG_ACPI
	&pci_dev_acpi_attr_group,
#endif
	&pci_dev_resource_resize_group,
	ARCH_PCI_DEV_GROUPS
	NULL,
};

/* Hotplug files, hidden on SR-IOV virtual functions. */
static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

/* boot_vga / serial_number, shown per-device as applicable. */
static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

/* Bridge-only attributes. */
static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

/* PCIe-only attributes. */
static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

/* Conditionally visible groups, attached via the device's attr groups. */
const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_pf_dev_attr_group,
	&sriov_vf_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
	&aer_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
	&aspm_ctrl_attr_group,
#endif
#ifdef CONFIG_PCI_DOE
	&pci_doe_sysfs_group,
#endif
#ifdef CONFIG_PCI_TSM
	&pci_tsm_auth_attr_group,
	&pci_tsm_attr_group,
#endif
	NULL,
};
1836