// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)	\
static ssize_t	\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)	\
{	\
	struct pci_dev *pdev;	\
	\
	pdev = to_pci_dev(dev);	\
	return sysfs_emit(buf, format_string, pdev->field);	\
}	\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");

static ssize_t irq_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

#ifdef CONFIG_PCI_MSI
	/*
	 * For MSI, show the first MSI IRQ; for all other cases including
	 * MSI-X, show the legacy INTx IRQ.
	 */
	if (pdev->msi_enabled)
		return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
#endif

	return sysfs_emit(buf, "%u\n", pdev->irq);
}
static DEVICE_ATTR_RO(irq);

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

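/*
 * Helper for the local_cpus/local_cpulist attributes: emit the CPUs local
 * to the device, either as a hex mask or as a CPU list.  With CONFIG_NUMA
 * this is the device's NUMA node mask (all online CPUs when the node is
 * unknown); otherwise it falls back to the cpumask of the device's bus.
 */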
static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	if (dev_to_node(dev) == NUMA_NO_NODE)
		mask = cpu_online_mask;
	else
		mask = cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n",
			  pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	enum pci_bus_speed speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];

	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
			  pci_dev->vendor, pci_dev->device,
			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
			  (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

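/*
 * "enable" attribute: writing a non-zero value calls pci_enable_device();
 * writing 0 disables an already-enabled device.  Rejected while a driver
 * is bound to the device, and requires CAP_SYS_ADMIN.
 */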
static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = 0;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoint(buf, 0, &node) < 0)
		return -EINVAL;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

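/* Report the width, in bits, of the device's streaming DMA mask. */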
static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sysfs_emit(buf, "%u\n", subordinate ?
			  !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			  : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

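/* Bus-type attribute: writing a non-zero value rescans all PCI buses. */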
static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR_WO(rescan);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

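/* Per-device attribute: writing a non-zero value rescans the device's bus. */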
static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
							    dev_rescan_store);

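/*
 * Writing a non-zero value removes the device from the kernel's view; the
 * attribute removes its own sysfs file first to avoid deadlocking on it.
 */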
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
				  remove_store);

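/*
 * Per-bus "rescan": for an empty non-root bus, resize the bridge windows
 * and rescan behind the bridge; otherwise rescan the bus directly.
 */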
static ssize_t bus_rescan_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
							    bus_rescan_store);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sysfs_emit(buf, "%pOF\n", np);
}
static DEVICE_ATTR_RO(devspec);
#endif

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = driver_set_override(dev, &pdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

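/*
 * "boot_vga" reports whether this is the boot VGA device, preferring the
 * VGA arbiter's default device and falling back to a shadowed ROM resource.
 */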
static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));

	return sysfs_emit(buf, "%u\n",
			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
			     IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);

static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		cond_resched();
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
				  count)) {
		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
			      current->comm, off);
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	}

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size)
		pci_user_write_config_byte(dev, off, data[off - init_off]);

	pci_config_pm_runtime_put(dev);

	return count;
}
static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);

static struct bin_attribute *pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};

static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
					      struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	a->size = PCI_CFG_SPACE_SIZE;
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		a->size = PCI_CFG_SPACE_EXP_SIZE;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.is_bin_visible = pci_dev_config_attr_is_visible,
};

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space. Returns -ENOSYS if the operation isn't supported
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis. This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	if (!sysfs_initialized)
		return;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	b->legacy_mem->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

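/*
 * Check whether the vma requested for an mmap of BAR @resno fits entirely
 * within that resource; for the procfs mmap API the offset is expressed in
 * the user-visible resource address space.
 */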
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
	    start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
#else
	return -ENXIO;
#endif
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	if (res_attr->mmap)
		res_attr->f_mapping = iomem_get_mapping;
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = 0600;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval) {
		kfree(res_attr);
		return retval;
	}

	if (write_combine)
		pdev->res_attr_wc[num] = res_attr;
	else
		pdev->res_attr[num] = res_attr;

	return 0;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}
static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static struct bin_attribute *pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};

static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
					   struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t rom_size;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (!rom_size)
		return 0;

	a->size = rom_size;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
};

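/* Writing 1 to "reset" triggers a function reset of the device. */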
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val != 1)
		return -EINVAL;

	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};

static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
					     struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pci_reset_supported(pdev))
		return 0;

	return a->mode;
}

static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};

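/*
 * resource<n>_resize attributes: reading shows the supported BAR sizes as a
 * bitmask; writing selects a new size via the Resizable BAR capability,
 * releasing and reassigning same-type resources around the resize.
 */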
#define pci_dev_resource_resize_attr(n)	\
static ssize_t resource##n##_resize_show(struct device *dev,	\
					 struct device_attribute *attr,	\
					 char *buf)	\
{	\
	struct pci_dev *pdev = to_pci_dev(dev);	\
	ssize_t ret;	\
	\
	pci_config_pm_runtime_get(pdev);	\
	\
	ret = sysfs_emit(buf, "%016llx\n",	\
			 (u64)pci_rebar_get_possible_sizes(pdev, n));	\
	\
	pci_config_pm_runtime_put(pdev);	\
	\
	return ret;	\
}	\
	\
static ssize_t resource##n##_resize_store(struct device *dev,	\
					  struct device_attribute *attr,\
					  const char *buf, size_t count)\
{	\
	struct pci_dev *pdev = to_pci_dev(dev);	\
	unsigned long size, flags;	\
	int ret, i;	\
	u16 cmd;	\
	\
	if (kstrtoul(buf, 0, &size) < 0)	\
		return -EINVAL;	\
	\
	device_lock(dev);	\
	if (dev->driver) {	\
		ret = -EBUSY;	\
		goto unlock;	\
	}	\
	\
	pci_config_pm_runtime_get(pdev);	\
	\
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {	\
		ret = aperture_remove_conflicting_pci_devices(pdev,	\
						"resourceN_resize");	\
		if (ret)	\
			goto pm_put;	\
	}	\
	\
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);	\
	pci_write_config_word(pdev, PCI_COMMAND,	\
			      cmd & ~PCI_COMMAND_MEMORY);	\
	\
	flags = pci_resource_flags(pdev, n);	\
	\
	pci_remove_resource_files(pdev);	\
	\
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {	\
		if (pci_resource_len(pdev, i) &&	\
		    pci_resource_flags(pdev, i) == flags)	\
			pci_release_resource(pdev, i);	\
	}	\
	\
	ret = pci_resize_resource(pdev, n, size);	\
	\
	pci_assign_unassigned_bus_resources(pdev->bus);	\
	\
	if (pci_create_resource_files(pdev))	\
		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
	\
	pci_write_config_word(pdev, PCI_COMMAND, cmd);	\
pm_put:	\
	pci_config_pm_runtime_put(pdev);	\
unlock:	\
	device_unlock(dev);	\
	\
	return ret ? ret : count;	\
}	\
static DEVICE_ATTR_RW(resource##n##_resize)

pci_dev_resource_resize_attr(0);
pci_dev_resource_resize_attr(1);
pci_dev_resource_resize_attr(2);
pci_dev_resource_resize_attr(3);
pci_dev_resource_resize_attr(4);
pci_dev_resource_resize_attr(5);

static struct attribute *resource_resize_attrs[] = {
	&dev_attr_resource0_resize.attr,
	&dev_attr_resource1_resize.attr,
	&dev_attr_resource2_resize.attr,
	&dev_attr_resource3_resize.attr,
	&dev_attr_resource4_resize.attr,
	&dev_attr_resource5_resize.attr,
	NULL,
};

static umode_t resource_resize_is_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
}

static const struct attribute_group pci_dev_resource_resize_group = {
	.attrs = resource_resize_attrs,
	.is_visible = resource_resize_is_visible,
};

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return -EACCES;

	return pci_create_resource_files(pdev);
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_resource_files(pdev);
}

static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	struct pci_bus *pbus = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	while ((pbus = pci_find_next_bus(pbus)))
		pci_create_legacy_files(pbus);

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&dev_attr_boot_vga.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &dev_attr_boot_vga.attr)
		if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
			return 0;

	return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_attr_remove.attr,
	&dev_attr_dev_rescan.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	&pci_dev_config_attr_group,
	&pci_dev_rom_attr_group,
	&pci_dev_reset_attr_group,
	&pci_dev_reset_method_attr_group,
	&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
	&pci_dev_smbios_attr_group,
#endif
#ifdef CONFIG_ACPI
	&pci_dev_acpi_attr_group,
#endif
	&pci_dev_resource_resize_group,
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_pf_dev_attr_group,
	&sriov_vf_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
	&aspm_ctrl_attr_group,
#endif
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};