1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 40 $)
4 *
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 */
8
9 #define pr_fmt(fmt) "ACPI: " fmt
10
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/types.h>
15 #include <linux/mutex.h>
16 #include <linux/pm.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/pci.h>
19 #include <linux/pci-acpi.h>
20 #include <linux/dmar.h>
21 #include <linux/acpi.h>
22 #include <linux/slab.h>
23 #include <linux/dmi.h>
24 #include <linux/platform_data/x86/apple.h>
25 #include "internal.h"
26
27 static int acpi_pci_root_add(struct acpi_device *device,
28 const struct acpi_device_id *not_used);
29 static void acpi_pci_root_remove(struct acpi_device *device);
30
/*
 * Hotplug scan-dependent hook: re-check ACPI-based PCI hotplug (acpiphp)
 * bridges below this host bridge whenever a dependent device is scanned.
 */
static int acpi_pci_root_scan_dependent(struct acpi_device *adev)
{
	acpiphp_check_host_bridge(adev);
	return 0;
}
36
/*
 * Minimum set of _OSC support bits the OS must be able to advertise before
 * it makes sense to request native control of PCIe features (checked in
 * os_control_query_checks()).
 */
#define ACPI_PCIE_REQ_SUPPORT	(OSC_PCI_EXT_CONFIG_SUPPORT \
				| OSC_PCI_ASPM_SUPPORT \
				| OSC_PCI_CLOCK_PM_SUPPORT \
				| OSC_PCI_MSI_SUPPORT)
41
/* ACPI hardware IDs of PCI host bridge devices this handler binds to. */
static const struct acpi_device_id root_device_ids[] = {
	{"PNP0A03", 0},
	{"", 0},
};
46
/* Scan handler wiring PCI root bridge add/remove into the ACPI scan core. */
static struct acpi_scan_handler pci_root_handler = {
	.ids = root_device_ids,
	.attach = acpi_pci_root_add,
	.detach = acpi_pci_root_remove,
	.hotplug = {
		.enabled = true,
		.scan_dependent = acpi_pci_root_scan_dependent,
	},
};
56
57 /**
58 * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
59 * @handle: the ACPI CA node in question.
60 *
61 * Note: we could make this API take a struct acpi_device * instead, but
62 * for now, it's more convenient to operate on an acpi_handle.
63 */
int acpi_is_root_bridge(acpi_handle handle)
{
	struct acpi_device *device = acpi_fetch_acpi_dev(handle);

	/* No ACPI device object behind the handle: cannot be a root bridge. */
	if (!device)
		return 0;

	/* A match against root_device_ids (PNP0A03) means "root bridge". */
	return acpi_match_device_ids(device, root_device_ids) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
79
80 static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource * resource,void * data)81 get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
82 {
83 struct resource *res = data;
84 struct acpi_resource_address64 address;
85 acpi_status status;
86
87 status = acpi_resource_to_address64(resource, &address);
88 if (ACPI_FAILURE(status))
89 return AE_OK;
90
91 if ((address.address.address_length > 0) &&
92 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
93 res->start = address.address.minimum;
94 res->end = address.address.minimum + address.address.address_length - 1;
95 }
96
97 return AE_OK;
98 }
99
try_get_root_bridge_busnr(acpi_handle handle,struct resource * res)100 static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
101 struct resource *res)
102 {
103 acpi_status status;
104
105 res->start = -1;
106 status =
107 acpi_walk_resources(handle, METHOD_NAME__CRS,
108 get_root_bridge_busnr_callback, res);
109 if (ACPI_FAILURE(status))
110 return status;
111 if (res->start == -1)
112 return AE_ERROR;
113 return AE_OK;
114 }
115
/* Pairs an _OSC capability bit with a human-readable name for logging. */
struct pci_osc_bit_struct {
	u32 bit;
	char *desc;
};
120
/* Names of the PCI _OSC support word bits. */
static struct pci_osc_bit_struct pci_osc_support_bit[] = {
	{ OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" },
	{ OSC_PCI_ASPM_SUPPORT, "ASPM" },
	{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
	{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
	{ OSC_PCI_MSI_SUPPORT, "MSI" },
	{ OSC_PCI_EDR_SUPPORT, "EDR" },
	{ OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
130
/* Names of the PCI _OSC control word bits. */
static struct pci_osc_bit_struct pci_osc_control_bit[] = {
	{ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" },
	{ OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" },
	{ OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
	{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
	{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
	{ OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
	{ OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" },
};
140
/* Names of the CXL _OSC support word bits. */
static struct pci_osc_bit_struct cxl_osc_support_bit[] = {
	{ OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT, "CXL11PortRegAccess" },
	{ OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT, "CXL20PortDevRegAccess" },
	{ OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT, "CXLProtocolErrorReporting" },
	{ OSC_CXL_NATIVE_HP_SUPPORT, "CXLNativeHotPlug" },
};
147
/* Names of the CXL _OSC control word bits. */
static struct pci_osc_bit_struct cxl_osc_control_bit[] = {
	{ OSC_CXL_ERROR_REPORTING_CONTROL, "CXLMemErrorReporting" },
};
151
static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
			    struct pci_osc_bit_struct *table, int size)
{
	struct pci_osc_bit_struct *entry;
	char buf[80] = "";
	int len = 0;
	int i;

	/* Build a space-separated list of names for every set bit in @word. */
	for (i = 0; i < size; i++) {
		entry = &table[i];
		if (!(word & entry->bit))
			continue;
		len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
				 len ? " " : "", entry->desc);
	}

	dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
}
167
/* Log which PCI _OSC support bits in @word are set. */
static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
{
	decode_osc_bits(root, msg, word, pci_osc_support_bit,
			ARRAY_SIZE(pci_osc_support_bit));
}
173
/* Log which PCI _OSC control bits in @word are set. */
static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
{
	decode_osc_bits(root, msg, word, pci_osc_control_bit,
			ARRAY_SIZE(pci_osc_control_bit));
}
179
/* Log which CXL _OSC support bits in @word are set. */
static void decode_cxl_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
{
	decode_osc_bits(root, msg, word, cxl_osc_support_bit,
			ARRAY_SIZE(cxl_osc_support_bit));
}
185
/* Log which CXL _OSC control bits in @word are set. */
static void decode_cxl_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
{
	decode_osc_bits(root, msg, word, cxl_osc_control_bit,
			ARRAY_SIZE(cxl_osc_control_bit));
}
191
/* True if the bridge was classified as PCIe (HID "PNP0A08" at add time). */
static inline bool is_pcie(struct acpi_pci_root *root)
{
	return root->bridge_type == ACPI_BRIDGE_TYPE_PCIE;
}
196
/* True if the bridge was classified as CXL (HID "ACPI0016" at add time). */
static inline bool is_cxl(struct acpi_pci_root *root)
{
	return root->bridge_type == ACPI_BRIDGE_TYPE_CXL;
}
201
/* _OSC UUIDs: the PCI host bridge UUID and the CXL host bridge UUID. */
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static u8 cxl_osc_uuid_str[] = "68F2D50B-C469-4d8A-BD3D-941A103FD3FC";
204
to_uuid(struct acpi_pci_root * root)205 static char *to_uuid(struct acpi_pci_root *root)
206 {
207 if (is_cxl(root))
208 return cxl_osc_uuid_str;
209 return pci_osc_uuid_str;
210 }
211
cap_length(struct acpi_pci_root * root)212 static int cap_length(struct acpi_pci_root *root)
213 {
214 if (is_cxl(root))
215 return sizeof(u32) * OSC_CXL_CAPABILITY_DWORDS;
216 return sizeof(u32) * OSC_PCI_CAPABILITY_DWORDS;
217 }
218
acpi_pci_run_osc(struct acpi_pci_root * root,const u32 * capbuf,u32 * pci_control,u32 * cxl_control)219 static acpi_status acpi_pci_run_osc(struct acpi_pci_root *root,
220 const u32 *capbuf, u32 *pci_control,
221 u32 *cxl_control)
222 {
223 struct acpi_osc_context context = {
224 .uuid_str = to_uuid(root),
225 .rev = 1,
226 .cap.length = cap_length(root),
227 .cap.pointer = (void *)capbuf,
228 };
229 acpi_status status;
230
231 status = acpi_run_osc(root->device->handle, &context);
232 if (ACPI_SUCCESS(status)) {
233 *pci_control = acpi_osc_ctx_get_pci_control(&context);
234 if (is_cxl(root))
235 *cxl_control = acpi_osc_ctx_get_cxl_control(&context);
236 kfree(context.ret.pointer);
237 }
238 return status;
239 }
240
/*
 * Run an _OSC query (OSC_QUERY_ENABLE set) to learn which control bits the
 * platform is willing to grant, folding in previously negotiated support
 * and control bits.  On success the granted controls are written back to
 * *@control (and *@cxl_control for CXL hosts).  A CXL _OSC failure demotes
 * the bridge to plain PCIe and retries with the PCI UUID.
 */
static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 support,
				      u32 *control, u32 cxl_support,
				      u32 *cxl_control)
{
	acpi_status status;
	u32 pci_result, cxl_result, capbuf[OSC_CXL_CAPABILITY_DWORDS];

	/* Never drop support bits that were already advertised. */
	support |= root->osc_support_set;

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = support;
	capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;

	if (is_cxl(root)) {
		cxl_support |= root->osc_ext_support_set;
		capbuf[OSC_EXT_SUPPORT_DWORD] = cxl_support;
		capbuf[OSC_EXT_CONTROL_DWORD] = *cxl_control | root->osc_ext_control_set;
	}

retry:
	status = acpi_pci_run_osc(root, capbuf, &pci_result, &cxl_result);
	if (ACPI_SUCCESS(status)) {
		root->osc_support_set = support;
		*control = pci_result;
		if (is_cxl(root)) {
			root->osc_ext_support_set = cxl_support;
			*cxl_control = cxl_result;
		}
	} else if (is_cxl(root)) {
		/*
		 * CXL _OSC is optional on CXL 1.1 hosts. Fall back to PCIe _OSC
		 * upon any failure using CXL _OSC.
		 */
		root->bridge_type = ACPI_BRIDGE_TYPE_PCIE;
		goto retry;
	}
	return status;
}
279
/* Look up the struct acpi_pci_root attached to a root bridge handle. */
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *device = acpi_fetch_acpi_dev(handle);

	/* Only devices matching the root bridge IDs carry root driver data. */
	if (!device || acpi_match_device_ids(device, root_device_ids))
		return NULL;

	return acpi_driver_data(device);
}
EXPORT_SYMBOL_GPL(acpi_pci_find_root);
293
294 /**
295 * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
296 * @handle: the handle in question
297 *
298 * Given an ACPI CA handle, the desired PCI device is located in the
299 * list of PCI devices.
300 *
301 * If the device is found, its reference count is increased and this
302 * function returns a pointer to its data structure. The caller must
303 * decrement the reference count by calling pci_dev_put().
304 * If no device is found, %NULL is returned.
305 */
struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	struct acpi_device_physical_node *pnode;
	struct pci_dev *found = NULL;

	if (!adev)
		return NULL;

	mutex_lock(&adev->physical_node_lock);

	/* Return the first physical companion device that is a PCI device. */
	list_for_each_entry(pnode, &adev->physical_node_list, node) {
		if (!dev_is_pci(pnode->dev))
			continue;

		get_device(pnode->dev);
		found = to_pci_dev(pnode->dev);
		break;
	}

	mutex_unlock(&adev->physical_node_lock);

	return found;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
330
/**
 * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
 * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
 * @mask: Mask of _OSC bits to request control of, place to store control mask.
 * @support: _OSC supported capability.
 * @cxl_mask: Mask of CXL _OSC control bits, place to store control mask.
 * @cxl_support: CXL _OSC supported capability.
 *
 * Run _OSC query for @mask and if that is successful, compare the returned
 * mask of control bits with the set of bits the OS strictly requires.  If
 * all of the required bits are set in the returned mask, run _OSC request
 * for it.
 *
 * The variable at the @mask address may be modified regardless of whether or
 * not the function returns success. On success it will contain the mask of
 * _OSC bits the BIOS has granted control of, but its contents are meaningless
 * on failure.
 **/
static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask,
					    u32 support, u32 *cxl_mask,
					    u32 cxl_support)
{
	/* Control bits without which the request is refused (AE_SUPPORT). */
	u32 req = OSC_PCI_EXPRESS_CAPABILITY_CONTROL;
	struct acpi_pci_root *root;
	acpi_status status;
	u32 ctrl, cxl_ctrl = 0, capbuf[OSC_CXL_CAPABILITY_DWORDS];

	if (!mask)
		return AE_BAD_PARAMETER;

	root = acpi_pci_find_root(handle);
	if (!root)
		return AE_NOT_EXIST;

	ctrl = *mask;
	*mask |= root->osc_control_set;

	if (is_cxl(root)) {
		cxl_ctrl = *cxl_mask;
		*cxl_mask |= root->osc_ext_control_set;
	}

	/* Need to check the available controls bits before requesting them. */
	do {
		u32 pci_missing = 0, cxl_missing = 0;

		status = acpi_pci_query_osc(root, support, mask, cxl_support,
					    cxl_mask);
		if (ACPI_FAILURE(status))
			return status;
		if (is_cxl(root)) {
			if (ctrl == *mask && cxl_ctrl == *cxl_mask)
				break;
			pci_missing = ctrl & ~(*mask);
			cxl_missing = cxl_ctrl & ~(*cxl_mask);
		} else {
			if (ctrl == *mask)
				break;
			pci_missing = ctrl & ~(*mask);
		}
		if (pci_missing)
			decode_osc_control(root, "platform does not support",
					   pci_missing);
		if (cxl_missing)
			decode_cxl_osc_control(root, "CXL platform does not support",
					       cxl_missing);
		/* Retry with the request trimmed to what the query granted. */
		ctrl = *mask;
		cxl_ctrl = *cxl_mask;
	} while (*mask || *cxl_mask);

	/* No need to request _OSC if the control was already granted. */
	if ((root->osc_control_set & ctrl) == ctrl &&
	    (root->osc_ext_control_set & cxl_ctrl) == cxl_ctrl)
		return AE_OK;

	if ((ctrl & req) != req) {
		decode_osc_control(root, "not requesting control; platform does not support",
				   req & ~(ctrl));
		return AE_SUPPORT;
	}

	/* Real request: query bit cleared, support words frozen. */
	capbuf[OSC_QUERY_DWORD] = 0;
	capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
	capbuf[OSC_CONTROL_DWORD] = ctrl;
	if (is_cxl(root)) {
		capbuf[OSC_EXT_SUPPORT_DWORD] = root->osc_ext_support_set;
		capbuf[OSC_EXT_CONTROL_DWORD] = cxl_ctrl;
	}

	status = acpi_pci_run_osc(root, capbuf, mask, cxl_mask);
	if (ACPI_FAILURE(status))
		return status;

	root->osc_control_set = *mask;
	root->osc_ext_control_set = *cxl_mask;
	return AE_OK;
}
427
/* Compute the PCI _OSC support word this OS advertises to the platform. */
static u32 calculate_support(void)
{
	u32 support;

	/*
	 * All supported architectures that use ACPI have support for
	 * PCI domains, so we indicate this in _OSC support capabilities.
	 */
	support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
	support |= OSC_PCI_HPX_TYPE_3_SUPPORT;
	if (pci_ext_cfg_avail())
		support |= OSC_PCI_EXT_CONFIG_SUPPORT;
	if (pcie_aspm_support_enabled())
		support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
	if (pci_msi_enabled())
		support |= OSC_PCI_MSI_SUPPORT;
	if (IS_ENABLED(CONFIG_PCIE_EDR))
		support |= OSC_PCI_EDR_SUPPORT;

	return support;
}
449
450 /*
451 * Background on hotplug support, and making it depend on only
452 * CONFIG_HOTPLUG_PCI_PCIE vs. also considering CONFIG_MEMORY_HOTPLUG:
453 *
454 * CONFIG_ACPI_HOTPLUG_MEMORY does depend on CONFIG_MEMORY_HOTPLUG, but
455 * there is no existing _OSC for memory hotplug support. The reason is that
456 * ACPI memory hotplug requires the OS to acknowledge / coordinate with
457 * memory plug events via a scan handler. On the CXL side the equivalent
458 * would be if Linux supported the Mechanical Retention Lock [1], or
459 * otherwise had some coordination for the driver of a PCI device
460 * undergoing hotplug to be consulted on whether the hotplug should
461 * proceed or not.
462 *
463 * The concern is that if Linux says no to supporting CXL hotplug then
464 * the BIOS may say no to giving the OS hotplug control of any other PCIe
465 * device. So the question here is not whether hotplug is enabled, it's
 * whether it is handled natively by the OS at all, and if
467 * CONFIG_HOTPLUG_PCI_PCIE is enabled then the answer is "yes".
468 *
469 * Otherwise, the plan for CXL coordinated remove, since the kernel does
470 * not support blocking hotplug, is to require the memory device to be
471 * disabled before hotplug is attempted. When CONFIG_MEMORY_HOTPLUG is
472 * disabled that step will fail and the remove attempt cancelled by the
473 * user. If that is not honored and the card is removed anyway then it
474 * does not matter if CONFIG_MEMORY_HOTPLUG is enabled or not, it will
475 * cause a crash and other badness.
476 *
477 * Therefore, just say yes to CXL hotplug and require removal to
478 * be coordinated by userspace unless and until the kernel grows better
479 * mechanisms for doing "managed" removal of devices in consultation with
480 * the driver.
481 *
482 * [1]: https://lore.kernel.org/all/20201122014203.4706-1-ashok.raj@intel.com/
483 */
/*
 * Compute the CXL _OSC support word (see the long comment above for why
 * native hotplug support hinges on CONFIG_HOTPLUG_PCI_PCIE alone).
 */
static u32 calculate_cxl_support(void)
{
	u32 support;

	support = OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT;
	support |= OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT;
	if (pci_aer_available())
		support |= OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT;
	if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		support |= OSC_CXL_NATIVE_HP_SUPPORT;

	return support;
}
497
/* Compute the PCI _OSC control word the OS will request from the platform. */
static u32 calculate_control(void)
{
	u32 control;

	control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
		| OSC_PCI_EXPRESS_PME_CONTROL;

	if (IS_ENABLED(CONFIG_PCIEASPM))
		control |= OSC_PCI_EXPRESS_LTR_CONTROL;

	if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;

	if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
		control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;

	if (pci_aer_available())
		control |= OSC_PCI_EXPRESS_AER_CONTROL;

	/*
	 * Per the Downstream Port Containment Related Enhancements ECN to
	 * the PCI Firmware Spec, r3.2, sec 4.5.1, table 4-5,
	 * OSC_PCI_EXPRESS_DPC_CONTROL indicates the OS supports both DPC
	 * and EDR.
	 */
	if (IS_ENABLED(CONFIG_PCIE_DPC) && IS_ENABLED(CONFIG_PCIE_EDR))
		control |= OSC_PCI_EXPRESS_DPC_CONTROL;

	return control;
}
528
calculate_cxl_control(void)529 static u32 calculate_cxl_control(void)
530 {
531 u32 control = 0;
532
533 if (IS_ENABLED(CONFIG_MEMORY_FAILURE))
534 control |= OSC_CXL_ERROR_REPORTING_CONTROL;
535
536 return control;
537 }
538
/*
 * Decide whether requesting _OSC control makes sense at all: bail out when
 * PCIe port services are disabled or when the OS cannot advertise all of
 * the baseline capabilities in ACPI_PCIE_REQ_SUPPORT.
 */
static bool os_control_query_checks(struct acpi_pci_root *root, u32 support)
{
	struct acpi_device *device = root->device;

	if (pcie_ports_disabled) {
		dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
		return false;
	}

	if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
		decode_osc_support(root, "not requesting OS control; OS requires",
				   ACPI_PCIE_REQ_SUPPORT);
		return false;
	}

	return true;
}
556
/*
 * Negotiate _OSC support and control words for @root with the platform and
 * log the outcome.  *@no_aspm is set to 1 when the OS must leave the
 * firmware's ASPM configuration alone (FADT forbids ASPM, or the _OSC
 * negotiation failed).
 */
static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
{
	u32 support, control = 0, requested = 0;
	u32 cxl_support = 0, cxl_control = 0, cxl_requested = 0;
	acpi_status status;
	struct acpi_device *device = root->device;
	acpi_handle handle = device->handle;

	/*
	 * Apple always return failure on _OSC calls when _OSI("Darwin") has
	 * been called successfully. We know the feature set supported by the
	 * platform, so avoid calling _OSC at all
	 */
	if (x86_apple_machine) {
		root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
		decode_osc_control(root, "OS assumes control of",
				   root->osc_control_set);
		return;
	}

	support = calculate_support();

	decode_osc_support(root, "OS supports", support);

	/* Only build a control request if the preconditions hold. */
	if (os_control_query_checks(root, support))
		requested = control = calculate_control();

	if (is_cxl(root)) {
		cxl_support = calculate_cxl_support();
		decode_cxl_osc_support(root, "OS supports", cxl_support);
		cxl_requested = cxl_control = calculate_cxl_control();
	}

	status = acpi_pci_osc_control_set(handle, &control, support,
					  &cxl_control, cxl_support);
	if (ACPI_SUCCESS(status)) {
		if (control)
			decode_osc_control(root, "OS now controls", control);
		if (cxl_control)
			decode_cxl_osc_control(root, "OS now controls",
					       cxl_control);

		if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
			/*
			 * We have ASPM control, but the FADT indicates that
			 * it's unsupported. Leave existing configuration
			 * intact and prevent the OS from touching it.
			 */
			dev_info(&device->dev, "FADT indicates ASPM is unsupported, using BIOS configuration\n");
			*no_aspm = 1;
		}
	} else {
		/*
		 * We want to disable ASPM here, but aspm_disabled
		 * needs to remain in its state from boot so that we
		 * properly handle PCIe 1.1 devices. So we set this
		 * flag here, to defer the action until after the ACPI
		 * root scan.
		 */
		*no_aspm = 1;

		/* _OSC is optional for PCI host bridges */
		if (status == AE_NOT_FOUND && !is_pcie(root))
			return;

		if (control) {
			decode_osc_control(root, "OS requested", requested);
			decode_osc_control(root, "platform willing to grant", control);
		}
		if (cxl_control) {
			decode_cxl_osc_control(root, "OS requested", cxl_requested);
			decode_cxl_osc_control(root, "platform willing to grant",
					       cxl_control);
		}

		dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
			 acpi_format_exception(status));
	}
}
636
acpi_pci_root_add(struct acpi_device * device,const struct acpi_device_id * not_used)637 static int acpi_pci_root_add(struct acpi_device *device,
638 const struct acpi_device_id *not_used)
639 {
640 unsigned long long segment, bus;
641 acpi_status status;
642 int result;
643 struct acpi_pci_root *root;
644 acpi_handle handle = device->handle;
645 int no_aspm = 0;
646 bool hotadd = system_state == SYSTEM_RUNNING;
647 const char *acpi_hid;
648
649 root = kzalloc_obj(struct acpi_pci_root);
650 if (!root)
651 return -ENOMEM;
652
653 segment = 0;
654 status = acpi_evaluate_integer(handle, METHOD_NAME__SEG, NULL,
655 &segment);
656 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
657 dev_err(&device->dev, "can't evaluate _SEG\n");
658 result = -ENODEV;
659 goto end;
660 }
661
662 /* Check _CRS first, then _BBN. If no _BBN, default to zero. */
663 root->secondary.flags = IORESOURCE_BUS;
664 status = try_get_root_bridge_busnr(handle, &root->secondary);
665 if (ACPI_FAILURE(status)) {
666 /*
667 * We need both the start and end of the downstream bus range
668 * to interpret _CBA (MMCONFIG base address), so it really is
669 * supposed to be in _CRS. If we don't find it there, all we
670 * can do is assume [_BBN-0xFF] or [0-0xFF].
671 */
672 root->secondary.end = 0xFF;
673 dev_warn(&device->dev,
674 FW_BUG "no secondary bus range in _CRS\n");
675 status = acpi_evaluate_integer(handle, METHOD_NAME__BBN,
676 NULL, &bus);
677 if (ACPI_SUCCESS(status))
678 root->secondary.start = bus;
679 else if (status == AE_NOT_FOUND)
680 root->secondary.start = 0;
681 else {
682 dev_err(&device->dev, "can't evaluate _BBN\n");
683 result = -ENODEV;
684 goto end;
685 }
686 }
687
688 root->device = device;
689 root->segment = segment & 0xFFFF;
690 device->driver_data = root;
691
692 if (hotadd && dmar_device_add(handle)) {
693 result = -ENXIO;
694 goto end;
695 }
696
697 pr_info("PCI Root Bridge [%s] (domain %04x %pR)\n",
698 acpi_device_bid(device), root->segment, &root->secondary);
699
700 root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
701
702 acpi_hid = acpi_device_hid(root->device);
703 if (strcmp(acpi_hid, "PNP0A08") == 0)
704 root->bridge_type = ACPI_BRIDGE_TYPE_PCIE;
705 else if (strcmp(acpi_hid, "ACPI0016") == 0)
706 root->bridge_type = ACPI_BRIDGE_TYPE_CXL;
707 else
708 dev_dbg(&device->dev, "Assuming non-PCIe host bridge\n");
709
710 negotiate_os_control(root, &no_aspm);
711
712 /*
713 * TBD: Need PCI interface for enumeration/configuration of roots.
714 */
715
716 /*
717 * Scan the Root Bridge
718 * --------------------
719 * Must do this prior to any attempt to bind the root device, as the
720 * PCI namespace does not get created until this call is made (and
721 * thus the root bridge's pci_dev does not exist).
722 */
723 root->bus = pci_acpi_scan_root(root);
724 if (!root->bus) {
725 dev_err(&device->dev,
726 "Bus %04x:%02x not present in PCI namespace\n",
727 root->segment, (unsigned int)root->secondary.start);
728 device->driver_data = NULL;
729 result = -ENODEV;
730 goto remove_dmar;
731 }
732
733 if (no_aspm)
734 pcie_no_aspm();
735
736 pci_acpi_add_root_pm_notifier(device, root);
737 device_set_wakeup_capable(root->bus->bridge, device->wakeup.flags.valid);
738
739 if (hotadd) {
740 pcibios_resource_survey_bus(root->bus);
741 pci_assign_unassigned_root_bus_resources(root->bus);
742 /*
743 * This is only called for the hotadd case. For the boot-time
744 * case, we need to wait until after PCI initialization in
745 * order to deal with IOAPICs mapped in on a PCI BAR.
746 *
747 * This is currently x86-specific, because acpi_ioapic_add()
748 * is an empty function without CONFIG_ACPI_HOTPLUG_IOAPIC.
749 * And CONFIG_ACPI_HOTPLUG_IOAPIC depends on CONFIG_X86_IO_APIC
750 * (see drivers/acpi/Kconfig).
751 */
752 acpi_ioapic_add(root->device->handle);
753 }
754
755 pci_lock_rescan_remove();
756 pci_bus_add_devices(root->bus);
757 pci_unlock_rescan_remove();
758 return 1;
759
760 remove_dmar:
761 if (hotadd)
762 dmar_device_remove(handle);
763 end:
764 kfree(root);
765 return result;
766 }
767
/*
 * Detach handler: stop and remove the PCI bus tree below a departing host
 * bridge, undoing acpi_pci_root_add() in reverse order.
 */
static void acpi_pci_root_remove(struct acpi_device *device)
{
	struct acpi_pci_root *root = acpi_driver_data(device);

	pci_lock_rescan_remove();

	/* Stop devices first, then tear down PM/wakeup plumbing. */
	pci_stop_root_bus(root->bus);

	pci_ioapic_remove(root);
	device_set_wakeup_capable(root->bus->bridge, false);
	pci_acpi_remove_bus_pm_notifier(device);

	pci_remove_root_bus(root->bus);
	WARN_ON(acpi_ioapic_remove(root));

	dmar_device_remove(device->handle);

	pci_unlock_rescan_remove();

	kfree(root);
}
789
790 /*
791 * Following code to support acpi_pci_root_create() is copied from
792 * arch/x86/pci/acpi.c and modified so it could be reused by x86, IA64
793 * and ARM64.
794 */
/*
 * Validate host bridge windows of the given @type against the CPU-visible
 * root resource: drop windows that are not CPU addressable, clamp windows
 * that extend past the root, and merge overlapping windows (the kernel
 * resource tree does not allow overlaps).  The list is rebuilt in place.
 */
static void acpi_pci_root_validate_resources(struct device *dev,
					     struct list_head *resources,
					     unsigned long type)
{
	LIST_HEAD(list);
	struct resource *res1, *res2, *root = NULL;
	struct resource_entry *tmp, *entry, *entry2;

	BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
	root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;

	/* Move everything to a scratch list, re-adding survivors below. */
	list_splice_init(resources, &list);
	resource_list_for_each_entry_safe(entry, tmp, &list) {
		bool free = false;
		resource_size_t end;

		res1 = entry->res;
		if (!(res1->flags & type))
			goto next;

		/* Exclude non-addressable range or non-addressable portion */
		end = min(res1->end, root->end);
		if (end <= res1->start) {
			dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
				 res1);
			free = true;
			goto next;
		} else if (res1->end != end) {
			dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
				 res1, (unsigned long long)end + 1,
				 (unsigned long long)res1->end);
			res1->end = end;
		}

		resource_list_for_each_entry(entry2, resources) {
			res2 = entry2->res;
			if (!(res2->flags & type))
				continue;

			/*
			 * I don't like throwing away windows because then
			 * our resources no longer match the ACPI _CRS, but
			 * the kernel resource tree doesn't allow overlaps.
			 */
			if (resource_union(res1, res2, res2)) {
				dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
					 res2, res1);
				free = true;
				goto next;
			}
		}

next:
		resource_list_del(entry);
		if (free)
			resource_list_free_entry(entry);
		else
			resource_list_add_tail(entry, resources);
	}
}
855
/*
 * Translate an I/O window from CPU addresses to logical I/O port numbers
 * and map it (only meaningful where PCI_IOBASE is defined).  On any
 * failure the resource is flagged IORESOURCE_DISABLED so the caller can
 * discard it.
 */
static void acpi_pci_root_remap_iospace(const struct fwnode_handle *fwnode,
					struct resource_entry *entry)
{
#ifdef PCI_IOBASE
	struct resource *res = entry->res;
	resource_size_t cpu_addr = res->start;
	resource_size_t pci_addr = cpu_addr - entry->offset;
	resource_size_t length = resource_size(res);
	unsigned long port;

	if (pci_register_io_range(fwnode, cpu_addr, length))
		goto err;

	port = pci_address_to_pio(cpu_addr);
	if (port == (unsigned long)-1)
		goto err;

	/* Rewrite the resource in logical port space, keeping PCI offset. */
	res->start = port;
	res->end = port + length - 1;
	entry->offset = port - pci_addr;

	if (pci_remap_iospace(res, cpu_addr) < 0)
		goto err;

	pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res);
	return;
err:
	res->flags |= IORESOURCE_DISABLED;
#endif
}
886
/*
 * Parse the host bridge's _CRS IO/MEM windows into info->resources,
 * remapping I/O windows into logical port space and validating the MEM
 * and IO lists.  Returns the (non-negative) count from
 * acpi_dev_get_resources() or a negative error code.
 */
int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
{
	int ret;
	struct list_head *list = &info->resources;
	struct acpi_device *device = info->bridge;
	struct resource_entry *entry, *tmp;
	unsigned long flags;

	flags = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT;
	ret = acpi_dev_get_resources(device, list,
				     acpi_dev_filter_resource_type_cb,
				     (void *)flags);
	if (ret < 0)
		dev_warn(&device->dev,
			 "failed to parse _CRS method, error code %d\n", ret);
	else if (ret == 0)
		dev_dbg(&device->dev,
			"no IO and memory resources present in _CRS\n");
	else {
		resource_list_for_each_entry_safe(entry, tmp, list) {
			if (entry->res->flags & IORESOURCE_IO)
				acpi_pci_root_remap_iospace(&device->fwnode,
						entry);

			/* Drop windows whose I/O remap failed. */
			if (entry->res->flags & IORESOURCE_DISABLED)
				resource_list_destroy_entry(entry);
			else
				entry->res->name = info->name;
		}
		acpi_pci_root_validate_resources(&device->dev, list,
						 IORESOURCE_MEM);
		acpi_pci_root_validate_resources(&device->dev, list,
						 IORESOURCE_IO);
	}

	return ret;
}
924
pci_acpi_root_add_resources(struct acpi_pci_root_info * info)925 static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
926 {
927 struct resource_entry *entry, *tmp;
928 struct resource *res, *conflict, *root = NULL;
929
930 resource_list_for_each_entry_safe(entry, tmp, &info->resources) {
931 res = entry->res;
932 if (res->flags & IORESOURCE_MEM)
933 root = &iomem_resource;
934 else if (res->flags & IORESOURCE_IO)
935 root = &ioport_resource;
936 else
937 continue;
938
939 /*
940 * Some legacy x86 host bridge drivers use iomem_resource and
941 * ioport_resource as default resource pool, skip it.
942 */
943 if (res == root)
944 continue;
945
946 conflict = insert_resource_conflict(root, res);
947 if (conflict) {
948 dev_info(&info->bridge->dev,
949 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
950 res, conflict->name, conflict);
951 resource_list_destroy_entry(entry);
952 }
953 }
954 }
955
__acpi_pci_root_release_info(struct acpi_pci_root_info * info)956 static void __acpi_pci_root_release_info(struct acpi_pci_root_info *info)
957 {
958 struct resource *res;
959 struct resource_entry *entry, *tmp;
960
961 if (!info)
962 return;
963
964 resource_list_for_each_entry_safe(entry, tmp, &info->resources) {
965 res = entry->res;
966 if (res->parent &&
967 (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
968 release_resource(res);
969 resource_list_destroy_entry(entry);
970 }
971
972 info->ops->release_info(info);
973 }
974
acpi_pci_root_release_info(struct pci_host_bridge * bridge)975 static void acpi_pci_root_release_info(struct pci_host_bridge *bridge)
976 {
977 struct resource *res;
978 struct resource_entry *entry;
979
980 resource_list_for_each_entry(entry, &bridge->windows) {
981 res = entry->res;
982 if (res->flags & IORESOURCE_IO)
983 pci_unmap_iospace(res);
984 if (res->parent &&
985 (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
986 release_resource(res);
987 }
988 __acpi_pci_root_release_info(bridge->release_data);
989 }
990
/*
 * Create and scan the PCI root bus for @root using arch-provided @ops and
 * @info.  Windows come from ops->prepare_resources() or _CRS; host bridge
 * "native_*" feature flags are derived from the _OSC results recorded in
 * @root.  Returns the new bus, or NULL on failure (resources released).
 */
struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
				     struct acpi_pci_root_ops *ops,
				     struct acpi_pci_root_info *info,
				     void *sysdata)
{
	int ret, busnum = root->secondary.start;
	struct acpi_device *device = root->device;
	int node = acpi_get_node(device->handle);
	struct pci_bus *bus;
	struct pci_host_bridge *host_bridge;

	info->root = root;
	info->bridge = device;
	info->ops = ops;
	INIT_LIST_HEAD(&info->resources);
	snprintf(info->name, sizeof(info->name), "PCI Bus %04x:%02x",
		 root->segment, busnum);

	if (ops->init_info && ops->init_info(info))
		goto out_release_info;
	if (ops->prepare_resources)
		ret = ops->prepare_resources(info);
	else
		ret = acpi_pci_probe_root_resources(info);
	if (ret < 0)
		goto out_release_info;

	pci_acpi_root_add_resources(info);
	pci_add_resource(&info->resources, &root->secondary);
	bus = pci_create_root_bus(NULL, busnum, ops->pci_ops,
				  sysdata, &info->resources);
	if (!bus)
		goto out_release_info;

	/* Features whose control _OSC did not grant stay with the firmware. */
	host_bridge = to_pci_host_bridge(bus->bridge);
	if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
		host_bridge->native_pcie_hotplug = 0;
	if (!(root->osc_control_set & OSC_PCI_SHPC_NATIVE_HP_CONTROL))
		host_bridge->native_shpc_hotplug = 0;
	if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
		host_bridge->native_aer = 0;
	if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL))
		host_bridge->native_pme = 0;
	if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
		host_bridge->native_ltr = 0;
	if (!(root->osc_control_set & OSC_PCI_EXPRESS_DPC_CONTROL))
		host_bridge->native_dpc = 0;

	if (!(root->osc_ext_control_set & OSC_CXL_ERROR_REPORTING_CONTROL))
		host_bridge->native_cxl_error = 0;

	acpi_dev_power_up_children_with_adr(device);

	pci_scan_child_bus(bus);
	pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info,
				    info);
	if (node != NUMA_NO_NODE)
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
	return bus;

out_release_info:
	__acpi_pci_root_release_info(info);
	return NULL;
}
1055
/*
 * Register the PCI root bridge scan handler with hotplug support.
 * Skipped entirely when ACPI PCI support is disabled.
 */
void __init acpi_pci_root_init(void)
{
	if (acpi_pci_disabled)
		return;

	/* Apply _CRS quirks before any root bridge gets scanned. */
	pci_acpi_crs_quirks();
	acpi_scan_add_handler_with_hotplug(&pci_root_handler, "pci_root");
}
1064