1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * efi.c - EFI subsystem
4 *
5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8 *
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
13 */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 #include <linux/notifier.h>
36
37 #include <asm/early_ioremap.h>
38
/*
 * Global EFI state shared across the kernel.
 *
 * Every configuration-table address starts out as EFI_INVALID_TABLE_ADDR so
 * that "table not present" is distinguishable from a real physical address;
 * efi_config_parse_tables() overwrites the entries it actually discovers.
 * runtime_supported_mask starts fully populated and is narrowed later, e.g.
 * by the RT properties table or when runtime services are disabled.
 */
struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
	.ovmf_debug_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);
60
/* Physical address of the LINUX_EFI_RANDOM_SEED table, if firmware provided one. */
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
/* Boot-time-only table addresses; consumed in efi_config_parse_tables(). */
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

/* Defined elsewhere; wired to LINUX_EFI_SCREEN_INFO_TABLE_GUID in common_tables[]. */
extern unsigned long screen_info_table;
67
/*
 * Dedicated, statically initialized mm_struct for EFI mappings.
 * NOTE(review): its users live in arch code, not visible in this file.
 */
struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq	= SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};
78
/* Ordered workqueue on which EFI runtime service calls are serialized. */
struct workqueue_struct *efi_rts_wq;

/* Whether runtime services are disabled (Kconfig default, "noefi", "efi=noruntime"). */
static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);

/* "noefi" on the kernel command line disables EFI runtime services. */
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);
88
/* Report whether EFI runtime services have been disabled. */
bool efi_runtime_disabled(void)
{
	return disable_runtime;
}
93
/* Soft reservations are honoured unless "efi=nosoftreserve" was given. */
bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}
98
parse_efi_cmdline(char * str)99 static int __init parse_efi_cmdline(char *str)
100 {
101 if (!str) {
102 pr_warn("need at least one option\n");
103 return -EINVAL;
104 }
105
106 if (parse_option_str(str, "debug"))
107 set_bit(EFI_DBG, &efi.flags);
108
109 if (parse_option_str(str, "noruntime"))
110 disable_runtime = true;
111
112 if (parse_option_str(str, "runtime"))
113 disable_runtime = false;
114
115 if (parse_option_str(str, "nosoftreserve"))
116 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
117
118 return 0;
119 }
120 early_param("efi", parse_efi_cmdline);
121
/* /sys/firmware/efi directory kobject; also parent of the efivars mount point. */
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	/* Only emit entries for tables that were actually discovered. */
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	return str - buf;	/* number of bytes written */
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
156
/* Report the firmware word size (64 or 32) in /sys/firmware/efi. */
static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

/* Weak declarations: the definitions live outside this file and may be absent. */
extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);
168
/* Attributes exported under /sys/firmware/efi, filtered by efi_attr_is_visible(). */
static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,	/* sentinel */
};

/* Default visibility hook: show every attribute; may be overridden (weak). */
umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

/* Notifier head for efivar operations; initialized in efisubsys_init(). */
struct blocking_notifier_head efivar_ops_nh;
EXPORT_SYMBOL_GPL(efivar_ops_nh);

/* Backend state for the firmware-provided ("generic") efivar operations. */
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;
194
generic_ops_supported(void)195 static bool generic_ops_supported(void)
196 {
197 unsigned long name_size;
198 efi_status_t status;
199 efi_char16_t name;
200 efi_guid_t guid;
201
202 name_size = sizeof(name);
203
204 if (!efi.get_next_variable)
205 return false;
206 status = efi.get_next_variable(&name_size, &name, &guid);
207 if (status == EFI_UNSUPPORTED)
208 return false;
209
210 return true;
211 }
212
/*
 * Populate generic_ops from the firmware-provided runtime services and
 * register them as the efivars backend.  The set_variable hooks are only
 * wired up when firmware advertises EFI_RT_SUPPORTED_SET_VARIABLE.
 * Returns 0 when generic ops are unsupported, otherwise the result of
 * efivars_register().
 */
static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}
229
generic_ops_unregister(void)230 static void generic_ops_unregister(void)
231 {
232 if (!generic_ops.get_variable)
233 return;
234
235 efivars_unregister(&generic_efivars);
236 }
237
/* Exported wrapper so other modules can (re)register the generic efivar ops. */
void efivars_generic_ops_register(void)
{
	generic_ops_register();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_register);

/* Exported wrapper so other modules can unregister the generic efivar ops. */
void efivars_generic_ops_unregister(void)
{
	generic_ops_unregister();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
249
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
/* Maximum length (including NUL) of the "efivar_ssdt=" variable name. */
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;

/*
 * Record the name of the EFI variable that holds an SSDT override.
 * Refused when the kernel is locked down, since loading arbitrary ACPI
 * tables would bypass lockdown.
 */
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	/* efivar_ssdt is zero-initialized, so a bounded copy stays NUL-terminated. */
	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;	/* option consumed */
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
267
/*
 * Walk all EFI variables, and for each whose name matches the one given
 * via "efivar_ssdt=", load its payload as an ACPI SSDT table.
 *
 * Returns 0 on success (or when there is nothing to do), -ENOMEM or -EIO
 * on failure.
 */
static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;
	int ret = 0;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		/* Enumerate variables, growing the name buffer on demand. */
		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			/* name_size now holds the required size; retry. */
			efi_char16_t *name_tmp =
				krealloc(name, name_size, GFP_KERNEL);
			if (!name_tmp) {
				ret = -ENOMEM;
				goto out;
			}
			name = name_tmp;
			continue;
		}

		/* Compare at most EFIVAR_SSDT_NAME_MAX bytes of the UTF-8 name. */
		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		/* First call with a NULL buffer only queries the payload size. */
		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
			ret = -EIO;
			goto out;
		}

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto out;
		}

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status acpi_ret = acpi_load_table(data, NULL);
			if (ACPI_FAILURE(acpi_ret)) {
				pr_err("efivar_ssdt: failed to load table: %u\n",
				       acpi_ret);
			} else {
				/*
				 * The @data will be in use by ACPI engine,
				 * do not free it!
				 */
				continue;
			}
		} else {
			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
out:
	kfree(name);
	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif
347
#ifdef CONFIG_DEBUG_FS

/* Upper bound on the number of boot services regions exposed in debugfs. */
#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

/*
 * Expose the preserved EFI boot services code/data regions as read-only
 * blobs under /sys/kernel/debug/efi/, capped at EFI_DEBUGFS_MAX_BLOBS.
 */
static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	/* Per-type counters used to number the blob files. */
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		/* Skip regions that cannot be mapped; keep scanning. */
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif
400
/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			/* Not fatal: continue booting without runtime services. */
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		/* An SSDT load failure is logged but does not abort init. */
		error = efivar_ssdt_load();
		if (error)
			pr_err("efi: failed to load SSDT, error %d.\n", error);
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
	    efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
		ovmf_log_probe(efi.ovmf_debug_log);

	return 0;

	/* Error unwinding, in reverse order of the setup steps above. */
err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);
500
efi_find_mirror(void)501 void __init efi_find_mirror(void)
502 {
503 efi_memory_desc_t *md;
504 u64 mirror_size = 0, total_size = 0;
505
506 if (!efi_enabled(EFI_MEMMAP))
507 return;
508
509 for_each_efi_memory_desc(md) {
510 unsigned long long start = md->phys_addr;
511 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
512
513 total_size += size;
514 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
515 memblock_mark_mirror(start, size);
516 mirror_size += size;
517 }
518 }
519 if (mirror_size)
520 pr_info("Memory: %lldM/%lldM mirrored memory\n",
521 mirror_size>>20, total_size>>20);
522 }
523
/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 *
 * Returns 0 and fills *out_md on success, -EINVAL for a missing memory map
 * or NULL @out_md, and -ENOENT when no entry covers @phys_addr.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/* skip bogus entries (including empty ones) */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages <= 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

/* Weak alias so architectures can override the lookup entry point. */
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);
EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
567
568 /*
569 * Calculate the highest address of an efi memory descriptor.
570 */
efi_mem_desc_end(efi_memory_desc_t * md)571 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
572 {
573 u64 size = md->num_pages << EFI_PAGE_SHIFT;
574 u64 end = md->phys_addr + size;
575 return end;
576 }
577
/* Arch hook invoked by efi_mem_reserve(); the default does nothing. */
void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
610
/*
 * GUID -> destination map for the configuration tables this file knows
 * about.  match_config_table() stores a matching table's address through
 * ->ptr and prints ->name when it is non-empty.  The array is terminated
 * by an all-zero (NULL_GUID) entry.
 */
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"TPMFinalLog"	},
	{EFI_CC_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"CCFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_OVMF_DEBUG_LOG
	{OVMF_MEMORY_LOG_TABLE_GUID,		&efi.ovmf_debug_log,	"OvmfDebugLog"	},
#endif
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys	},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table	},
#endif
	{},
};
645
match_config_table(const efi_guid_t * guid,unsigned long table,const efi_config_table_type_t * table_types)646 static __init int match_config_table(const efi_guid_t *guid,
647 unsigned long table,
648 const efi_config_table_type_t *table_types)
649 {
650 int i;
651
652 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
653 if (efi_guidcmp(*guid, table_types[i].guid))
654 continue;
655
656 if (!efi_config_table_is_usable(guid, table)) {
657 if (table_types[i].name[0])
658 pr_cont("(%s=0x%lx unusable) ",
659 table_types[i].name, table);
660 return 1;
661 }
662
663 *(table_types[i].ptr) = table;
664 if (table_types[i].name[0])
665 pr_cont("%s=0x%lx ", table_types[i].name, table);
666 return 1;
667 }
668
669 return 0;
670 }
671
/**
 * reserve_unaccepted - Map and reserve unaccepted configuration table
 * @unaccepted: Pointer to unaccepted memory table
 *
 * memblock_add() makes sure that the table is mapped in direct mapping. During
 * normal boot it happens automatically because the table is allocated from
 * usable memory. But during crashkernel boot only memory specifically reserved
 * for crash scenario is mapped. memblock_add() forces the table to be mapped
 * in crashkernel case.
 *
 * Align the range to the nearest page borders. Ranges smaller than page size
 * are not going to be mapped.
 *
 * memblock_reserve() makes sure that future allocations will not touch the
 * table.
 */

static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, size;

	/* efi.unaccepted still holds the table's physical address here. */
	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);

	memblock_add(start, size);
	memblock_reserve(start, size);
}
699
/*
 * Walk the firmware's configuration table array, record the tables we
 * recognize (common_tables[] plus the optional @arch_tables), then process
 * the Linux-specific ones: RNG seed, memory attributes, TPM event log,
 * MEMRESERVE list, RT properties, initrd location and unaccepted memory.
 *
 * Returns 0 on success or a negative errno.
 */
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");	/* start of the single-line table listing (pr_cont below) */
	for (i = 0; i < count; i++) {
		/* On x86 the entry layout depends on the firmware's bitness. */
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			/* A 32-bit kernel cannot address tables above 4GB. */
			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		/* Map the header first, just to learn the payload size. */
		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			/* Remap with the payload, credit it, then wipe it. */
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		/* Walk the singly linked list of memreserve entries. */
		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			/* Firmware may further restrict the runtime services. */
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	/* Only honour the initrd table when nothing else set a location yet. */
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {

			/* Only version 1 of the table layout is understood. */
			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}
841
efi_systab_check_header(const efi_table_hdr_t * systab_hdr)842 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
843 {
844 if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
845 pr_err("System table signature incorrect!\n");
846 return -EINVAL;
847 }
848
849 return 0;
850 }
851
/* Temporarily map @size bytes of the firmware vendor string for early use. */
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

/* Undo map_fw_vendor(). */
static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
867
/*
 * Print the firmware revision and vendor from the system table header.
 * On 64-bit x86 Apple Macs reporting a revision newer than EFI 1.10, the
 * runtime revision is pinned back to 1.10 (long-standing firmware quirk).
 */
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	/* Narrow the UCS-2 vendor string to single bytes, best effort. */
	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	/*
	 * The low 16 bits hold the minor revision scaled by ten, so
	 * rev / 10 is the minor number and rev % 10 the sub-minor.
	 */
	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}
901
/* Human-readable names for the EFI memory types, indexed by md->type. */
static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
	"Unaccepted",
};
920
/*
 * Format "[<type>|<attributes>]" for @md into @buf (at most @size bytes)
 * and return @buf.  Unknown types print as "[type=N"; attribute sets with
 * bits outside the known mask are dumped as a raw hex value instead of
 * the per-flag mnemonics.
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	/* Stop (truncated) if the type alone filled the buffer. */
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
		     EFI_MEMORY_RUNTIME))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
			 attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
			 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
			 attr & EFI_MEMORY_SP ? "SP" : "",
			 attr & EFI_MEMORY_NV ? "NV" : "",
			 attr & EFI_MEMORY_XP ? "XP" : "",
			 attr & EFI_MEMORY_RP ? "RP" : "",
			 attr & EFI_MEMORY_WP ? "WP" : "",
			 attr & EFI_MEMORY_RO ? "RO" : "",
			 attr & EFI_MEMORY_UCE ? "UCE" : "",
			 attr & EFI_MEMORY_WB ? "WB" : "",
			 attr & EFI_MEMORY_WT ? "WT" : "",
			 attr & EFI_MEMORY_WC ? "WC" : "",
			 attr & EFI_MEMORY_UC ? "UC" : "");
	return buf;
}
970
971 /*
972 * efi_mem_attributes - lookup memmap attributes for physical address
973 * @phys_addr: the physical address to lookup
974 *
975 * Search in the EFI memory map for the region covering
976 * @phys_addr. Returns the EFI memory attributes if the region
977 * was found in the memory map, 0 otherwise.
978 */
efi_mem_attributes(unsigned long phys_addr)979 u64 efi_mem_attributes(unsigned long phys_addr)
980 {
981 efi_memory_desc_t *md;
982
983 if (!efi_enabled(EFI_MEMMAP))
984 return 0;
985
986 for_each_efi_memory_desc(md) {
987 if ((md->phys_addr <= phys_addr) &&
988 (phys_addr < (md->phys_addr +
989 (md->num_pages << EFI_PAGE_SHIFT))))
990 return md->attribute;
991 }
992 return 0;
993 }
994
/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -ENOTSUPP when there is no EFI memory map at all, and -EINVAL
 * when no region covers the address.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
1018
/*
 * Map an EFI status code onto the closest negative errno value:
 * 0 for EFI_SUCCESS, -EINVAL for anything unrecognized.
 */
int efi_status_to_err(efi_status_t status)
{
	switch (status) {
	case EFI_SUCCESS:
		return 0;
	case EFI_INVALID_PARAMETER:
		return -EINVAL;
	case EFI_OUT_OF_RESOURCES:
		return -ENOSPC;
	case EFI_DEVICE_ERROR:
		return -EIO;
	case EFI_WRITE_PROTECTED:
		return -EROFS;
	case EFI_SECURITY_VIOLATION:
		return -EACCES;
	case EFI_NOT_FOUND:
		return -ENOENT;
	case EFI_ABORTED:
		return -EINTR;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(efi_status_to_err);
1055
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
/* Kernel mapping of the head of the MEMRESERVE linked list (NULL until mapped). */
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

/* Map the MEMRESERVE table head; -ENODEV when firmware provided none. */
static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}
1071
/*
 * Carve [addr, addr + size) out of the iomem resource tree as a
 * "reserved" region so it shows up in /proc/iomem and is not handed
 * out again.  GFP_ATOMIC because this may run from non-sleepable
 * context.
 */
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *rsv, *parent;
	int rc = 0;

	rsv = kzalloc(sizeof(*rsv), GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rsv->name = "reserved";
	rsv->flags = IORESOURCE_MEM;
	rsv->start = addr;
	rsv->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, rsv);
	if (parent)
		rc = request_resource(parent, rsv);

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (!rc && IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_reserve(addr, size);

	return rc;
}
1100
/*
 * efi_mem_reserve_persistent - record a reservation that survives kexec
 * @addr:	base of the region to reserve
 * @size:	size of the region in bytes
 *
 * Append [addr, addr + size) to the firmware-visible linked list rooted
 * at efi_memreserve_root, so a kexec'd kernel will also treat the range
 * as reserved, and mirror the reservation into the iomem tree of the
 * running kernel.  Returns 0 on success or a negative errno.
 */
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	/* (void *)ULONG_MAX: mapping the root entry failed at early init */
	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		/*
		 * Atomically claim slot 'index'; fails (returns rsv->size)
		 * when the entry is already full, in which case we walk on
		 * to the next list element.
		 */
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	/* the new entry itself must stay intact across kexec as well */
	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	/*
	 * Only head insertion needs the lock: the lockless walk above never
	 * observes a partially linked entry because ->next is published last.
	 */
	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}
1162
efi_memreserve_root_init(void)1163 static int __init efi_memreserve_root_init(void)
1164 {
1165 if (efi_memreserve_root)
1166 return 0;
1167 if (efi_memreserve_map_root())
1168 efi_memreserve_root = (void *)ULONG_MAX;
1169 return 0;
1170 }
1171 early_initcall(efi_memreserve_root_init);
1172
1173 #ifdef CONFIG_KEXEC
/*
 * Reboot notifier: just before kexec, refresh the UEFI random seed table
 * with fresh entropy so the next kernel does not reuse the seed this
 * kernel already consumed.  The table is mapped twice: first only the
 * header, to learn the seed size, then header plus payload.
 */
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	/* only act on the reboot path that leads into kexec */
	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		/* clamp to the kernel's canonical seed size */
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		/* remap with room for the payload and overwrite it */
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}
1203
/* Hooked into the reboot notifier chain; see update_efi_random_seed(). */
static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};
1207
register_update_efi_random_seed(void)1208 static int __init register_update_efi_random_seed(void)
1209 {
1210 if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1211 return 0;
1212 return register_reboot_notifier(&efi_random_seed_nb);
1213 }
1214 late_initcall(register_update_efi_random_seed);
1215 #endif
1216