xref: /linux/drivers/firmware/efi/efi.c (revision 948ef73f7ec39622ebd27bba4e94d78a983109f6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 #include <linux/notifier.h>
36 
37 #include <asm/early_ioremap.h>
38 
/*
 * Global EFI state.  All configuration table addresses start out as
 * EFI_INVALID_TABLE_ADDR and are filled in from the firmware's
 * configuration table array by efi_config_parse_tables() below.
 */
struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
	.ovmf_debug_log         = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);
60 
/*
 * Physical addresses of assorted Linux-specific configuration tables,
 * matched by GUID in common_tables[].  The __initdata ones are only
 * consumed during early boot by efi_config_parse_tables().
 */
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

/* defined by the EFI generic stub (CONFIG_EFI_GENERIC_STUB) */
extern unsigned long primary_display_table;
67 
/*
 * Statically initialized mm_struct for EFI (efi_mm).  Its users live
 * outside this file; only the initialization is done here.
 */
struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.user_ns		= &init_user_ns,
#ifdef CONFIG_SCHED_MM_CID
	.mm_cid.lock		= __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock),
#endif
	.flexible_array		= MM_STRUCT_FLEXIBLE_ARRAY_INIT,
};

/* ordered workqueue for EFI runtime service calls; created in efisubsys_init() */
struct workqueue_struct *efi_rts_wq;
84 
/* Runtime services start out disabled when CONFIG_EFI_DISABLE_RUNTIME=y */
static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);

/* "noefi" on the kernel command line disables EFI runtime services */
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);
92 
/* Whether EFI runtime services were disabled (config or command line). */
bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

/* true unless "efi=nosoftreserve" set EFI_MEM_NO_SOFT_RESERVE */
bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}
102 
/*
 * Parse the "efi=" kernel command line option.  Each sub-option is an
 * independent flag; note that "runtime" is tested after "noruntime",
 * so "runtime" wins when both appear in the same string.
 */
static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	/* "debug" sets EFI_DBG, which among other things gates efi_debugfs_init() */
	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	/* tested after "noruntime" on purpose: "runtime" overrides it */
	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	/* "nosoftreserve" disables soft-reserve handling, see __efi_soft_reserve_enabled() */
	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);
125 
/* /sys/firmware/efi directory kobject; created in efisubsys_init() */
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	/* defensive: sysfs should never hand us NULL pointers here */
	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	/* number of bytes written into @buf */
	return str - buf;
}

/* mode 0400: table addresses are readable by root only */
static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
160 
fw_platform_size_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)161 static ssize_t fw_platform_size_show(struct kobject *kobj,
162 				     struct kobj_attribute *attr, char *buf)
163 {
164 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
165 }
166 
/* optional attributes provided by arch code; weak so they may be absent */
extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,	/* sentinel */
};

/* weak default visibility callback: every attribute keeps its declared mode */
umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};
192 
/*
 * Notifier head exported for efivar operations changes; subscribers
 * register from outside this file (initialized in efisubsys_init()).
 */
struct blocking_notifier_head efivar_ops_nh;
EXPORT_SYMBOL_GPL(efivar_ops_nh);

/* generic efivars backend wired to the firmware's runtime variable services */
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;
198 
generic_ops_supported(void)199 static bool generic_ops_supported(void)
200 {
201 	unsigned long name_size;
202 	efi_status_t status;
203 	efi_char16_t name;
204 	efi_guid_t guid;
205 
206 	name_size = sizeof(name);
207 
208 	if (!efi.get_next_variable)
209 		return false;
210 	status = efi.get_next_variable(&name_size, &name, &guid);
211 	if (status == EFI_UNSUPPORTED)
212 		return false;
213 
214 	return true;
215 }
216 
/*
 * Register the generic efivars backend.  Returns 0 both on success and
 * when the services are unsupported (in which case nothing is registered).
 */
static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	/* only wire up the setters when SetVariable is advertised as supported */
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}
233 
generic_ops_unregister(void)234 static void generic_ops_unregister(void)
235 {
236 	if (!generic_ops.get_variable)
237 		return;
238 
239 	efivars_unregister(&generic_efivars);
240 }
241 
/*
 * Exported wrappers so modules can (un)register the generic efivars
 * backend.  NOTE: the return value of generic_ops_register() is ignored.
 */
void efivars_generic_ops_register(void)
{
	generic_ops_register();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_register);

void efivars_generic_ops_unregister(void)
{
	generic_ops_unregister();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
253 
254 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
255 #define EFIVAR_SSDT_NAME_MAX	16UL
256 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
efivar_ssdt_setup(char * str)257 static int __init efivar_ssdt_setup(char *str)
258 {
259 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
260 
261 	if (ret)
262 		return ret;
263 
264 	if (strlen(str) < sizeof(efivar_ssdt))
265 		memcpy(efivar_ssdt, str, strlen(str));
266 	else
267 		pr_warn("efivar_ssdt: name too long: %s\n", str);
268 	return 1;
269 }
270 __setup("efivar_ssdt=", efivar_ssdt_setup);
271 
/*
 * Enumerate all EFI variables, and for each one whose name matches the
 * "efivar_ssdt=" parameter, load its contents as an ACPI SSDT table.
 * Returns 0 on success and when no variable name was configured.
 */
static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;
	int ret = 0;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			/* enumeration finished */
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			/* grow the name buffer to the size the firmware asked for */
			efi_char16_t *name_tmp =
				krealloc(name, name_size, GFP_KERNEL);
			if (!name_tmp) {
				ret = -ENOMEM;
				goto out;
			}
			name = name_tmp;
			continue;
		}

		/* convert the UCS-2 name to UTF-8 and compare it */
		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		/* first call with NULL data only queries the payload size */
		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
			ret = -EIO;
			goto out;
		}

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto out;
		}

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status acpi_ret = acpi_load_table(data, NULL);
			if (ACPI_FAILURE(acpi_ret)) {
				pr_err("efivar_ssdt: failed to load table: %u\n",
				       acpi_ret);
			} else {
				/*
				 * The @data will be in use by ACPI engine,
				 * do not free it!
				 */
				continue;
			}
		} else {
			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
out:
	kfree(name);
	return ret;
}
348 #else
/* stub used when CONFIG_EFI_CUSTOM_SSDT_OVERLAYS is disabled */
static inline int efivar_ssdt_load(void) { return 0; }
350 #endif
351 
352 #ifdef CONFIG_DEBUG_FS
353 
/* at most this many boot services regions are exposed as debugfs blobs */
#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
357 
efi_debugfs_init(void)358 static void __init efi_debugfs_init(void)
359 {
360 	struct dentry *efi_debugfs;
361 	efi_memory_desc_t *md;
362 	char name[32];
363 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
364 	int i = 0;
365 
366 	efi_debugfs = debugfs_create_dir("efi", NULL);
367 	if (IS_ERR(efi_debugfs))
368 		return;
369 
370 	for_each_efi_memory_desc(md) {
371 		switch (md->type) {
372 		case EFI_BOOT_SERVICES_CODE:
373 			snprintf(name, sizeof(name), "boot_services_code%d",
374 				 type_count[md->type]++);
375 			break;
376 		case EFI_BOOT_SERVICES_DATA:
377 			snprintf(name, sizeof(name), "boot_services_data%d",
378 				 type_count[md->type]++);
379 			break;
380 		default:
381 			continue;
382 		}
383 
384 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
385 			pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
386 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
387 			break;
388 		}
389 
390 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
391 		debugfs_blob[i].data = memremap(md->phys_addr,
392 						debugfs_blob[i].size,
393 						MEMREMAP_WB);
394 		if (!debugfs_blob[i].data)
395 			continue;
396 
397 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
398 		i++;
399 	}
400 }
401 #else
/* stub used when CONFIG_DEBUG_FS is disabled */
static inline void efi_debugfs_init(void) {}
403 #endif
404 
405 /*
406  * We register the efi subsystem with the firmware subsystem and the
407  * efivars subsystem with the efi subsystem, if the system was booted with
408  * EFI.
409  */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	/* nothing to register if we did not boot via EFI */
	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_runtime", WQ_SYSFS);
		if (!efi_rts_wq) {
			/* degrade gracefully: keep booting without runtime services */
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		/* SSDT load failure is reported but does not abort the init */
		error = efivar_ssdt_load();
		if (error)
			pr_err("efi: failed to load SSDT, error %d.\n", error);
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
	    efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
		ovmf_log_probe(efi.ovmf_debug_log);

	return 0;

	/* error unwinding, in reverse order of the setup above */
err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);
504 
efi_find_mirror(void)505 void __init efi_find_mirror(void)
506 {
507 	efi_memory_desc_t *md;
508 	u64 mirror_size = 0, total_size = 0;
509 
510 	if (!efi_enabled(EFI_MEMMAP))
511 		return;
512 
513 	for_each_efi_memory_desc(md) {
514 		unsigned long long start = md->phys_addr;
515 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
516 
517 		total_size += size;
518 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
519 			memblock_mark_mirror(start, size);
520 			mirror_size += size;
521 		}
522 	}
523 	if (mirror_size)
524 		pr_info("Memory: %lldM/%lldM mirrored memory\n",
525 			mirror_size>>20, total_size>>20);
526 }
527 
528 /*
529  * Find the efi memory descriptor for a given physical address.  Given a
530  * physical address, determine if it exists within an EFI Memory Map entry,
531  * and if so, populate the supplied memory descriptor with the appropriate
532  * data.
533  */
/*
 * Find the EFI memory descriptor covering @phys_addr and copy it into
 * @out_md.  Returns 0 on success, -EINVAL on bad arguments or missing
 * memory map, -ENOENT when no descriptor covers the address.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/*
		 * Skip bogus entries: unaligned base, empty region, or a
		 * size that would overflow past the end of the address space.
		 * (num_pages is unsigned, so "== 0" is the only empty case.)
		 */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages == 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}
567 
/* efi_mem_desc_lookup() defaults to __efi_mem_desc_lookup(); weak so arch code may override */
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);
EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
571 
572 /*
573  * Calculate the highest address of an efi memory descriptor.
574  */
efi_mem_desc_end(efi_memory_desc_t * md)575 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
576 {
577 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
578 	u64 end = md->phys_addr + size;
579 	return end;
580 }
581 
/* weak no-op default; architectures override it (see comment in efi_mem_reserve()) */
void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
583 
584 /**
585  * efi_mem_reserve - Reserve an EFI memory region
586  * @addr: Physical address to reserve
587  * @size: Size of reservation
588  *
589  * Mark a region as reserved from general kernel allocation and
590  * prevent it being released by efi_free_boot_services().
591  *
 * This function should be called by drivers once they've parsed EFI
593  * configuration tables to figure out where their data lives, e.g.
594  * efi_esrt_init().
595  */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	/* reserve the range, or mark an existing reservation as kernel-owned */
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve_kern(addr, size);
	else
		memblock_reserved_mark_kern(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
616 
/*
 * GUID -> destination mapping for configuration tables of interest,
 * consumed by match_config_table().  Entries without a name string are
 * captured silently (not printed in the boot log).
 */
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"TPMFinalLog"	},
	{EFI_CC_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"CCFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_OVMF_DEBUG_LOG
	{OVMF_MEMORY_LOG_TABLE_GUID,		&efi.ovmf_debug_log,	"OvmfDebugLog"	},
#endif
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_PRIMARY_DISPLAY_TABLE_GUID,	&primary_display_table			},
#endif
	{},	/* NULL_GUID terminator */
};
651 
/*
 * Match @guid/@table against @table_types and record the table address.
 * Returns 1 when the GUID was recognized (even if the table turned out
 * to be unusable), 0 when it is unknown to this list.
 */
static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	/* the list ends with a zero-filled (NULL_GUID) sentinel entry */
	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (efi_guidcmp(*guid, table_types[i].guid))
			continue;

		if (!efi_config_table_is_usable(guid, table)) {
			if (table_types[i].name[0])
				pr_cont("(%s=0x%lx unusable) ",
					table_types[i].name, table);
			return 1;
		}

		/* store the address at the entry's registered destination */
		*(table_types[i].ptr) = table;
		if (table_types[i].name[0])
			pr_cont("%s=0x%lx ", table_types[i].name, table);
		return 1;
	}

	return 0;
}
677 
678 /**
679  * reserve_unaccepted - Map and reserve unaccepted configuration table
680  * @unaccepted: Pointer to unaccepted memory table
681  *
682  * memblock_add() makes sure that the table is mapped in direct mapping. During
683  * normal boot it happens automatically because the table is allocated from
684  * usable memory. But during crashkernel boot only memory specifically reserved
685  * for crash scenario is mapped. memblock_add() forces the table to be mapped
686  * in crashkernel case.
687  *
688  * Align the range to the nearest page borders. Ranges smaller than page size
689  * are not going to be mapped.
690  *
691  * memblock_reserve() makes sure that future allocations will not touch the
692  * table.
693  */
694 
static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, end;

	/* round outward to whole pages; see the comment above */
	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	end = PAGE_ALIGN(efi.unaccepted + sizeof(*unaccepted) + unaccepted->size);

	memblock_add(start, end - start);
	memblock_reserve(start, end - start);
}
705 
/*
 * Parse the firmware's configuration table array: capture the tables we
 * recognize (common_tables[] plus optional @arch_tables), then consume
 * the Linux-specific early-boot tables (RNG seed, memreserve list,
 * RT properties, initrd descriptor, unaccepted-memory table).
 */
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		/* on x86 the entry layout depends on the firmware's bitness */
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			/* a 32-bit kernel cannot address tables above 4GB */
			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		/* map the header first to learn the payload size */
		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			/* remap including the payload, credit it, then wipe it */
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		/* walk the linked list of memreserve entries (rsv->next) */
		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		/* the RT properties table may restrict the runtime services mask */
		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			/* table is zeroed after use — presumably so the initrd
			 * is consumed only once (e.g. across kexec); NOTE(review) */
			tbl->base = tbl->size = 0;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {

			/* only version 1 of the table layout is understood */
			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}
848 
efi_systab_check_header(const efi_table_hdr_t * systab_hdr)849 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
850 {
851 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
852 		pr_err("System table signature incorrect!\n");
853 		return -EINVAL;
854 	}
855 
856 	return 0;
857 }
858 
/* Temporarily map the firmware vendor string (read-only) for reporting. */
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

/* Counterpart of map_fw_vendor(); must be passed the same size. */
static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
874 
/*
 * Print the EFI revision and firmware vendor from the system table
 * header, and cap the runtime interface version on Apple Macs.
 */
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		/* narrowing copy: each UCS-2 character is truncated to one byte */
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	/* revision: major in the upper 16 bits, minor*10 (+sub-minor) below */
	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	/* Apple firmware newer than EFI 1.10 gets pinned to the 1.10 runtime
	 * interface — presumably a firmware-quirk workaround; NOTE(review) */
	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}
908 
/* human-readable names, indexed by EFI memory descriptor type (max 12 chars + NUL) */
static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
	"Unaccepted",
};
927 
/*
 * Format an EFI memory descriptor's type name and attribute bits into
 * @buf for memory map dumps.  Returns @buf.
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	/* unknown types are printed numerically instead of by name */
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;	/* truncated: skip the attribute part */

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	/* unrecognized attribute bits set: fall back to the raw hex mask */
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
		     EFI_MEMORY_RUNTIME))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_HOT_PLUGGABLE	? "HP"  : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}
977 
978 /*
979  * efi_mem_attributes - lookup memmap attributes for physical address
980  * @phys_addr: the physical address to lookup
981  *
982  * Search in the EFI memory map for the region covering
983  * @phys_addr. Returns the EFI memory attributes if the region
984  * was found in the memory map, 0 otherwise.
985  */
efi_mem_attributes(unsigned long phys_addr)986 u64 efi_mem_attributes(unsigned long phys_addr)
987 {
988 	efi_memory_desc_t md;
989 
990 	if (efi_mem_desc_lookup(phys_addr, &md))
991 		return 0;
992 
993 	return md.attribute;
994 }
995 
996 /*
997  * efi_mem_type - lookup memmap type for physical address
998  * @phys_addr: the physical address to lookup
999  *
1000  * Search in the EFI memory map for the region covering @phys_addr.
1001  * Returns the EFI memory type if the region was found in the memory
1002  * map, -EINVAL otherwise.
1003  */
efi_mem_type(unsigned long phys_addr)1004 int efi_mem_type(unsigned long phys_addr)
1005 {
1006 	efi_memory_desc_t md;
1007 
1008 	if (!efi_enabled(EFI_MEMMAP) && !efi_enabled(EFI_PARAVIRT))
1009 		return -ENOTSUPP;
1010 
1011 	if (efi_mem_desc_lookup(phys_addr, &md))
1012 		return -EINVAL;
1013 
1014 	return md.type;
1015 }
1016 
/*
 * efi_status_to_err - map an EFI_* status code onto a Linux errno
 * @status: EFI status code returned by a firmware call
 *
 * Returns 0 for EFI_SUCCESS and a negative errno for every failure
 * code; unrecognized statuses map to -EINVAL.
 */
int efi_status_to_err(efi_status_t status)
{
	switch (status) {
	case EFI_SUCCESS:
		return 0;
	case EFI_INVALID_PARAMETER:
		return -EINVAL;
	case EFI_OUT_OF_RESOURCES:
		return -ENOSPC;
	case EFI_DEVICE_ERROR:
		return -EIO;
	case EFI_WRITE_PROTECTED:
		return -EROFS;
	case EFI_SECURITY_VIOLATION:
		return -EACCES;
	case EFI_NOT_FOUND:
		return -ENOENT;
	case EFI_ABORTED:
		return -EINTR;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(efi_status_to_err);
1053 
/* Serializes insertion of new entries at the head of the memreserve list */
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
/*
 * Kernel mapping of the head of the LINUX_EFI_MEMRESERVE table.
 * NULL means "not mapped yet"; (void *)ULONG_MAX means the mapping
 * failed or the table is absent (see efi_memreserve_root_init()).
 */
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
1056 
efi_memreserve_map_root(void)1057 static int __init efi_memreserve_map_root(void)
1058 {
1059 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
1060 		return -ENODEV;
1061 
1062 	efi_memreserve_root = memremap(mem_reserve,
1063 				       sizeof(*efi_memreserve_root),
1064 				       MEMREMAP_WB);
1065 	if (WARN_ON_ONCE(!efi_memreserve_root))
1066 		return -ENOMEM;
1067 	return 0;
1068 }
1069 
/*
 * efi_mem_reserve_iomem - insert a "reserved" region into the iomem tree
 * @addr: physical base of the region
 * @size: size of the region in bytes
 *
 * Marks [@addr, @addr + @size) as reserved in the iomem resource tree,
 * and mirrors the reservation into memblock on architectures that keep
 * memblock data around after boot. Returns 0 on success or a negative
 * errno.
 */
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	/* GFP_ATOMIC: see the comment below - callers may not be able to sleep */
	res = kzalloc_obj(struct resource, GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name	= "reserved";
	res->flags	= IORESOURCE_MEM;
	res->start	= addr;
	res->end	= addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	/*
	 * No conflict means the region was inserted at the top level
	 * already; otherwise retry as a child of the conflicting region.
	 */
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}
1098 
/*
 * efi_mem_reserve_persistent - reserve memory across kexec
 * @addr: physical base of the region
 * @size: size of the region in bytes
 *
 * Records [@addr, @addr + @size) in the LINUX_EFI_MEMRESERVE linked
 * list so a kexec'd kernel will honour the reservation, and reserves
 * the region in this kernel's iomem tree as well.
 *
 * __ref: may call the __init function efi_memreserve_map_root(); that
 * path is only reachable while efi_memreserve_root is still NULL, and
 * efi_memreserve_root_init() (an early initcall) guarantees the pointer
 * is set - possibly to the failure sentinel - before init memory goes.
 */
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	/* (void *)ULONG_MAX: the root table lookup failed for good */
	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		/*
		 * Claim a slot without taking the lock: add_unless never
		 * pushes count past size, so a full entry stays untouched
		 * and concurrent claimants get distinct indices.
		 */
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	/* the list entry itself must survive kexec, so reserve it too */
	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	/* publish the fully-initialized entry at the head of the list */
	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}
1160 
efi_memreserve_root_init(void)1161 static int __init efi_memreserve_root_init(void)
1162 {
1163 	if (efi_memreserve_root)
1164 		return 0;
1165 	if (efi_memreserve_map_root())
1166 		efi_memreserve_root = (void *)ULONG_MAX;
1167 	return 0;
1168 }
1169 early_initcall(efi_memreserve_root_init);
1170 
1171 #ifdef CONFIG_KEXEC
update_efi_random_seed(struct notifier_block * nb,unsigned long code,void * unused)1172 static int update_efi_random_seed(struct notifier_block *nb,
1173 				  unsigned long code, void *unused)
1174 {
1175 	struct linux_efi_random_seed *seed;
1176 	u32 size = 0;
1177 
1178 	if (!kexec_in_progress)
1179 		return NOTIFY_DONE;
1180 
1181 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1182 	if (seed != NULL) {
1183 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1184 		memunmap(seed);
1185 	} else {
1186 		pr_err("Could not map UEFI random seed!\n");
1187 	}
1188 	if (size > 0) {
1189 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1190 				MEMREMAP_WB);
1191 		if (seed != NULL) {
1192 			seed->size = size;
1193 			get_random_bytes(seed->bits, seed->size);
1194 			memunmap(seed);
1195 		} else {
1196 			pr_err("Could not map UEFI random seed!\n");
1197 		}
1198 	}
1199 	return NOTIFY_DONE;
1200 }
1201 
/* Reboot notifier that refreshes the EFI RNG seed just before kexec */
static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};
1205 
register_update_efi_random_seed(void)1206 static int __init register_update_efi_random_seed(void)
1207 {
1208 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1209 		return 0;
1210 	return register_reboot_notifier(&efi_random_seed_nb);
1211 }
1212 late_initcall(register_update_efi_random_seed);
1213 #endif
1214