xref: /linux/arch/x86/platform/efi/efi_64.c (revision 9f2bb6c7b364f186aa37c524f6df33bd488d4efa)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * x86_64 specific EFI support functions
4  * Based on Extensible Firmware Interface Specification version 1.0
5  *
6  * Copyright (C) 2005-2008 Intel Co.
7  *	Fenghua Yu <fenghua.yu@intel.com>
8  *	Bibo Mao <bibo.mao@intel.com>
9  *	Chandramouli Narayanan <mouli@linux.intel.com>
10  *	Huang Ying <ying.huang@intel.com>
11  *
12  * Code to convert EFI to E820 map has been implemented in elilo bootloader
13  * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
14  * is setup appropriately for EFI runtime code.
15  * - mouli 06/14/2007.
16  *
17  */
18 
19 #define pr_fmt(fmt) "efi: " fmt
20 
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/mm.h>
24 #include <linux/types.h>
25 #include <linux/spinlock.h>
26 #include <linux/memblock.h>
27 #include <linux/ioport.h>
28 #include <linux/mc146818rtc.h>
29 #include <linux/efi.h>
30 #include <linux/export.h>
31 #include <linux/uaccess.h>
32 #include <linux/io.h>
33 #include <linux/reboot.h>
34 #include <linux/slab.h>
35 #include <linux/ucs2_string.h>
36 #include <linux/cc_platform.h>
37 #include <linux/sched/task.h>
38 
39 #include <asm/setup.h>
40 #include <asm/page.h>
41 #include <asm/e820/api.h>
42 #include <asm/tlbflush.h>
43 #include <asm/proto.h>
44 #include <asm/efi.h>
45 #include <asm/cacheflush.h>
46 #include <asm/fixmap.h>
47 #include <asm/realmode.h>
48 #include <asm/time.h>
49 #include <asm/pgalloc.h>
50 #include <asm/sev.h>
51 
/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;
/* The mm that was live before efi_enter_mm() borrowed efi_mm */
static struct mm_struct *efi_prev_mm;
/* Saved CR4.LASS bit, restored by efi_enable_lass() after the EFI call */
static unsigned long efi_cr4_lass;
59 
/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 *
 * Returns 0 on success, -ENOMEM if any level of the hierarchy could not
 * be allocated.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, pgd_allocation_order());
	if (!efi_pgd)
		goto fail;

	/* Pre-populate the P4D/PUD levels covering the EFI VA region */
	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d)
		goto free_pgd;

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud)
		goto free_p4d;

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);
	set_notrack_mm(&efi_mm);

	return 0;

free_p4d:
	/* A separate p4d page only exists with 5-level paging enabled */
	if (pgtable_l5_enabled())
		free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
	free_pages((unsigned long)efi_pgd, pgd_allocation_order());
fail:
	return -ENOMEM;
}
105 
/*
 * Add low kernel mappings for passing arguments to EFI functions.
 *
 * Copies the shareable parts of the kernel page tables into the EFI pgd:
 * everything from PAGE_OFFSET up to, and around, the private EFI VA
 * region (EFI_VA_END .. EFI_VA_START), which is deliberately skipped.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;

	/* Share the PGD entries from PAGE_OFFSET up to the EFI region's slot */
	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/* Within the EFI region's PGD slot, share the P4D entries below it */
	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	/* PUD entries below EFI_VA_END ... */
	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	/* ... and PUD entries above EFI_VA_START */
	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
152 
153 /*
154  * Wrapper for slow_virt_to_phys() that handles NULL addresses.
155  */
156 static inline phys_addr_t
virt_to_phys_or_null_size(void * va,unsigned long size)157 virt_to_phys_or_null_size(void *va, unsigned long size)
158 {
159 	phys_addr_t pa;
160 
161 	if (!va)
162 		return 0;
163 
164 	if (virt_addr_valid(va))
165 		return virt_to_phys(va);
166 
167 	pa = slow_virt_to_phys(va);
168 
169 	/* check if the object crosses a page boundary */
170 	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
171 		return 0;
172 
173 	return pa;
174 }
175 
/* Shorthand for single objects whose size is known from the pointer type */
#define virt_to_phys_or_null(addr)				\
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
/*
 * Populate the EFI page tables used around SetVirtualAddressMap():
 * ident-map the new memmap, the first physical page, and the GHCBs/CAs
 * (under SEV-ES). In mixed mode additionally prepare a < 4GB stack,
 * unmap the kernel text from the 1:1 map, and map rodata plus the
 * thunk's return trampoline.
 *
 * Returns 0 on success, 1 on any mapping failure.
 */
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	extern const u8 __efi64_thunk_ret_tramp[];
	unsigned long pfn, text, pf, rodata, tramp;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical page,
	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during early
	 * boot with such firmware. We are free to hand this page to the BIOS,
	 * as trim_bios_range() will reserve the first page and isolate it away
	 * from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When SEV-ES is active, the GHCB as set by the kernel will be used
	 * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
	 */
	if (sev_es_efi_map_ghcbs_cas(pgd)) {
		pr_err("Failed to create 1:1 mapping for the GHCBs and CAs!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!efi_is_mixed())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page) {
		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
		return 1;
	}

	efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */

	/* Remove the kernel text from the 1:1 mapping entirely */
	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);

	if (kernel_unmap_pages_in_pgd(pgd, text, npages)) {
		pr_err("Failed to unmap kernel text 1:1 mapping\n");
		return 1;
	}

	/* Map rodata 1:1, read-only as far as NX allows (no _PAGE_RW) */
	npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
	rodata = __pa(__start_rodata);
	pfn = rodata >> PAGE_SHIFT;

	pf = _PAGE_NX | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
		pr_err("Failed to map kernel rodata 1:1\n");
		return 1;
	}

	/* The return trampoline must stay executable: no _PAGE_NX here */
	tramp = __pa(__efi64_thunk_ret_tramp);
	pfn = tramp >> PAGE_SHIFT;

	pf = _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, tramp, 1, pf)) {
		pr_err("Failed to map mixed mode return trampoline\n");
		return 1;
	}

	return 0;
}
271 
__map_region(efi_memory_desc_t * md,u64 va)272 static void __init __map_region(efi_memory_desc_t *md, u64 va)
273 {
274 	unsigned long flags = _PAGE_RW;
275 	unsigned long pfn;
276 	pgd_t *pgd = efi_mm.pgd;
277 
278 	/*
279 	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
280 	 * executable images in memory that consist of both R-X and
281 	 * RW- sections, so we cannot apply read-only or non-exec
282 	 * permissions just yet. However, modern EFI systems provide
283 	 * a memory attributes table that describes those sections
284 	 * with the appropriate restricted permissions, which are
285 	 * applied in efi_runtime_update_mappings() below. All other
286 	 * regions can be mapped non-executable at this point, with
287 	 * the exception of boot services code regions, but those will
288 	 * be unmapped again entirely in efi_free_boot_services().
289 	 */
290 	if (md->type != EFI_BOOT_SERVICES_CODE &&
291 	    md->type != EFI_RUNTIME_SERVICES_CODE)
292 		flags |= _PAGE_NX;
293 
294 	if (!(md->attribute & EFI_MEMORY_WB))
295 		flags |= _PAGE_PCD;
296 
297 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
298 	    md->type != EFI_MEMORY_MAPPED_IO)
299 		flags |= _PAGE_ENC;
300 
301 	pfn = md->phys_addr >> PAGE_SHIFT;
302 	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
303 		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
304 			   md->phys_addr, va);
305 }
306 
/*
 * Map @md both 1:1 and -- for native 64-bit firmware -- at a fresh
 * virtual address carved top-down out of the EFI VA window. The chosen
 * VA is recorded in md->virt_addr.
 */
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (efi_is_mixed()) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		/* aligning to the offset may move us up; back off by 2M */
		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	/* Dropping below EFI_VA_END means the VA window is exhausted */
	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
354 
/*
 * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges.
 * md->virt_addr is the original virtual address which had been mapped in kexec
 * 1st kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);	/* 1:1 catch-all mapping */
	__map_region(md, md->virt_addr);	/* VA inherited from the 1st kernel */
}
365 
/* Record where the EFI setup_data payload lives (@data_len is unused). */
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	/* Skip the setup_data header; efi_setup points at the payload */
	efi_setup = phys_addr + sizeof(struct setup_data);
}
370 
efi_update_mappings(efi_memory_desc_t * md,unsigned long pf)371 static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
372 {
373 	unsigned long pfn;
374 	pgd_t *pgd = efi_mm.pgd;
375 	int err1, err2;
376 
377 	/* Update the 1:1 mapping */
378 	pfn = md->phys_addr >> PAGE_SHIFT;
379 	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
380 	if (err1) {
381 		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
382 			   md->phys_addr, md->virt_addr);
383 	}
384 
385 	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
386 	if (err2) {
387 		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
388 			   md->phys_addr, md->virt_addr);
389 	}
390 
391 	return err1 || err2;
392 }
393 
/*
 * Whether IBT must be disabled around EFI runtime calls. Defaults to
 * true; cleared in efi_runtime_update_mappings() when a memory
 * attributes table is present, then re-set per region lacking IBT
 * support (see efi_update_mem_attr()).
 */
bool efi_disable_ibt_for_runtime __ro_after_init = true;
efi_update_mem_attr(struct mm_struct * mm,efi_memory_desc_t * md,bool has_ibt)396 static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md,
397 				      bool has_ibt)
398 {
399 	unsigned long pf = 0;
400 
401 	efi_disable_ibt_for_runtime |= !has_ibt;
402 
403 	if (md->attribute & EFI_MEMORY_XP)
404 		pf |= _PAGE_NX;
405 
406 	if (!(md->attribute & EFI_MEMORY_RO))
407 		pf |= _PAGE_RW;
408 
409 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
410 		pf |= _PAGE_ENC;
411 
412 	return efi_update_mappings(md, pf);
413 }
414 
efi_runtime_update_mappings(void)415 void __init efi_runtime_update_mappings(void)
416 {
417 	if (efi_enabled(EFI_MEM_ATTR)) {
418 		efi_disable_ibt_for_runtime = false;
419 		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
420 	}
421 }
422 
/* Dump the EFI page tables; a no-op unless CONFIG_EFI_PGT_DUMP is set. */
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}
429 
/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * in a kernel thread and user context. Preemption needs to remain disabled
 * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
 * can not change under us.
 * It should be ensured that there are no concurrent calls to this function.
 */
static void efi_enter_mm(void)
{
	/* Remember the interrupted mm so efi_leave_mm() can restore it */
	efi_prev_mm = use_temporary_mm(&efi_mm);
}
441 
/* Restore the mm that was live before efi_enter_mm(). */
static void efi_leave_mm(void)
{
	unuse_temporary_mm(efi_prev_mm);
}
446 
/*
 * Toggle LASS to allow EFI to access any 1:1 mapped region in the lower
 * half.
 *
 * Disable LASS only after switching to EFI-mm, as userspace is not
 * mapped in it. Similar to EFI-mm, these rely on preemption being
 * disabled and the calls being serialized.
 */

static void efi_disable_lass(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_LASS))
		return;

	lockdep_assert_preemption_disabled();

	/* Save current CR4.LASS state */
	efi_cr4_lass = cr4_read_shadow() & X86_CR4_LASS;
	/* Clears nothing if LASS was already off (efi_cr4_lass == 0) */
	cr4_clear_bits(efi_cr4_lass);
}
467 
/* Counterpart to efi_disable_lass(): restore the saved CR4.LASS state. */
static void efi_enable_lass(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_LASS))
		return;

	lockdep_assert_preemption_disabled();

	/* Reprogram CR4.LASS only if it was set earlier */
	cr4_set_bits(efi_cr4_lass);
}
478 
/*
 * Prepare the calling context for an EFI runtime service call: sync
 * page tables, save FPU state, restrict speculation, switch to efi_mm
 * and finally drop LASS (which must come after the mm switch).
 */
void arch_efi_call_virt_setup(void)
{
	efi_sync_low_kernel_mappings();
	efi_fpu_begin();
	firmware_restrict_branch_speculation_start();
	efi_enter_mm();
	efi_disable_lass();
}
487 
/* Undo arch_efi_call_virt_setup(), in exact reverse order. */
void arch_efi_call_virt_teardown(void)
{
	efi_enable_lass();
	efi_leave_mm();
	firmware_restrict_branch_speculation_end();
	efi_fpu_end();
}
495 
496 static DEFINE_SPINLOCK(efi_runtime_lock);
497 
/*
 * DS and ES contain user values.  We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS.  There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)						\
({									\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
	/* Move a set error bit from bit 31 (32-bit status) to bit 63 */\
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})
523 
/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 *
 * Wraps __efi_thunk() with the full arch setup/teardown sequence
 * (page tables, FPU, speculation controls, efi_mm, LASS).
 */
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})
544 
/*
 * Mixed-mode SetVirtualAddressMap(): invoked once at boot, from within
 * efi_mm with interrupts off, via the raw 32-bit thunk (no full
 * arch_efi_call_virt_setup() here -- the caller controls the context).
 */
static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
				  unsigned long descriptor_size,
				  u32 descriptor_version,
				  efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_enter_mm();

	status = __efi_thunk(set_virtual_address_map, memory_map_size,
			     descriptor_size, descriptor_version, virtual_map);

	efi_leave_mm();
	local_irq_restore(flags);

	return status;
}
567 
/* GetTime() is not routed through the mixed-mode thunk. */
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return EFI_UNSUPPORTED;
}
572 
/* SetTime() is not routed through the mixed-mode thunk. */
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
577 
/* GetWakeupTime() is not routed through the mixed-mode thunk. */
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
584 
/* SetWakeupTime() is not routed through the mixed-mode thunk. */
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
590 
/*
 * Byte size of the UCS-2 variable @name, bounded by EFI_VAR_NAME_LEN.
 * NOTE(review): the +1 yields an odd byte count; presumably deliberate
 * slack for the page-crossing check -- confirm against ucs2_strsize().
 */
static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}
595 
/*
 * Mixed-mode GetVariable(): every pointer handed to the 32-bit firmware
 * must be a 32-bit addressable physical address, so all arguments are
 * converted (and the vendor GUID copied to an aligned stack buffer)
 * under the runtime lock before thunking.
 */
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	u8 buf[24] __aligned(8);
	/* 8-byte-aligned on-stack copy of the vendor GUID */
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	/* NULL or page-crossing name/data buffers cannot be passed through */
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_variable, phys_name, phys_vendor,
				   phys_attr, phys_data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
627 
/*
 * Mixed-mode SetVariable(): see efi_thunk_get_variable() for why all
 * pointers are converted to 32-bit physical addresses first.
 */
static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u8 buf[24] __aligned(8);
	/* 8-byte-aligned on-stack copy of the vendor GUID */
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	/* NULL or page-crossing name/data buffers cannot be passed through */
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
656 
/*
 * Non-blocking variant of efi_thunk_set_variable(): returns
 * EFI_NOT_READY instead of spinning when the runtime lock is contended.
 */
static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
{
	u8 buf[24] __aligned(8);
	/* 8-byte-aligned on-stack copy of the vendor GUID */
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	/* NULL or page-crossing name/data buffers cannot be passed through */
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
687 
/*
 * Mixed-mode GetNextVariableName(): the firmware updates both the name
 * buffer and the vendor GUID, so the GUID copy is written back to the
 * caller after the call.
 */
static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	/* 8-byte-aligned on-stack copy of the vendor GUID */
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	if (!phys_name)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_next_variable, phys_name_size,
				   phys_name, phys_vendor);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	/* Propagate the (possibly firmware-updated) vendor GUID back out */
	*vendor = *vnd;
	return status;
}
718 
/* GetNextHighMonotonicCount() is not supported in mixed mode. */
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	return EFI_UNSUPPORTED;
}
724 
/*
 * Mixed-mode ResetSystem(); on success the firmware resets the machine
 * and the call does not return.
 */
static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	/* Optional reset data must also be 32-bit addressable */
	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);
}
740 
/* Mixed-mode UpdateCapsule(): unsupported, see comment below. */
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
752 
/*
 * Mixed-mode QueryVariableInfo(): the service only exists from the
 * EFI 2.00 system table revision onwards.
 */
static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	/* Convert the three output pointers to 32-bit physical addresses */
	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
778 
/*
 * Non-blocking variant of efi_thunk_query_variable_info(): returns
 * EFI_NOT_READY instead of spinning when the runtime lock is contended.
 */
static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
					  u64 *remaining_space,
					  u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	/* Convert the three output pointers to 32-bit physical addresses */
	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
805 
/* Mixed-mode QueryCapsuleCapabilities(): unsupported, see comment below. */
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
818 
/*
 * Point all EFI runtime service wrappers at the mixed-mode thunk
 * implementations (32-bit firmware under a 64-bit kernel).
 */
void __init efi_thunk_runtime_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return;

	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
839 
/*
 * Invoke SetVirtualAddressMap() to switch the firmware over to the
 * kernel's virtual mappings. Dispatches to the mixed-mode thunk for
 * 32-bit firmware; otherwise calls the 64-bit service natively from
 * within efi_mm with interrupts disabled.
 */
efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
			    unsigned long descriptor_size,
			    u32 descriptor_version,
			    efi_memory_desc_t *virtual_map,
			    unsigned long systab_phys)
{
	/* Firmware is still 1:1 mapped here, so the physical address works */
	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
	efi_status_t status;
	unsigned long flags;

	if (efi_is_mixed())
		return efi_thunk_set_virtual_address_map(memory_map_size,
							 descriptor_size,
							 descriptor_version,
							 virtual_map);
	efi_enter_mm();

	efi_fpu_begin();

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
				    memory_map_size, descriptor_size,
				    descriptor_version, virtual_map);
	local_irq_restore(flags);

	efi_fpu_end();

	/* grab the virtually remapped EFI runtime services table pointer */
	efi.runtime = READ_ONCE(systab->runtime);

	efi_leave_mm();

	return status;
}
876