1 /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright (c) 2004 Olivier Houchard
7 * Copyright (c) 1994-1998 Mark Brinicombe.
8 * Copyright (c) 1994 Brini.
9 * All rights reserved.
10 *
11 * This code is derived from software written for Brini by Mark Brinicombe
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 * must display the following acknowledgement:
23 * This product includes software developed by Mark Brinicombe
24 * for the NetBSD Project.
25 * 4. The name of the company nor the name of the author may be used to
26 * endorse or promote products derived from this software without specific
27 * prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
30 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
31 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
33 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
34 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
35 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * Machine dependent functions for kernel setup
42 *
43 * Created : 17/09/94
44 * Updated : 18/04/01 updated for new wscons
45 */
46
47 #include "opt_ddb.h"
48 #include "opt_kstack_pages.h"
49 #include "opt_platform.h"
50 #include "opt_sched.h"
51
52 #include <sys/param.h>
53 #include <sys/buf.h>
54 #include <sys/bus.h>
55 #include <sys/cons.h>
56 #include <sys/cpu.h>
57 #include <sys/devmap.h>
58 #include <sys/efi.h>
59 #include <sys/efi_map.h>
60 #include <sys/imgact.h>
61 #include <sys/kdb.h>
62 #include <sys/kernel.h>
63 #include <sys/ktr.h>
64 #include <sys/linker.h>
65 #include <sys/msgbuf.h>
66 #include <sys/physmem.h>
67 #include <sys/reboot.h>
68 #include <sys/rwlock.h>
69 #include <sys/sched.h>
70 #include <sys/syscallsubr.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/vmmeter.h>
74
75 #include <vm/vm_object.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_pager.h>
78
79 #include <machine/asm.h>
80 #include <machine/debug_monitor.h>
81 #include <machine/machdep.h>
82 #include <machine/metadata.h>
83 #include <machine/pcb.h>
84 #include <machine/platform.h>
85 #include <machine/sysarch.h>
86 #include <machine/undefined.h>
87 #include <machine/vfp.h>
88 #include <machine/vmparam.h>
89
90 #ifdef FDT
91 #include <dev/fdt/fdt_common.h>
92 #include <machine/ofw_machdep.h>
93 #endif
94
95 #ifdef DEBUG
96 #define debugf(fmt, args...) printf(fmt, ##args)
97 #else
98 #define debugf(fmt, args...)
99 #endif
100
101 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
102 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
103 defined(COMPAT_FREEBSD9)
104 #error FreeBSD/arm doesn't provide compatibility with releases prior to 10
105 #endif
106
107
108 #if __ARM_ARCH < 7
109 #error FreeBSD requires ARMv7 or later
110 #endif
111
/* Static per-CPU data; pcpup points at the boot CPU's entry. */
struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

/* Trapframe used to seed thread0's register state in init_proc0(). */
static struct trapframe proc0_tf;
/* Reset vector address; 0 until platform code sets it (not set here). */
uint32_t cpu_reset_address = 0;
/* Nonzero during early boot; presumably cleared by MI startup code. */
int cold = 1;
/* VA of the exception vector page; recorded by arm_vector_init(). */
vm_offset_t vector_page;

/* The address at which the kernel was loaded.  Set early in initarm(). */
vm_paddr_t arm_physmem_kernaddr;

#ifdef FDT
vm_paddr_t pmap_pa;
vm_offset_t systempage;		/* Page holding the exception vectors. */
vm_offset_t irqstack;		/* Base of the per-CPU IRQ-mode stacks. */
vm_offset_t undstack;		/* Base of the per-CPU undef-mode stacks. */
vm_offset_t abtstack;		/* Base of the per-CPU abort-mode stacks. */
#endif /* FDT */

#ifdef PLATFORM
/* DELAY() implementation registered by the platform via arm_set_delay(). */
static delay_func *delay_impl;
static void *delay_arg;
#endif

#if defined(SOCDEV_PA)
#if !defined(SOCDEV_VA)
#error SOCDEV_PA defined, but not SOCDEV_VA
#endif
/* Fixed VA of the SoC register window used for early device access. */
uintptr_t socdev_va = SOCDEV_VA;
#endif

/* Kernel VA submap bookkeeping, initialized in cpu_startup(). */
struct kva_md_info kmi;
145 /*
146 * arm32_vector_init:
147 *
148 * Initialize the vector page, and select whether or not to
149 * relocate the vectors.
150 *
151 * NOTE: We expect the vector page to be mapped at its expected
152 * destination.
153 */
154
155 extern unsigned int page0[], page0_data[];
156 void
arm_vector_init(vm_offset_t va,int which)157 arm_vector_init(vm_offset_t va, int which)
158 {
159 unsigned int *vectors = (int *) va;
160 unsigned int *vectors_data = vectors + (page0_data - page0);
161 int vec;
162
163 /*
164 * Loop through the vectors we're taking over, and copy the
165 * vector's insn and data word.
166 */
167 for (vec = 0; vec < ARM_NVEC; vec++) {
168 if ((which & (1 << vec)) == 0) {
169 /* Don't want to take over this vector. */
170 continue;
171 }
172 vectors[vec] = page0[vec];
173 vectors_data[vec] = page0_data[vec];
174 }
175
176 /* Now sync the vectors. */
177 icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));
178
179 vector_page = va;
180 }
181
/*
 * Late (SI_SUB_CPU) startup: identify the CPU, finish kernel VA submap
 * setup, report the memory layout, initialize the buffer cache, and
 * finalize thread0's PCB stack pointer and page directory.
 */
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_free_count()),
	    (uintmax_t)arm32_ptob(vm_free_count()) / mbyte);
	if (bootverbose) {
		physmem_print_tables();
		devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	/* Point thread0's saved SP at the top of its kernel stack. */
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
214
215 /*
216 * Flush the D-cache for non-DMA I/O so that the I-cache can
217 * be made coherent later.
218 */
219 void
cpu_flush_dcache(void * ptr,size_t len)220 cpu_flush_dcache(void *ptr, size_t len)
221 {
222
223 dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
224 }
225
226 /* Get current clock frequency for the given cpu id. */
227 int
cpu_est_clockrate(int cpu_id,uint64_t * rate)228 cpu_est_clockrate(int cpu_id, uint64_t *rate)
229 {
230 struct pcpu *pc;
231
232 pc = pcpu_find(cpu_id);
233 if (pc == NULL || rate == NULL)
234 return (EINVAL);
235
236 if (pc->pc_clock == 0)
237 return (EOPNOTSUPP);
238
239 *rate = pc->pc_clock;
240
241 return (0);
242 }
243
/*
 * Idle the CPU.  With the spinlock held (interrupts off) we optionally
 * stop the periodic clock, sleep until an interrupt is pending, then
 * restore the clock.  The interrupt itself is taken after
 * spinlock_exit() re-enables interrupts.
 */
void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
	/* Only switch off the tick clock when the CPU is truly idle. */
	if (!busy)
		cpu_idleclock();
	/* Re-check runnability with interrupts disabled before sleeping. */
	if (!sched_runnable())
		cpu_sleep(0);
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}
259
/*
 * Nothing special is needed to wake an idle CPU here; report that no
 * explicit wakeup was performed (caller falls back to an IPI/interrupt).
 */
int
cpu_idle_wakeup(int cpu)
{
	return (0);
}
266
/*
 * Per-CPU clock initialization: the boot processor takes the BSP path,
 * application processors the AP path; UP kernels always use the BSP path.
 */
void
cpu_initclocks(void)
{

#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
}
280
281 #ifdef PLATFORM
282 void
arm_set_delay(delay_func * impl,void * arg)283 arm_set_delay(delay_func *impl, void *arg)
284 {
285
286 KASSERT(impl != NULL, ("No DELAY implementation"));
287 delay_impl = impl;
288 delay_arg = arg;
289 }
290
291 void
DELAY(int usec)292 DELAY(int usec)
293 {
294
295 TSENTER();
296 delay_impl(usec, delay_arg);
297 TSEXIT();
298 }
299 #endif
300
/*
 * MD hook run when a pcpu structure is initialized.  Mark the MPIDR as
 * invalid (all-ones); the real value is filled in later -- see
 * pcpu0_init() for the boot CPU.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_mpidr = 0xffffffff;
}
307
/*
 * Enter a spinlock section: on the outermost entry, disable interrupts
 * and save the previous interrupt state in the thread, then enter a
 * critical section; nested entries only bump the count.  Interrupts
 * must be disabled before critical_enter() -- do not reorder.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Save pre-disable state so spinlock_exit() can restore it. */
		cspr = disable_interrupts(PSR_I);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
323
/*
 * Leave a spinlock section; on the outermost exit, leave the critical
 * section and restore the interrupt state saved by spinlock_enter().
 * Mirror ordering of spinlock_enter(): critical_exit() runs before
 * interrupts are re-enabled.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	/* Read the saved state before dropping the count. */
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		restore_interrupts(cspr);
	}
}
338
339 /*
340 * Construct a PCB from a trapframe. This is called from kdb_trap() where
341 * we want to start a backtrace from the function that caused us to enter
342 * the debugger. We have the context in the trapframe, but base the trace
343 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
344 * enough for a backtrace.
345 */
346 void
makectx(struct trapframe * tf,struct pcb * pcb)347 makectx(struct trapframe *tf, struct pcb *pcb)
348 {
349 pcb->pcb_regs.sf_r4 = tf->tf_r4;
350 pcb->pcb_regs.sf_r5 = tf->tf_r5;
351 pcb->pcb_regs.sf_r6 = tf->tf_r6;
352 pcb->pcb_regs.sf_r7 = tf->tf_r7;
353 pcb->pcb_regs.sf_r8 = tf->tf_r8;
354 pcb->pcb_regs.sf_r9 = tf->tf_r9;
355 pcb->pcb_regs.sf_r10 = tf->tf_r10;
356 pcb->pcb_regs.sf_r11 = tf->tf_r11;
357 pcb->pcb_regs.sf_r12 = tf->tf_r12;
358 pcb->pcb_regs.sf_pc = tf->tf_pc;
359 pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
360 pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
361 }
362
/*
 * Initialize the boot CPU's pcpu data: install thread0 as curthread and
 * record this CPU's MPIDR affinity bits (low 24 bits of the register).
 */
void
pcpu0_init(void)
{
	set_curthread(&thread0);
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	/* Keep only the affinity fields of MPIDR. */
	pcpup->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
	PCPU_SET(curthread, &thread0);
}
371
/*
 * Initialize proc0/thread0: link them together, install the kernel
 * stack, and carve the PCB out of the top of that stack.  VFP state
 * starts disowned (pcb_vfpcpu == -1) with default-NaN mode selected.
 */
static void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = kstack_pages;
	/* The PCB lives at the very top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_fpflags = 0;
	/* -1: VFP state not resident on any CPU yet. */
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_pcb->pcb_vfpsaved = &thread0.td_pcb->pcb_vfpstate;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
391
392 void
set_stackptrs(int cpu)393 set_stackptrs(int cpu)
394 {
395
396 set_stackptr(PSR_IRQ32_MODE,
397 irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
398 set_stackptr(PSR_ABT32_MODE,
399 abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
400 set_stackptr(PSR_UND32_MODE,
401 undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
402 }
403
/*
 * Initialize the kernel debugger and, when built with KDB and booted
 * with the debugger flag (RB_KDB), drop into it immediately.
 */
static void
arm_kdb_init(void)
{

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
414
#ifdef FDT
/*
 * fdt_foreach_mem_region() callback: register one FDT memory node as a
 * hardware RAM region with the physmem layer.
 */
static void
fdt_physmem_hardware_region_cb(const struct mem_region *mr, void *arg __unused)
{
	physmem_hardware_region(mr->mr_start, mr->mr_size);
}
421
/*
 * fdt_foreach_reserved_region() callback: exclude one FDT reserved
 * region from both allocation and crash dumps.
 */
static void
fdt_physmem_exclude_region_cb(const struct mem_region *mr, void *arg __unused)
{
	physmem_exclude_region(mr->mr_start, mr->mr_size,
	    EXFLAG_NODUMP | EXFLAG_NOALLOC);
}
428
/*
 * Machine-dependent kernel bootstrap for FDT systems.  Parses the boot
 * parameters and device tree, registers physical memory, bootstraps
 * pmap and the per-mode exception stacks, brings up the console and
 * debugger, and returns the aligned address of thread0's PCB, which the
 * caller uses as the initial kernel stack pointer.
 */
void *
initarm(struct arm_boot_params *abp)
{
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	char *env;
	int err_devmap;
	phandle_t root;
	char dts_version[255];
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	/* Convert the end-of-preload KVA to a physical address. */
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	set_cpufuncs();
	cpuinfo_init();
	sched_instance_select();
	link_elf_ireloc();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	dtbp = MD_FETCH(preload_kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#if defined(LINUX_BOOT_ABI)
	arm_parse_fdt_bootargs();
#endif

#ifdef EFI
	/* Prefer the EFI memory map, when present, over FDT memory nodes. */
	efihdr = (struct efi_map_header *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		efi_map_add_entries(efihdr);
		efi_map_exclude_entries(efihdr);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_foreach_mem_region(fdt_physmem_hardware_region_cb,
		    NULL) != 0)
			panic("Cannot get physical memory regions");

		/* Grab reserved memory regions information from device tree. */
		fdt_foreach_reserved_region(fdt_physmem_exclude_region_cb,
		    NULL);
	}

	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * If EARLY_PRINTF support is enabled, we need to re-establish the
	 * mapping after pmap_bootstrap_prepare() switches to new page tables.
	 * Note that we can only do the remapping if the VA is outside the
	 * kernel, now that we have real virtual (not VA=PA) mappings in effect.
	 * Early printf does not work between the time pmap_set_tex() does
	 * cp15_prrr_set() and this code remaps the VA.
	 */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
	pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024,
	    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#endif

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	/* Keep the kernel VA range clear of the high vectors page. */
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
	kernelstack = pmap_preboot_get_vpages(kstack_pages);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap();
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	/*
	 * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(),
	 * undo it now that the normal console printf works.
	 */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
	pmap_kremove(SOCDEV_VA);
#endif

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)preload_kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	arm_print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/* Warn when the DTB does not match what this kernel was built for. */
	root = OF_finddevice("/");
	if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
		if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
			printf("WARNING: DTB version is %s while kernel expects %s, "
			    "please update the DTB in the ESP\n",
			    dts_version,
			    LINUX_DTS_VERSION);
	} else {
		printf("WARNING: Cannot find freebsd,dts-version property, "
		    "cannot check DTB compliance\n");
	}

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	/* Set stack for exception handlers */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	/* PSR_A: unmask asynchronous aborts -- NOTE(review): confirm intent. */
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/* Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps. virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	dbg_monitor_init();
	arm_kdb_init();
	/* Apply possible BP hardening. */
	cpuinfo_init_bp_hardening();

#ifdef EFI
	if (boothowto & RB_VERBOSE) {
		if (efihdr != NULL)
			efi_map_print_entries(efihdr);
	}
#endif

	/* Hand back the (aligned) initial kernel stack pointer for thread0. */
	return (STACKALIGN(thread0.td_pcb));
}
659 #endif /* FDT */
660