/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat	battable[16];
#endif

int radix_mmu = 0;

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;

extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
#endif

extern void	*rstcode, *rstcodeend;
extern void	*trapcode, *trapcodeend;
extern void	*hypertrapcode, *hypertrapcodeend;
extern void	*generictrap, *generictrap64;
extern void	*alitrap, *aliend;
extern void	*dsitrap, *dsiend;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbend;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

extern void *ap_pcpu;
extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *,
    uint32_t, register_t offset, register_t msr);
extern void __restartkernel_virtual(vm_offset_t, vm_offset_t, vm_offset_t,
    void *, uint32_t, register_t offset, register_t msr);

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);

void
aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	register_t	scratch;

	/*
	 * If running from an FDT, make sure we are in real mode to avoid
	 * tromping on firmware page tables. Everything in the kernel assumes
	 * 1:1 mappings out of firmware, so this won't break anything not
	 * already broken. This doesn't work if there is live OF, since OF
	 * may internally use non-1:1 mappings.
	 */
	if (ofentry == 0)
		mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));

#ifdef __powerpc64__
	/*
	 * Relocate to high memory so that the kernel
	 * can execute from the direct map.
	 *
	 * If we are in virtual mode already, use a special entry point
	 * that sets up a temporary DMAP to execute from until we can
	 * properly set up the MMU.
	 */
	if ((vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) {
		if (mfmsr() & PSL_DR) {
			__restartkernel_virtual(fdt, 0, ofentry, mdp,
			    mdp_cookie, DMAP_BASE_ADDRESS, mfmsr());
		} else {
			__restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
			    DMAP_BASE_ADDRESS, mfmsr());
		}
	}
#endif

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
	/*
	 * PowerPC 970 CPUs have a misfeature requested by Apple that
	 * makes them pretend they have a 32-byte cacheline. Turn this
	 * off before we measure the cacheline size.
	 */
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
		scratch = mfspr(SPR_HID5);
		scratch &= ~HID5_970_DCBZ_SIZE_HI;
		mtspr(SPR_HID5, scratch);
		break;
#ifdef __powerpc64__
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
		/* XXX: get from ibm,slb-size in device tree */
		n_slbs = 32;
		break;
#endif
	}
}

void
aim_cpu_init(vm_offset_t toc)
{
	size_t		trap_offset, trapsize;
	vm_offset_t	trap;
	register_t	msr;
	uint8_t		*cache_check;
	int		cacheline_warn;
#ifndef __powerpc64__
	register_t	scratch;
	int		ppc64;
#endif

	trap_offset = 0;
	cacheline_warn = 0;

	/* General setup for AIM CPUs */
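	/*
	 * EE enables external interrupts, ME machine checks, and IR/DR
	 * instruction/data address translation; RI marks interrupts as
	 * recoverable.
	 */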
	psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;

#ifdef __powerpc64__
	psl_kernset |= PSL_SF;
	if (mfmsr() & PSL_HV)
		psl_kernset |= PSL_HV;

#if BYTE_ORDER == LITTLE_ENDIAN
	psl_kernset |= PSL_LE;
#endif

#endif
	psl_userset = psl_kernset | PSL_PR;
#ifdef __powerpc64__
	psl_userset32 = psl_userset & ~PSL_SF;
#endif

	/*
	 * Zeroed bits in this variable signify that the value of the bit
	 * in its position is allowed to vary between userspace contexts.
	 *
	 * All other bits are required to be identical for every userspace
	 * context. The actual *value* of the bit is determined by
	 * psl_userset and/or psl_userset32, and is not allowed to change.
	 *
	 * Remember to update this set when implementing support for
	 * *conditionally* enabling a processor facility. Failing to do
	 * this will cause swapcontext() in userspace to break when a
	 * process uses a conditionally-enabled facility.
	 *
	 * When *unconditionally* implementing support for a processor
	 * facility, update psl_userset / psl_userset32 instead.
	 *
	 * See the access control check in set_mcontext().
	 */
	psl_userstatic = ~(PSL_VSX | PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
	/*
	 * Mask bits from the SRR1 that aren't really the MSR:
	 * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
	 */
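	/*
	 * (In word terms: IBM-numbered bits 1-4 are 0x78000000 and bits
	 * 10-15 are 0x003f0000 of the low 32-bit word, hence the
	 * 0x783f0000 mask below; on ppc64 the same word positions
	 * correspond to SRR1 bits 33-36 and 42-47.)
	 */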
	psl_userstatic &= ~0x783f0000UL;

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */
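	/*
	 * dcbz zeroes exactly one data cache block at the effective
	 * address, so after filling the buffer with 0xff the length of
	 * the zeroed run is the cache line size.
	 */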

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64-bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */
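	/*
	 * The probe below parks a 1 in SPRG2 and then issues mtmsrd with
	 * the current MSR value (a no-op where legal). On 32-bit parts
	 * the instruction traps to EXC_PGM, where the testppc64 stub
	 * installed below is expected to clear SPRG2 and skip the
	 * faulting instruction, so reading SPRG2 back yields 0.
	 */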

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch, &rfi_patch1, 4);
#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch, &rfi_patch2, 4);
#endif
	}
#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

	/*
	 * Copy generic handler into every possible trap. Special cases will
	 * get different ones in a minute.
	 */
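	/*
	 * The 0x20 stride covers every possible exception entry point;
	 * some vectors (e.g. the hypervisor ones on newer CPUs) are only
	 * 0x20 bytes apart rather than the usual 0x100.
	 */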
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);

#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	} else {
		/*
		 * Use an IBAT and a DBAT to map the bottom 256M segment.
		 *
		 * It is very important to do it *now* to avoid taking a
		 * fault in .text / .data before the MMU is bootstrapped,
		 * because until then, the translation data has not been
		 * copied over from OpenFirmware, so our DSI/ISI will fail
		 * to find a match.
		 */

		battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
		battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
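		/*
		 * BATU holds the effective base, block length and valid
		 * bits; BATL holds the physical base plus the WIMG and
		 * protection fields. Here both map EA 0 to PA 0: 256MB,
		 * coherent, supervisor read/write.
		 */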

		__asm (".balign 32; \n"
		    "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
		    "mtdbatu 0,%0; mtdbatl 0,%1; isync"
		    :: "r"(battable[0].batu), "r"(battable[0].batl));
	}
#else
	trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
	bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HFAC + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
	    (size_t)&dsitrap);

	/* Set address of generictrap for self-reloc calculations */
	*((void **)TRAP_GENTRAP) = &generictrap;
#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_ENTRY) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;
#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_ENTRY) = &generictrap64;
	else
		*((void **)TRAP_ENTRY) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
#endif
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
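	/*
	 * The vectors were written with ordinary stores; the
	 * __syncicache() above pushes them out to memory and invalidates
	 * the icache so that instruction fetch sees the new handlers.
	 */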

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (radix_mmu)
		pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC);
	else if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));
#endif
}

/* Return 0 on handled success, otherwise signal number. */
int
cpu_machine_check(struct thread *td, struct trapframe *frame, int *ucode)
{
#ifdef __powerpc64__
	/*
	 * This block is 64-bit CPU specific currently. Punt running in 32-bit
	 * mode on 64-bit CPUs.
	 */
	/* Check if the important information is in DSISR */
	if ((frame->srr1 & SRR1_MCHK_DATA) != 0) {
		printf("Machine check, DSISR: %016lx\n", frame->cpu.aim.dsisr);
		/* SLB multi-hit is recoverable. */
		if ((frame->cpu.aim.dsisr & DSISR_MC_SLB_MULTIHIT) != 0)
			return (0);
		if ((frame->cpu.aim.dsisr &
		    (DSISR_MC_DERAT_MULTIHIT | DSISR_MC_TLB_MULTIHIT)) != 0) {
			pmap_tlbie_all();
			return (0);
		}
		/* TODO: Add other machine check recovery procedures. */
	} else {
		if ((frame->srr1 & SRR1_MCHK_IFETCH_M) == SRR1_MCHK_IFETCH_SLBMH)
			return (0);
	}
#endif
	*ucode = BUS_OBJERR;
	return (SIGBUS);
}

#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
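	/*
	 * The top four bits of the effective address select one of the
	 * 16 segment registers; the VSID is the low bits of that
	 * register, masked out by SR_VSID_MASK.
	 */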
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif

void
pmap_early_io_map_init(void)
{
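	/*
	 * ISA 3.00 (POWER9) parts default to the radix MMU; the
	 * "radix_mmu" loader tunable fetched below lets that be
	 * overridden back to the hashed page table MMU.
	 */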
	if ((cpu_features2 & PPC_FEATURE2_ARCH_3_00) == 0)
		radix_mmu = 0;
	else {
		radix_mmu = 1;
		TUNABLE_INT_FETCH("radix_mmu", &radix_mmu);
	}

	/*
	 * When using Radix, set the start and end of kva early, to be able to
	 * use KVAs on pmap_early_io_map and avoid issues when remapping them
	 * later.
	 */
	if (radix_mmu) {
		virtual_avail = VM_MIN_KERNEL_ADDRESS;
		virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
	}
}

/*
 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in during early boot (now))
 * and (b) can still, in principle, work once the kernel is going. Because
 * these rely on existing mappings/real mode, unmap is a no-op.
 */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

	/*
	 * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
	 * try to get the address in a memory region compatible with the
	 * direct map for efficiency later.
	 * Except for Radix MMU, for which the current implementation doesn't
	 * support mapping arbitrary virtual addresses, such as the ones
	 * generated by "direct mapping" I/O addresses. In this case, use
	 * addresses from the KVA area.
	 */
	if (mfmsr() & PSL_DR)
		return (pa);
	else if (radix_mmu) {
		vm_offset_t va;

		va = virtual_avail;
		virtual_avail += round_page(size + pa - trunc_page(pa));
		return (va);
	} else
		return (DMAP_BASE_ADDRESS + pa);
}

void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{

	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	/* 7e00066c: dssall */
	__asm__ __volatile__(".long 0x7e00066c; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then
	 * do it from ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		(void)*memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32 / sizeof(*memp);
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;
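	/*
	 * Per the MPC7450 procedure cited above, walk a single zero bit
	 * through the LDSTCR way-lock field so that exactly one L1 way
	 * is unlocked per pass of the loop below; the walk stops once x
	 * reaches 0xff (all ways locked), after every way has been
	 * flushed through.
	 */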

	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			(void)*memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32 / sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

#ifndef __powerpc64__
void
mpc745x_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);
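	/*
	 * On wake-up the CPU re-enters at the reset vector; the reset
	 * code is expected to longjmp() back through the per-CPU
	 * "restore" pointer saved above, resuming in the setjmp() != 0
	 * path below.
	 */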

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		while (1)
			mtmsr(msr);
	}
	/* XXX: The mttb() means this *only* works on single-CPU systems. */
	mttb(timebase);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}
#endif