/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE startup code.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#include <arch/spr_def.h>

/*
 * This module contains the entry code for kernel images. It performs the
 * minimal setup needed to call the generic C routines.
 */

	__HEAD
ENTRY(_start)
	/* Notify the hypervisor of what version of the API we want */
	{
	  movei r1, TILE_CHIP
	  movei r2, TILE_CHIP_REV
	}
	{
	  moveli r0, _HV_VERSION
	  jal hv_init
	}
	/* Get a reasonable default ASID in r0 */
	{
	  move r0, zero
	  jal hv_inquire_asid
	}
	/* Install the default page table */
	{
	  moveli r6, lo16(swapper_pgprot - PAGE_OFFSET)
	  move r4, r0	/* use starting ASID of range for this page table */
	}
	{
	  moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET)
	  auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET)
	}
	{
	  lw r2, r6
	  addi r6, r6, 4
	}
	{
	  lw r3, r6
	  auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
	}
	{
	  inv r6
	  move r1, zero	/* high 32 bits of CPA is zero */
	}
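	/*
	 * Set lr by hand here: the call below is a plain "j" rather than
	 * "jal" (which would overwrite lr), so hv_install_context will
	 * return to the "1:" label that follows it.
	 */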
	{
	  moveli lr, lo16(1f)
	  move r5, zero
	}
	{
	  auli lr, lr, ha16(1f)
	  j hv_install_context
	}
1:

	/* Get our processor number and save it away in SAVE_K_0. */
	jal hv_inquire_topology
	mulll_uu r4, r1, r2	/* r1 == y, r2 == width */
	add r4, r4, r0		/* r0 == x, so r4 == cpu == y*width + x */

#ifdef CONFIG_SMP
	/*
	 * Load up our per-cpu offset.  When the first (master) tile
	 * boots, this value is still zero, so we will load boot_pc
	 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
	 * The master tile initializes the per-cpu offset array, so that
	 * when subsequent (secondary) tiles boot, they will instead load
	 * from their per-cpu versions of boot_sp and boot_pc.
	 */
	moveli r5, lo16(__per_cpu_offset)
	auli r5, r5, ha16(__per_cpu_offset)
	s2a r5, r4, r5
	lw r5, r5
	bnz r5, 1f

	/*
	 * Save the width and height to the smp_topology variable
	 * for later use.
	 */
	moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	{
	  sw r0, r2
	  addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET)
	}
	sw r0, r3
1:
#else
	move r5, zero
#endif

	/* Load and go with the correct pc and sp. */
	{
	  addli r1, r5, lo16(boot_sp)
	  addli r0, r5, lo16(boot_pc)
	}
	{
	  auli r1, r1, ha16(boot_sp)
	  auli r0, r0, ha16(boot_pc)
	}
	lw r0, r0
	lw sp, r1
	or r4, sp, r4
	mtspr SPR_SYSTEM_SAVE_K_0, r4	/* save ksp0 + cpu */
	addi sp, sp, -STACK_TOP_DELTA
	{
	  move lr, zero	/* stop backtraces in the called function */
	  jr r0
	}
	ENDPROC(_start)

__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.fill PAGE_SIZE,1,0
	END(empty_zero_page)

.macro PTE va, cpa, bits1, no_org=0
	.ifeq \no_org
	.org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
	.endif
	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
.endm

__PAGE_ALIGNED_DATA
	.align PAGE_SIZE
ENTRY(swapper_pg_dir)
	/*
	 * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as
	 * VA = PA + PAGE_OFFSET.  We remap things with more precise access
	 * permissions and more respect for size of RAM later.
	 */
	.set addr, 0
	.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
	PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
			      (1 << (HV_PTE_INDEX_WRITABLE - 32))
	.set addr, addr + PGDIR_SIZE
	.endr

	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
			      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
	.org swapper_pg_dir + HV_L1_SIZE
	END(swapper_pg_dir)

	/*
	 * Isolate swapper_pgprot to its own cache line, since each cpu
	 * starting up will read it using VA-is-PA and local homing.
	 * This would otherwise likely conflict with other data on the cache
	 * line, once we have set its permanent home in the page tables.
	 */
	__INITDATA
	.align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
	PTE	0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
		      (1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
	.align CHIP_L2_LINE_SIZE()
	END(swapper_pgprot)
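/*
 * The PTE above passes no_org=1 since swapper_pgprot lives in
 * __INITDATA rather than inside swapper_pg_dir, so the macro's
 * .org directive must be skipped for it.
 */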