/*
 * Initialize machine setup information and I/O.
 *
 * After running setup() unit tests may query how many cpus they have
 * (nr_cpus_present), how much memory they have (PHYSICAL_END - PHYSICAL_START),
 * may use dynamic memory allocation (malloc, etc.), printf, and exit.
 * Finally, argc and argv are also ready to be passed to main().
 *
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <libcflat.h>
#include <libfdt/libfdt.h>
#include <devicetree.h>
#include <alloc.h>
#include <alloc_phys.h>
#include <alloc_page.h>
#include <argv.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/hcall.h>
#include "io.h"

/* Top of the boot stack, defined by the linker script / startup code. */
extern unsigned long stacktop;

/* Initrd location and size after setup() has moved it to a safe place. */
char *initrd;
u32 initrd_size;

/* Logical cpu index -> hardware thread id; ~0U marks an unused slot. */
u32 cpu_to_hwid[NR_CPUS] = { [0 ... NR_CPUS-1] = (~0U) };
int nr_cpus_present;
uint64_t tb_hz;

struct mem_region mem_regions[NR_MEM_REGIONS];
phys_addr_t __physical_start, __physical_end;
unsigned __icache_bytes, __dcache_bytes;

/* Parameters common to all cpus, gathered from the device tree cpu nodes. */
struct cpu_set_params {
	unsigned icache_bytes;
	unsigned dcache_bytes;
	uint64_t tb_hz;
};

/*
 * dt_for_each_cpu_node() callback: record the hardware ids of all
 * threads of this cpu node into cpu_to_hwid[], and (once, from the
 * first node visited) read the cache line sizes and timebase
 * frequency into *info (a struct cpu_set_params).
 */
static void cpu_set(int fdtnode, u64 regval, void *info)
{
	const struct fdt_property *prop;
	u32 *threads;
	static bool read_common_info = false;
	struct cpu_set_params *params = info;
	int nr_threads;
	int len, i;

	/* Get the id array of threads on this node */
	prop = fdt_get_property(dt_fdt(), fdtnode,
				"ibm,ppc-interrupt-server#s", &len);
	assert(prop);

	nr_threads = len >> 2; /* Divide by 4 since 4 bytes per thread */
	threads = (u32 *)prop->data; /* Array of valid ids */

	for (i = 0; i < nr_threads; i++) {
		/* Excess threads are ignored (warn once), not an error. */
		if (nr_cpus_present >= NR_CPUS) {
			static bool warned = false;
			if (!warned) {
				printf("Warning: Number of present CPUs exceeds maximum supported (%d).\n", NR_CPUS);
				warned = true;
			}
			break;
		}
		cpu_to_hwid[nr_cpus_present++] = fdt32_to_cpu(threads[i]);
	}

	/*
	 * The properties below are expected to be identical on every cpu
	 * node, so they are only read from the first node visited.
	 */
	if (!read_common_info) {
		const struct fdt_property *prop;
		u32 *data;

		prop = fdt_get_property(dt_fdt(), fdtnode,
					"i-cache-line-size", NULL);
		assert(prop != NULL);
		data = (u32 *)prop->data;
		params->icache_bytes = fdt32_to_cpu(*data);

		prop = fdt_get_property(dt_fdt(), fdtnode,
					"d-cache-line-size", NULL);
		assert(prop != NULL);
		data = (u32 *)prop->data;
		params->dcache_bytes = fdt32_to_cpu(*data);

		/*
		 * NOTE(review): "timebase-frequency" is read as a single
		 * 32-bit cell here; confirm no supported platform encodes
		 * it as a 64-bit (two-cell) value.
		 */
		prop = fdt_get_property(dt_fdt(), fdtnode,
					"timebase-frequency", NULL);
		assert(prop != NULL);
		data = (u32 *)prop->data;
		params->tb_hz = fdt32_to_cpu(*data);

		read_common_info = true;
	}
}

/* CPU feature flags, derived from the MSR and PVR in setup()/cpu_init_params(). */
bool cpu_has_hv;
bool cpu_has_power_mce; /* POWER CPU machine checks */
bool cpu_has_siar;
bool cpu_has_heai;
bool cpu_has_prefix;
bool cpu_has_sc_lev; /* sc interrupt has LEV field in SRR1 */
bool cpu_has_pause_short;

/*
 * Walk the device tree cpu nodes to fill cpu_to_hwid[]/nr_cpus_present
 * and the global cache-line/timebase parameters, then derive the cpu
 * feature flags from the PVR processor version.
 */
static void cpu_init_params(void)
{
	struct cpu_set_params params;
	int ret;

	nr_cpus_present = 0;
	ret = dt_for_each_cpu_node(cpu_set, &params);
	assert(ret == 0);
	__icache_bytes = params.icache_bytes;
	__dcache_bytes = params.dcache_bytes;
	tb_hz = params.tb_hz;

	/* Newer processors inherit the features of the older ones. */
	switch (mfspr(SPR_PVR) & PVR_VERSION_MASK) {
	case PVR_VER_POWER10:
		cpu_has_prefix = true;
		cpu_has_sc_lev = true;
		cpu_has_pause_short = true;
		/* fall through */
	case PVR_VER_POWER9:
	case PVR_VER_POWER8E:
	case PVR_VER_POWER8NVL:
	case PVR_VER_POWER8:
		cpu_has_power_mce = true;
		cpu_has_heai = true;
		cpu_has_siar = true;
		break;
	default:
		break;
	}

	if (!cpu_has_hv) /* HEIR is HV register */
		cpu_has_heai = false;
}

/*
 * Read the memory regions from the device tree into mem_regions[],
 * record the overall [PHYSICAL_START, PHYSICAL_END) range (ignoring
 * gaps between regions), and initialize the physical and page
 * allocators from the primary region — the one containing
 * freemem_start — starting at freemem_start.
 */
static void mem_init(phys_addr_t freemem_start)
{
	struct dt_pbus_reg regs[NR_MEM_REGIONS];
	struct mem_region primary, mem = {
		.start = (phys_addr_t)-1,
	};
	int nr_regs, i;
	phys_addr_t base, top;

	nr_regs = dt_get_memory_params(regs, NR_MEM_REGIONS);
	assert(nr_regs > 0);

	primary.end = 0;

	for (i = 0; i < nr_regs; ++i) {
		mem_regions[i].start = regs[i].addr;
		mem_regions[i].end = regs[i].addr + regs[i].size;

		/*
		 * pick the region we're in for our primary region
		 */
		if (freemem_start >= mem_regions[i].start
				&& freemem_start < mem_regions[i].end) {
			mem_regions[i].flags |= MR_F_PRIMARY;
			primary = mem_regions[i];
		}

		/*
		 * set the lowest and highest addresses found,
		 * ignoring potential gaps
		 */
		if (mem_regions[i].start < mem.start)
			mem.start = mem_regions[i].start;
		if (mem_regions[i].end > mem.end)
			mem.end = mem_regions[i].end;
	}
	/* freemem_start must fall within one of the regions. */
	assert(primary.end != 0);
	// assert(!(mem.start & ~PHYS_MASK) && !((mem.end - 1) & ~PHYS_MASK));

	__physical_start = mem.start; /* PHYSICAL_START */
	__physical_end = mem.end; /* PHYSICAL_END */

	phys_alloc_init(freemem_start, primary.end - freemem_start);
	/* Align allocations to the larger of the two cache line sizes. */
	phys_alloc_set_minimum_alignment(__icache_bytes > __dcache_bytes
					 ? __icache_bytes : __dcache_bytes);

	/* Hand whatever the phys allocator hasn't used to the page allocator. */
	phys_alloc_get_unused(&base, &top);
	base = PAGE_ALIGN(base);
	top &= PAGE_MASK;
	page_alloc_init_area(0, base >> PAGE_SHIFT, top >> PAGE_SHIFT);
	page_alloc_ops_enable();
}

#define EXCEPTION_STACK_SIZE SZ_64K

/* Static exception stack for the boot cpu (used before malloc works). */
static char boot_exception_stack[EXCEPTION_STACK_SIZE];
struct cpu cpus[NR_CPUS];

/*
 * Initialize a (secondary) cpu structure: record its server number and
 * allocate its normal and exception stacks.  Stacks grow down, so the
 * pointers are set near the top of each 64K allocation, leaving 64
 * bytes of headroom above the initial stack pointer.
 */
void cpu_init(struct cpu *cpu, int cpu_id)
{
	cpu->server_no = cpu_id;

	cpu->stack = (unsigned long)memalign(SZ_4K, SZ_64K);
	cpu->stack += SZ_64K - 64;
	cpu->exception_stack = (unsigned long)memalign(SZ_4K, SZ_64K);
	cpu->exception_stack += SZ_64K - 64;
}

bool host_is_tcg;
bool host_is_kvm;

/*
 * Main bring-up entry point, called with the flattened device tree
 * passed by the boot code.  Sets up the boot cpu, relocates the fdt
 * and initrd, detects the host/hypervisor, reads machine parameters
 * from the device tree, and initializes memory allocation, I/O, and
 * the argument/environment vectors for main().
 */
void setup(const void *fdt)
{
	void *freemem = &stacktop;
	const char *bootargs, *tmp;
	struct cpu *cpu;
	u32 fdt_size;
	int ret;

	cpu_has_hv = !!(mfmsr() & (1ULL << MSR_HV_BIT));

	/* 0xff fill so unused cpu slots don't look like valid entries. */
	memset(cpus, 0xff, sizeof(cpus));

	cpu = &cpus[0];
	cpu->server_no = fdt_boot_cpuid_phys(fdt);
	cpu->exception_stack = (unsigned long)boot_exception_stack;
	cpu->exception_stack += EXCEPTION_STACK_SIZE - 64;

	/* SPRG0 holds the current cpu pointer for exception handlers. */
	mtspr(SPR_SPRG0, (unsigned long)cpu);
	__current_cpu = cpu;

	enable_mcheck();

	/*
	 * Before calling mem_init we need to move the fdt and initrd
	 * to safe locations.  We move them to construct the memory
	 * map illustrated below:
	 *
	 *    +----------------------+   <-- top of physical memory
	 *    |                      |
	 *    ~                      ~
	 *    |                      |
	 *    +----------------------+   <-- top of initrd
	 *    |                      |
	 *    +----------------------+   <-- top of FDT
	 *    |                      |
	 *    +----------------------+   <-- top of cpu0's stack
	 *    |                      |
	 *    +----------------------+   <-- top of text/data/bss/toc sections,
	 *    |                      |       see powerpc/flat.lds
	 *    |                      |
	 *    +----------------------+   <-- load address
	 *    |                      |
	 *    +----------------------+
	 */
	fdt_size = fdt_totalsize(fdt);
	ret = fdt_move(fdt, freemem, fdt_size);
	assert(ret == 0);
	ret = dt_init(freemem);
	assert(ret == 0);
	freemem += fdt_size;

	if (!fdt_node_check_compatible(fdt, 0, "qemu,pseries")) {
		/* pseries guests run without hypervisor privilege. */
		assert(!cpu_has_hv);

		/*
		 * host_is_tcg incorrectly does not get set when running
		 * KVM on a TCG host (using powernv HV emulation or spapr
		 * nested HV).
		 */
		ret = fdt_subnode_offset(fdt, 0, "hypervisor");
		if (ret < 0) {
			host_is_tcg = true;
			host_is_kvm = false;
		} else {
			/* KVM is the only supported hypervisor */
			assert(!fdt_node_check_compatible(fdt, ret, "linux,kvm"));
			host_is_tcg = false;
			host_is_kvm = true;
		}
	} else {
		/* Non-pseries (bare-metal style) machines must be HV. */
		assert(cpu_has_hv);
		host_is_tcg = true;
		host_is_kvm = false;
	}

	/* A missing initrd is fine; any other error is not. */
	ret = dt_get_initrd(&tmp, &initrd_size);
	assert(ret == 0 || ret == -FDT_ERR_NOTFOUND);
	if (ret == 0) {
		initrd = freemem;
		memmove(initrd, tmp, initrd_size);
		freemem += initrd_size;
	}

	assert(STACK_INT_FRAME_SIZE % 16 == 0);

	/* set parameters from dt */
	cpu_init_params();

	/*
	 * Interrupt Endianness: ask the hypervisor to deliver interrupts
	 * in this build's byte order (H_SET_MODE value 1 = LE, 0 = BE;
	 * resource 4 selects interrupt endianness per PAPR).
	 */
	if (machine_is_pseries()) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		hcall(H_SET_MODE, 1, 4, 0, 0);
#else
		hcall(H_SET_MODE, 0, 4, 0, 0);
#endif
	}

	cpu_init_ipis();

	/* cpu_init must be called before mem_init */
	mem_init(PAGE_ALIGN((unsigned long)freemem));

	/* mem_init must be called before io_init */
	io_init();

	/* finish setup */
	ret = dt_get_bootargs(&bootargs);
	assert(ret == 0 || ret == -FDT_ERR_NOTFOUND);
	setup_args_progname(bootargs);

	if (initrd) {
		/* environ is currently the only file in the initrd */
		char *env = malloc(initrd_size);
		memcpy(env, initrd, initrd_size);
		setup_env(env, initrd_size);
	}
}