/*
 * Initialize machine setup information and I/O.
 *
 * After running setup(), unit tests may query how many cpus they have
 * (nr_cpus_present), how much memory they have (PHYSICAL_END - PHYSICAL_START),
 * may use dynamic memory allocation (malloc, etc.), printf, and exit.
 * Finally, argc and argv are also ready to be passed to main().
 *
 * Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <libcflat.h>
#include <libfdt/libfdt.h>
#include <devicetree.h>
#include <alloc.h>
#include <alloc_phys.h>
#include <alloc_page.h>
#include <argv.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/hcall.h>
#include "io.h"

extern unsigned long stacktop;

char *initrd;
u32 initrd_size;

u32 cpu_to_hwid[NR_CPUS] = { [0 ... NR_CPUS-1] = (~0U) };
int nr_cpus_present;
uint64_t tb_hz;

struct mem_region mem_regions[NR_MEM_REGIONS];
phys_addr_t __physical_start, __physical_end;
unsigned __icache_bytes, __dcache_bytes;

struct cpu_set_params {
        unsigned icache_bytes;
        unsigned dcache_bytes;
        uint64_t tb_hz;
};

static void cpu_set(int fdtnode, u64 regval, void *info)
{
        const struct fdt_property *prop;
        u32 *threads;
        static bool read_common_info = false;
        struct cpu_set_params *params = info;
        int nr_threads;
        int len, i;

        /* Get the id array of threads on this node */
        prop = fdt_get_property(dt_fdt(), fdtnode,
                                "ibm,ppc-interrupt-server#s", &len);
        assert(prop);

        nr_threads = len >> 2; /* Divide by 4 since 4 bytes per thread */
        threads = (u32 *)prop->data; /* Array of valid ids */

        for (i = 0; i < nr_threads; i++) {
                if (nr_cpus_present >= NR_CPUS) {
                        static bool warned = false;
                        if (!warned) {
                                printf("Warning: Number of present CPUs exceeds maximum supported (%d).\n", NR_CPUS);
                                warned = true;
                        }
                        break;
                }
                cpu_to_hwid[nr_cpus_present++] = fdt32_to_cpu(threads[i]);
        }

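        /*
         * Cache line sizes and the timebase frequency are expected to
         * be the same on every CPU node, so read them only once, from
         * the first node we visit.
         */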
        if (!read_common_info) {
                const struct fdt_property *prop;
                u32 *data;

                prop = fdt_get_property(dt_fdt(), fdtnode,
                                        "i-cache-line-size", NULL);
                assert(prop != NULL);
                data = (u32 *)prop->data;
                params->icache_bytes = fdt32_to_cpu(*data);

                prop = fdt_get_property(dt_fdt(), fdtnode,
                                        "d-cache-line-size", NULL);
                assert(prop != NULL);
                data = (u32 *)prop->data;
                params->dcache_bytes = fdt32_to_cpu(*data);

                prop = fdt_get_property(dt_fdt(), fdtnode,
                                        "timebase-frequency", NULL);
                assert(prop != NULL);
                data = (u32 *)prop->data;
                params->tb_hz = fdt32_to_cpu(*data);

                read_common_info = true;
        }
}

bool cpu_has_hv;
bool cpu_has_power_mce; /* POWER CPU machine checks */
bool cpu_has_siar;      /* Sampled Instruction Address Register */
bool cpu_has_heai;      /* Hypervisor Emulation Assistance interrupt */
bool cpu_has_radix;     /* radix MMU */
bool cpu_has_prefix;    /* prefixed instructions (ISA v3.1) */
bool cpu_has_sc_lev;    /* sc interrupt has LEV field in SRR1 */
bool cpu_has_pause_short;

static void cpu_init_params(void)
{
        struct cpu_set_params params;
        int ret;

        nr_cpus_present = 0;
        ret = dt_for_each_cpu_node(cpu_set, &params);
        assert(ret == 0);
        __icache_bytes = params.icache_bytes;
        __dcache_bytes = params.dcache_bytes;
        tb_hz = params.tb_hz;

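        /*
         * Derive CPU feature flags from the processor version (PVR).
         * Newer processors deliberately fall through so they also pick
         * up the features of the older generations below them.
         */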
        switch (mfspr(SPR_PVR) & PVR_VERSION_MASK) {
        case PVR_VER_POWER10:
                cpu_has_prefix = true;
                cpu_has_sc_lev = true;
                cpu_has_pause_short = true;
                /* fall through */
        case PVR_VER_POWER9:
                cpu_has_radix = true;
                /* fall through */
        case PVR_VER_POWER8E:
        case PVR_VER_POWER8NVL:
        case PVR_VER_POWER8:
                cpu_has_power_mce = true;
                cpu_has_heai = true;
                cpu_has_siar = true;
                break;
        default:
                break;
        }

        if (!cpu_has_hv)        /* HEIR is an HV register */
                cpu_has_heai = false;
}

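/*
 * Parse the memory nodes from the device tree, record the overall
 * physical address range, and hand the free memory above freemem_start
 * to the physical and page allocators.
 */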
static void mem_init(phys_addr_t freemem_start)
{
        struct dt_pbus_reg regs[NR_MEM_REGIONS];
        struct mem_region primary, mem = {
                .start = (phys_addr_t)-1,
        };
        int nr_regs, i;
        phys_addr_t base, top;

        nr_regs = dt_get_memory_params(regs, NR_MEM_REGIONS);
        assert(nr_regs > 0);

        primary.end = 0;

        for (i = 0; i < nr_regs; ++i) {
                mem_regions[i].start = regs[i].addr;
                mem_regions[i].end = regs[i].addr + regs[i].size;

                /*
                 * pick the region we're in for our primary region
                 */
                if (freemem_start >= mem_regions[i].start
                    && freemem_start < mem_regions[i].end) {
                        mem_regions[i].flags |= MR_F_PRIMARY;
                        primary = mem_regions[i];
                }

                /*
                 * set the lowest and highest addresses found,
                 * ignoring potential gaps
                 */
                if (mem_regions[i].start < mem.start)
                        mem.start = mem_regions[i].start;
                if (mem_regions[i].end > mem.end)
                        mem.end = mem_regions[i].end;
        }
        assert(primary.end != 0);
        // assert(!(mem.start & ~PHYS_MASK) && !((mem.end - 1) & ~PHYS_MASK));

        __physical_start = mem.start;   /* PHYSICAL_START */
        __physical_end = mem.end;       /* PHYSICAL_END */

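        /*
         * Seed the early physical allocator with the free part of the
         * primary region; the page allocator below then takes over the
         * page-aligned remainder.
         */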
        phys_alloc_init(freemem_start, primary.end - freemem_start);
        phys_alloc_set_minimum_alignment(__icache_bytes > __dcache_bytes
                                         ? __icache_bytes : __dcache_bytes);

        phys_alloc_get_unused(&base, &top);
        base = PAGE_ALIGN(base);
        top &= PAGE_MASK;
        page_alloc_init_area(0, base >> PAGE_SHIFT, top >> PAGE_SHIFT);
        page_alloc_ops_enable();
}

#define EXCEPTION_STACK_SIZE SZ_64K

static char boot_exception_stack[EXCEPTION_STACK_SIZE];
struct cpu cpus[NR_CPUS];

void cpu_init(struct cpu *cpu, int cpu_id)
{
        cpu->server_no = cpu_id;

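        /*
         * Give the CPU its own 64kB normal and exception stacks and
         * point at their tops, leaving 64 bytes of headroom for an
         * initial stack frame.
         */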
        cpu->stack = (unsigned long)memalign_pages(SZ_4K, SZ_64K);
        cpu->stack += SZ_64K - 64;
        cpu->exception_stack = (unsigned long)memalign_pages(SZ_4K, SZ_64K);
        cpu->exception_stack += SZ_64K - 64;
        cpu->pgtable = NULL;
        cpu->in_user = false;
}

bool host_is_tcg;
bool host_is_kvm;
bool is_hvmode;

void setup(const void *fdt)
{
        void *freemem = &stacktop;
        const char *bootargs, *tmp;
        struct cpu *cpu;
        u32 fdt_size;
        int ret;

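        /* Mark all cpus[] entries invalid (-1) until they are initialized. */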
        memset(cpus, 0xff, sizeof(cpus));

        cpu = &cpus[0];
        cpu->server_no = fdt_boot_cpuid_phys(fdt);
        cpu->exception_stack = (unsigned long)boot_exception_stack;
        cpu->exception_stack += EXCEPTION_STACK_SIZE - 64;
        cpu->pgtable = NULL;
        cpu->in_user = false;

        mtspr(SPR_SPRG0, (unsigned long)cpu);
        __current_cpu = cpu;

        cpu_has_hv = !!(mfmsr() & (1ULL << MSR_HV_BIT));
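        /*
         * MSR[HV] set means we run with hypervisor privilege (powernv)
         * rather than as a pseries guest.
         */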

        enable_mcheck();

        /*
         * Before calling mem_init we need to move the fdt and initrd
         * to safe locations. We move them to construct the memory
         * map illustrated below:
         *
         *    +----------------------+   <-- top of physical memory
         *    |                      |
         *    ~                      ~
         *    |                      |
         *    +----------------------+   <-- top of initrd
         *    |                      |
         *    +----------------------+   <-- top of FDT
         *    |                      |
         *    +----------------------+   <-- top of cpu0's stack
         *    |                      |
         *    +----------------------+   <-- top of text/data/bss/toc sections,
         *    |                      |       see powerpc/flat.lds
         *    |                      |
         *    +----------------------+   <-- load address
         *    |                      |
         *    +----------------------+
         */
        fdt_size = fdt_totalsize(fdt);
        ret = fdt_move(fdt, freemem, fdt_size);
        assert(ret == 0);
        ret = dt_init(freemem);
        assert(ret == 0);
        freemem += fdt_size;

        if (!fdt_node_check_compatible(fdt, 0, "qemu,pseries")) {
                assert(!cpu_has_hv);

                /*
                 * host_is_tcg incorrectly does not get set when running
                 * KVM on a TCG host (using powernv HV emulation or spapr
                 * nested HV).
                 */
                ret = fdt_subnode_offset(fdt, 0, "hypervisor");
                if (ret < 0) {
                        host_is_tcg = true;
                        host_is_kvm = false;
                } else {
                        /* KVM is the only supported hypervisor */
                        assert(!fdt_node_check_compatible(fdt, ret, "linux,kvm"));
                        host_is_tcg = false;
                        host_is_kvm = true;
                }
        } else {
                assert(cpu_has_hv);
                host_is_tcg = true;
                host_is_kvm = false;
        }
        ret = dt_get_initrd(&tmp, &initrd_size);
        assert(ret == 0 || ret == -FDT_ERR_NOTFOUND);
        if (ret == 0) {
                initrd = freemem;
                memmove(initrd, tmp, initrd_size);
                freemem += initrd_size;
        }

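        /* The ABI requires the stack to stay 16-byte aligned across interrupt frames. */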
        assert(STACK_INT_FRAME_SIZE % 16 == 0);

        /* set parameters from dt */
        cpu_init_params();

        /*
         * Interrupt endianness: ask the hypervisor to deliver
         * interrupts in the endianness this test was built for
         * (H_SET_MODE resource 4 selects interrupt-little-endian
         * mode; value 1 means little endian, 0 big endian).
         */
        if (machine_is_pseries()) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                hcall(H_SET_MODE, 1, 4, 0, 0);
#else
                hcall(H_SET_MODE, 0, 4, 0, 0);
#endif
        }

        cpu_init_ipis();

        /* cpu_init must be called before mem_init */
        mem_init(PAGE_ALIGN((unsigned long)freemem));

        /* mem_init must be called before io_init */
        io_init();

        /* finish setup */
        ret = dt_get_bootargs(&bootargs);
        assert(ret == 0 || ret == -FDT_ERR_NOTFOUND);
        setup_args_progname(bootargs);

        if (initrd) {
                /* environ is currently the only file in the initrd */
                char *env = malloc(initrd_size);
                memcpy(env, initrd, initrd_size);
                setup_env(env, initrd_size);
        }
}