xref: /qemu/hw/ppc/spapr.c (revision 06b40d250ecfa1633209c2e431a7a38acfd03a98)
1 /*
2  * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3  *
4  * Copyright (c) 2004-2007 Fabrice Bellard
5  * Copyright (c) 2007 Jocelyn Mayer
6  * Copyright (c) 2010 David Gibson, IBM Corporation.
7  * Copyright (c) 2010-2024, IBM Corporation.
8  *
9  * SPDX-License-Identifier: GPL-2.0-or-later
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a copy
12  * of this software and associated documentation files (the "Software"), to deal
13  * in the Software without restriction, including without limitation the rights
14  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15  * copies of the Software, and to permit persons to whom the Software is
16  * furnished to do so, subject to the following conditions:
17  *
18  * The above copyright notice and this permission notice shall be included in
19  * all copies or substantial portions of the Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27  * THE SOFTWARE.
28  */
29 
30 #include "qemu/osdep.h"
31 #include "qemu/datadir.h"
32 #include "qemu/memalign.h"
33 #include "qemu/guest-random.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-events-machine.h"
36 #include "qapi/qapi-events-qdev.h"
37 #include "qapi/visitor.h"
38 #include "system/system.h"
39 #include "system/hostmem.h"
40 #include "system/numa.h"
41 #include "system/tcg.h"
42 #include "system/qtest.h"
43 #include "system/reset.h"
44 #include "system/runstate.h"
45 #include "qemu/log.h"
46 #include "hw/fw-path-provider.h"
47 #include "elf.h"
48 #include "net/net.h"
49 #include "system/device_tree.h"
50 #include "system/cpus.h"
51 #include "system/hw_accel.h"
52 #include "kvm_ppc.h"
53 #include "migration/misc.h"
54 #include "migration/qemu-file-types.h"
55 #include "migration/global_state.h"
56 #include "migration/register.h"
57 #include "migration/blocker.h"
58 #include "mmu-hash64.h"
59 #include "mmu-book3s-v3.h"
60 #include "cpu-models.h"
61 #include "hw/core/cpu.h"
62 
63 #include "hw/ppc/ppc.h"
64 #include "hw/loader.h"
65 
66 #include "hw/ppc/fdt.h"
67 #include "hw/ppc/spapr.h"
68 #include "hw/ppc/spapr_nested.h"
69 #include "hw/ppc/spapr_vio.h"
70 #include "hw/ppc/vof.h"
71 #include "hw/qdev-properties.h"
72 #include "hw/pci-host/spapr.h"
73 #include "hw/pci/msi.h"
74 
75 #include "hw/pci/pci.h"
76 #include "hw/scsi/scsi.h"
77 #include "hw/virtio/virtio-scsi.h"
78 #include "hw/virtio/vhost-scsi-common.h"
79 
80 #include "system/ram_addr.h"
81 #include "system/confidential-guest-support.h"
82 #include "hw/usb.h"
83 #include "qemu/config-file.h"
84 #include "qemu/error-report.h"
85 #include "trace.h"
86 #include "hw/nmi.h"
87 #include "hw/intc/intc.h"
88 
89 #include "hw/ppc/spapr_cpu_core.h"
90 #include "hw/mem/memory-device.h"
91 #include "hw/ppc/spapr_tpm_proxy.h"
92 #include "hw/ppc/spapr_nvdimm.h"
93 #include "hw/ppc/spapr_numa.h"
94 
95 #include <libfdt.h>
96 
97 /* SLOF memory layout:
98  *
99  * SLOF raw image loaded at 0, copies its romfs right below the flat
100  * device-tree, then positions SLOF itself 31M below that
101  *
102  * So we set FW_OVERHEAD to 40MB which should account for all of that
103  * and more
104  *
105  * We load our kernel at 4M, leaving space for SLOF initial image
106  */
107 #define FDT_MAX_ADDR            0x80000000 /* FDT must stay below that */
108 #define FW_MAX_SIZE             0x400000
109 #define FW_FILE_NAME            "slof.bin"
110 #define FW_FILE_NAME_VOF        "vof.bin"
111 #define FW_OVERHEAD             0x2800000
112 #define KERNEL_LOAD_ADDR        FW_MAX_SIZE
113 
114 #define MIN_RMA_SLOF            (128 * MiB)
115 
116 #define PHANDLE_INTC            0x00001111
117 
118 /* These two functions implement the VCPU id numbering: one to compute them
119  * all and one to identify thread 0 of a VCORE. Any change to the first one
120  * is likely to have an impact on the second one, so let's keep them close.
121  */
122 static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
123 {
124     MachineState *ms = MACHINE(spapr);
125     unsigned int smp_threads = ms->smp.threads;
126 
127     assert(spapr->vsmt);
128     return
129         (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
130 }
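/*
 * Worked example (illustrative values, not from the source): with
 * ms->smp.threads = 4 and spapr->vsmt = 8, each guest core occupies the
 * bottom of an 8-wide VCPU id stride: cpu_index 0..3 map to VCPU ids
 * 0..3, cpu_index 4..7 map to VCPU ids 8..11, and ids 4..7 stay unused.
 */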
131 static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
132                                       PowerPCCPU *cpu)
133 {
134     assert(spapr->vsmt);
135     return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
136 }
137 
138 int spapr_max_server_number(SpaprMachineState *spapr)
139 {
140     MachineState *ms = MACHINE(spapr);
141 
142     assert(spapr->vsmt);
143     return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
144 }
145 
146 static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
147                                   int smt_threads)
148 {
149     int i, ret = 0;
150     g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
151     g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2);
152     int index = spapr_get_vcpu_id(cpu);
153 
154     if (cpu->compat_pvr) {
155         ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
156         if (ret < 0) {
157             return ret;
158         }
159     }
160 
161     /* Build interrupt servers and gservers properties */
162     for (i = 0; i < smt_threads; i++) {
163         servers_prop[i] = cpu_to_be32(index + i);
164         /* Hack, direct the group queues back to cpu 0 */
165         gservers_prop[i*2] = cpu_to_be32(index + i);
166         gservers_prop[i*2 + 1] = 0;
167     }
168     ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
169                       servers_prop, sizeof(*servers_prop) * smt_threads);
170     if (ret < 0) {
171         return ret;
172     }
173     ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
174                       gservers_prop, sizeof(*gservers_prop) * smt_threads * 2);
175 
176     return ret;
177 }
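/*
 * Illustrative output: for a vcpu with id 8 and smt_threads = 4, the
 * loop above produces
 *     ibm,ppc-interrupt-server#s  = <8 9 10 11>
 *     ibm,ppc-interrupt-gserver#s = <8 0 9 0 10 0 11 0>
 * i.e. one (server, group) pair per thread with the group entry zeroed,
 * matching the "direct the group queues back to cpu 0" hack above.
 */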
178 
179 static void spapr_dt_pa_features(SpaprMachineState *spapr,
180                                  PowerPCCPU *cpu,
181                                  void *fdt, int offset)
182 {
183     /*
184      * SSO (SAO) ordering is supported on KVM and thread=single hosts,
185      * but not MTTCG, so disable it. To advertise it, a cap would have
186      * to be added, or support implemented for MTTCG.
187      *
188      * Copy/paste is not supported by TCG, so it is not advertised. KVM
189      * can execute them but it has no accelerator drivers which are usable,
190      * so there isn't much need for it anyway.
191      */
192 
193     /* These should be kept in sync with pnv */
194     uint8_t pa_features_206[] = { 6, 0,
195         0xf6, 0x1f, 0xc7, 0x00, 0x00, 0xc0 };
196     uint8_t pa_features_207[] = { 24, 0,
197         0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0,
198         0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
199         0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
200         0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
201     uint8_t pa_features_300[] = { 66, 0,
202         /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
203         /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
204         0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
205         /* 6: DS207 */
206         0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
207         /* 16: Vector */
208         0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
209         /* 18: Vec. Scalar, 20: Vec. XOR */
210         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
211         /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
212         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
213         /* 32: LE atomic, 34: EBB + ext EBB */
214         0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
215         /* 40: Radix MMU */
216         0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
217         /* 42: PM, 44: PC RA, 46: SC vec'd */
218         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
219         /* 48: SIMD, 50: QP BFP, 52: String */
220         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
221         /* 54: DecFP, 56: DecI, 58: SHA */
222         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
223         /* 60: NM atomic, 62: RNG */
224         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
225     };
226     /* 3.1 removes SAO, HTM support */
227     uint8_t pa_features_31[] = { 74, 0,
228         /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
229         /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
230         0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
231         /* 6: DS207 */
232         0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
233         /* 16: Vector */
234         0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
235         /* 18: Vec. Scalar, 20: Vec. XOR */
236         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
237         /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
238         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
239         /* 32: LE atomic, 34: EBB + ext EBB */
240         0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
241         /* 40: Radix MMU */
242         0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
243         /* 42: PM, 44: PC RA, 46: SC vec'd */
244         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
245         /* 48: SIMD, 50: QP BFP, 52: String */
246         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
247         /* 54: DecFP, 56: DecI, 58: SHA */
248         0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
249         /* 60: NM atomic, 62: RNG, 64: DAWR1 (ISA 3.1) */
250         0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
251         /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */
252         0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */
253         /* 72: [P]HASHST/[P]HASHCHK */
254         0x80, 0x00,                         /* 72 - 73 */
255     };
256     uint8_t *pa_features = NULL;
257     size_t pa_size;
258 
259     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
260         pa_features = pa_features_206;
261         pa_size = sizeof(pa_features_206);
262     }
263     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
264         pa_features = pa_features_207;
265         pa_size = sizeof(pa_features_207);
266     }
267     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
268         pa_features = pa_features_300;
269         pa_size = sizeof(pa_features_300);
270     }
271     if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, cpu->compat_pvr)) {
272         pa_features = pa_features_31;
273         pa_size = sizeof(pa_features_31);
274     }
275     if (!pa_features) {
276         return;
277     }
278 
279     if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
280         /*
281          * Note: we keep CI large pages off by default because a 64K capable
282          * guest provisioned with large pages might otherwise try to map a qemu
283          * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
284          * even if that qemu runs on a 4k host.
285          * We add this bit back here if we are confident this is not an issue
286          */
287         pa_features[3] |= 0x20;
288     }
289     if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
290         pa_features[24] |= 0x80;    /* Transactional memory support */
291     }
292     if (spapr->cas_pre_isa3_guest && pa_size > 40) {
293         /* Workaround for broken kernels that attempt (guest) radix
294          * mode when they can't handle it, if they see the radix bit set
295          * in pa-features. So hide it from them. */
296         pa_features[40 + 2] &= ~0x80; /* Radix MMU */
297     }
298     if (spapr_get_cap(spapr, SPAPR_CAP_DAWR1)) {
299         g_assert(pa_size > 66);
300         pa_features[66] |= 0x80;
301     }
302 
303     _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
304 }
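/*
 * Indexing note: each pa_features_* array begins with a two-byte
 * { length, descriptor } header, so attribute byte N of the property
 * payload sits at array index N + 2. That is why the radix bit
 * documented as byte 40 is cleared via pa_features[40 + 2] above.
 */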
305 
306 static void spapr_dt_pi_features(SpaprMachineState *spapr,
307                                  PowerPCCPU *cpu,
308                                  void *fdt, int offset)
309 {
310     uint8_t pi_features[] = { 1, 0,
311         0x00 };
312 
313     if (kvm_enabled() && ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00,
314                                           0, cpu->compat_pvr)) {
315         /*
316          * POWER9 and later CPUs with KVM run in LPAR-per-thread mode where
317          * all threads are essentially independent CPUs, and msgsndp does not
318          * work (because it is physically-addressed) and therefore is
319          * emulated by KVM, so disable it here to ensure XIVE will be used.
320          * This is both KVM and CPU implementation-specific behaviour so a KVM
321          * cap would be cleanest, but for now this works. If KVM ever permits
322          * native msgsndp execution by guests, a cap could be added at that
323          * time.
324          */
325         pi_features[2] |= 0x08; /* 4: No msgsndp */
326     }
327 
328     _FDT((fdt_setprop(fdt, offset, "ibm,pi-features", pi_features,
329                       sizeof(pi_features))));
330 }
331 
332 static hwaddr spapr_node0_size(MachineState *machine)
333 {
334     if (machine->numa_state->num_nodes) {
335         int i;
336         for (i = 0; i < machine->numa_state->num_nodes; ++i) {
337             if (machine->numa_state->nodes[i].node_mem) {
338                 return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
339                            machine->ram_size);
340             }
341         }
342     }
343     return machine->ram_size;
344 }
345 
346 static void add_str(GString *s, const gchar *s1)
347 {
348     g_string_append_len(s, s1, strlen(s1) + 1);
349 }
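/*
 * add_str() deliberately appends the trailing NUL as well, so the
 * GString accumulates a sequence of NUL-terminated strings -- the
 * encoding expected of FDT string-list properties such as
 * "ibm,hypertas-functions" built in spapr_dt_rtas() below.
 */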
350 
351 static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid,
352                                 hwaddr start, hwaddr size)
353 {
354     char mem_name[32];
355     uint64_t mem_reg_property[2];
356     int off;
357 
358     mem_reg_property[0] = cpu_to_be64(start);
359     mem_reg_property[1] = cpu_to_be64(size);
360 
361     sprintf(mem_name, "memory@%" HWADDR_PRIx, start);
362     off = fdt_add_subnode(fdt, 0, mem_name);
363     _FDT(off);
364     _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
365     _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
366                       sizeof(mem_reg_property))));
367     spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid);
368     return off;
369 }
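/*
 * Sketch of the resulting node in DTS syntax (values illustrative):
 *
 *     memory@0 {
 *         device_type = "memory";
 *         reg = <0x0 0x0 0x0 0x80000000>;  // 64-bit start, 64-bit size
 *         ibm,associativity = <...>;       // written by spapr_numa_*
 *     };
 */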
370 
371 static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
372 {
373     MemoryDeviceInfoList *info;
374 
375     for (info = list; info; info = info->next) {
376         MemoryDeviceInfo *value = info->value;
377 
378         if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
379             PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;
380 
381             if (addr >= pcdimm_info->addr &&
382                 addr < (pcdimm_info->addr + pcdimm_info->size)) {
383                 return pcdimm_info->node;
384             }
385         }
386     }
387 
388     return -1;
389 }
390 
391 struct sPAPRDrconfCellV2 {
392      uint32_t seq_lmbs;
393      uint64_t base_addr;
394      uint32_t drc_index;
395      uint32_t aa_index;
396      uint32_t flags;
397 } QEMU_PACKED;
398 
399 typedef struct DrconfCellQueue {
400     struct sPAPRDrconfCellV2 cell;
401     QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
402 } DrconfCellQueue;
403 
404 static DrconfCellQueue *
405 spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
406                       uint32_t drc_index, uint32_t aa_index,
407                       uint32_t flags)
408 {
409     DrconfCellQueue *elem;
410 
411     elem = g_malloc0(sizeof(*elem));
412     elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
413     elem->cell.base_addr = cpu_to_be64(base_addr);
414     elem->cell.drc_index = cpu_to_be32(drc_index);
415     elem->cell.aa_index = cpu_to_be32(aa_index);
416     elem->cell.flags = cpu_to_be32(flags);
417 
418     return elem;
419 }
420 
421 static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt,
422                                       int offset, MemoryDeviceInfoList *dimms)
423 {
424     MachineState *machine = MACHINE(spapr);
425     uint8_t *int_buf, *cur_index;
426     int ret;
427     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
428     uint64_t addr, cur_addr, size;
429     uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
430     uint64_t mem_end = machine->device_memory->base +
431                        memory_region_size(&machine->device_memory->mr);
432     uint32_t node, buf_len, nr_entries = 0;
433     SpaprDrc *drc;
434     DrconfCellQueue *elem, *next;
435     MemoryDeviceInfoList *info;
436     QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
437         = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);
438 
439     /* Entry to cover RAM and the gap area */
440     elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
441                                  SPAPR_LMB_FLAGS_RESERVED |
442                                  SPAPR_LMB_FLAGS_DRC_INVALID);
443     QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
444     nr_entries++;
445 
446     cur_addr = machine->device_memory->base;
447     for (info = dimms; info; info = info->next) {
448         PCDIMMDeviceInfo *di = info->value->u.dimm.data;
449 
450         addr = di->addr;
451         size = di->size;
452         node = di->node;
453 
454         /*
455          * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The
456          * area is marked hotpluggable in the next iteration for the bigger
457          * chunk including the NVDIMM occupied area.
458          */
459         if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM)
460             continue;
461 
462         /* Entry for hot-pluggable area */
463         if (cur_addr < addr) {
464             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
465             g_assert(drc);
466             elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
467                                          cur_addr, spapr_drc_index(drc), -1, 0);
468             QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
469             nr_entries++;
470         }
471 
472         /* Entry for DIMM */
473         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
474         g_assert(drc);
475         elem = spapr_get_drconf_cell(size / lmb_size, addr,
476                                      spapr_drc_index(drc), node,
477                                      (SPAPR_LMB_FLAGS_ASSIGNED |
478                                       SPAPR_LMB_FLAGS_HOTREMOVABLE));
479         QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
480         nr_entries++;
481         cur_addr = addr + size;
482     }
483 
484     /* Entry for remaining hotpluggable area */
485     if (cur_addr < mem_end) {
486         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
487         g_assert(drc);
488         elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
489                                      cur_addr, spapr_drc_index(drc), -1, 0);
490         QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
491         nr_entries++;
492     }
493 
494     buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
495     int_buf = cur_index = g_malloc0(buf_len);
496     *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
497     cur_index += sizeof(nr_entries);
498 
499     QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
500         memcpy(cur_index, &elem->cell, sizeof(elem->cell));
501         cur_index += sizeof(elem->cell);
502         QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
503         g_free(elem);
504     }
505 
506     ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
507     g_free(int_buf);
508     if (ret < 0) {
509         return -1;
510     }
511     return 0;
512 }
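/*
 * The "ibm,dynamic-memory-v2" property built above is one big-endian
 * word holding nr_entries, followed by that many packed
 * sPAPRDrconfCellV2 records { seq_lmbs, base_addr, drc_index, aa_index,
 * flags }. Each record covers a run of seq_lmbs contiguous LMBs that
 * share flags and associativity, which keeps the property far smaller
 * than the per-LMB v1 encoding below.
 */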
513 
514 static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt,
515                                    int offset, MemoryDeviceInfoList *dimms)
516 {
517     MachineState *machine = MACHINE(spapr);
518     int i, ret;
519     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
520     uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
521     uint32_t nr_lmbs = (machine->device_memory->base +
522                        memory_region_size(&machine->device_memory->mr)) /
523                        lmb_size;
524     uint32_t *int_buf, *cur_index, buf_len;
525 
526     /*
527      * Allocate a buffer large enough to hold the ibm,dynamic-memory property
528      */
529     buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
530     cur_index = int_buf = g_malloc0(buf_len);
531     int_buf[0] = cpu_to_be32(nr_lmbs);
532     cur_index++;
533     for (i = 0; i < nr_lmbs; i++) {
534         uint64_t addr = i * lmb_size;
535         uint32_t *dynamic_memory = cur_index;
536 
537         if (i >= device_lmb_start) {
538             SpaprDrc *drc;
539 
540             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
541             g_assert(drc);
542 
543             dynamic_memory[0] = cpu_to_be32(addr >> 32);
544             dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
545             dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
546             dynamic_memory[3] = cpu_to_be32(0); /* reserved */
547             dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
548             if (memory_region_present(get_system_memory(), addr)) {
549                 dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
550             } else {
551                 dynamic_memory[5] = cpu_to_be32(0);
552             }
553         } else {
554             /*
555              * LMB information for RMA, boot time RAM and gap b/n RAM and
556              * device memory region -- all these are marked as reserved
557              * and as having no valid DRC.
558              */
559             dynamic_memory[0] = cpu_to_be32(addr >> 32);
560             dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
561             dynamic_memory[2] = cpu_to_be32(0);
562             dynamic_memory[3] = cpu_to_be32(0); /* reserved */
563             dynamic_memory[4] = cpu_to_be32(-1);
564             dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
565                                             SPAPR_LMB_FLAGS_DRC_INVALID);
566         }
567 
568         cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
569     }
570     ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
571     g_free(int_buf);
572     if (ret < 0) {
573         return -1;
574     }
575     return 0;
576 }
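/*
 * By contrast, v1 "ibm,dynamic-memory" is a count followed by one
 * six-word entry per LMB: { addr-hi, addr-lo, drc-index, reserved,
 * assoc-node, flags }. With 256 MiB LMBs a large maxmem yields
 * thousands of entries, which is what motivated the v2 encoding.
 */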
577 
578 /*
579  * Adds ibm,dynamic-reconfiguration-memory node.
580  * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
581  * of this device tree node.
582  */
583 static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
584                                                    void *fdt)
585 {
586     MachineState *machine = MACHINE(spapr);
587     int ret, offset;
588     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
589     uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
590                                 cpu_to_be32(lmb_size & 0xffffffff)};
591     MemoryDeviceInfoList *dimms = NULL;
592 
593     /* Don't create the node if there is no device memory. */
594     if (!machine->device_memory) {
595         return 0;
596     }
597 
598     offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");
599 
600     ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
601                     sizeof(prop_lmb_size));
602     if (ret < 0) {
603         return ret;
604     }
605 
606     ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
607     if (ret < 0) {
608         return ret;
609     }
610 
611     ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
612     if (ret < 0) {
613         return ret;
614     }
615 
616     /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
617     dimms = qmp_memory_device_list();
618     if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
619         ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
620     } else {
621         ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
622     }
623     qapi_free_MemoryDeviceInfoList(dimms);
624 
625     if (ret < 0) {
626         return ret;
627     }
628 
629     ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);
630 
631     return ret;
632 }
633 
634 static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
635 {
636     MachineState *machine = MACHINE(spapr);
637     hwaddr mem_start, node_size;
638     int i, nb_nodes = machine->numa_state->num_nodes;
639     NodeInfo *nodes = machine->numa_state->nodes;
640 
641     for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
642         if (!nodes[i].node_mem) {
643             continue;
644         }
645         if (mem_start >= machine->ram_size) {
646             node_size = 0;
647         } else {
648             node_size = nodes[i].node_mem;
649             if (node_size > machine->ram_size - mem_start) {
650                 node_size = machine->ram_size - mem_start;
651             }
652         }
653         if (!mem_start) {
654             /* spapr_machine_init() checks for rma_size <= node0_size
655              * already */
656             spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
657             mem_start += spapr->rma_size;
658             node_size -= spapr->rma_size;
659         }
660         for ( ; node_size; ) {
661             hwaddr sizetmp = pow2floor(node_size);
662 
663             /* mem_start != 0 here */
664             if (ctzl(mem_start) < ctzl(sizetmp)) {
665                 sizetmp = 1ULL << ctzl(mem_start);
666             }
667 
668             spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
669             node_size -= sizetmp;
670             mem_start += sizetmp;
671         }
672     }
673 
674     /* Generate ibm,dynamic-reconfiguration-memory node if required */
675     if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
676         int ret;
677 
678         ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
679         if (ret) {
680             return ret;
681         }
682     }
683 
684     return 0;
685 }
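/*
 * Worked example of the splitting loop above (illustrative): a node
 * holding 3 GiB starting at mem_start = 1 GiB is emitted as
 * memory@40000000 (1 GiB, capped by the alignment of mem_start) and
 * then memory@80000000 (2 GiB), since each chunk must be a power of
 * two no larger than the alignment of its start address.
 */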
686 
687 static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
688                          SpaprMachineState *spapr)
689 {
690     MachineState *ms = MACHINE(spapr);
691     PowerPCCPU *cpu = POWERPC_CPU(cs);
692     CPUPPCState *env = &cpu->env;
693     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
694     int index = spapr_get_vcpu_id(cpu);
695     uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
696                        0xffffffff, 0xffffffff};
697     uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
698         : SPAPR_TIMEBASE_FREQ;
699     uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
700     uint32_t page_sizes_prop[64];
701     size_t page_sizes_prop_size;
702     unsigned int smp_threads = ms->smp.threads;
703     uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
704     uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
705     int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
706     SpaprDrc *drc;
707     int drc_index;
708     uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
709     int i;
710 
711     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, env->core_index);
712     if (drc) {
713         drc_index = spapr_drc_index(drc);
714         _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
715     }
716 
717     _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
718     _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
719 
720     _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
721     _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
722                            env->dcache_line_size)));
723     _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
724                            env->dcache_line_size)));
725     _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
726                            env->icache_line_size)));
727     _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
728                            env->icache_line_size)));
729 
730     if (pcc->l1_dcache_size) {
731         _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
732                                pcc->l1_dcache_size)));
733     } else {
734         warn_report("Unknown L1 dcache size for cpu");
735     }
736     if (pcc->l1_icache_size) {
737         _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
738                                pcc->l1_icache_size)));
739     } else {
740         warn_report("Unknown L1 icache size for cpu");
741     }
742 
743     _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
744     _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
745     _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
746     _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
747     _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
748     _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
749 
750     if (ppc_has_spr(cpu, SPR_PURR)) {
751         _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
752     }
753     if (ppc_has_spr(cpu, SPR_SPURR)) {
754         _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
755     }
756 
757     if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
758         _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
759                           segs, sizeof(segs))));
760     }
761 
762     /* Advertise VSX (vector extensions) if available
763      *   1               == VMX / Altivec available
764      *   2               == VSX available
765      *
766      * Only CPUs for which we create core types in spapr_cpu_core.c
767      * are possible, and all of those have VMX */
768     if (env->insns_flags & PPC_ALTIVEC) {
769         if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
770             _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
771         } else {
772             _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
773         }
774     }
775 
776     /* Advertise DFP (Decimal Floating Point) if available
777      *   0 / no property == no DFP
778      *   1               == DFP available */
779     if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
780         _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
781     }
782 
783     page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
784                                                       sizeof(page_sizes_prop));
785     if (page_sizes_prop_size) {
786         _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
787                           page_sizes_prop, page_sizes_prop_size)));
788     }
789 
790     spapr_dt_pa_features(spapr, cpu, fdt, offset);
791 
792     spapr_dt_pi_features(spapr, cpu, fdt, offset);
793 
794     _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
795                            cs->cpu_index / vcpus_per_socket)));
796 
797     _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
798                       pft_size_prop, sizeof(pft_size_prop))));
799 
800     if (ms->numa_state->num_nodes > 1) {
801         _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu));
802     }
803 
804     _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));
805 
806     if (pcc->radix_page_info) {
807         for (i = 0; i < pcc->radix_page_info->count; i++) {
808             radix_AP_encodings[i] =
809                 cpu_to_be32(pcc->radix_page_info->entries[i]);
810         }
811         _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
812                           radix_AP_encodings,
813                           pcc->radix_page_info->count *
814                           sizeof(radix_AP_encodings[0]))));
815     }
816 
817     /*
818      * We set this property to let the guest know that it can use the large
819      * decrementer and its width in bits.
820      */
821     if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF)
822         _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
823                               pcc->lrg_decr_bits)));
824 }
825 
826 static void spapr_dt_one_cpu(void *fdt, SpaprMachineState *spapr, CPUState *cs,
827                              int cpus_offset)
828 {
829     PowerPCCPU *cpu = POWERPC_CPU(cs);
830     int index = spapr_get_vcpu_id(cpu);
831     DeviceClass *dc = DEVICE_GET_CLASS(cs);
832     g_autofree char *nodename = NULL;
833     int offset;
834 
835     if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
836         return;
837     }
838 
839     nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
840     offset = fdt_add_subnode(fdt, cpus_offset, nodename);
841     _FDT(offset);
842     spapr_dt_cpu(cs, fdt, offset, spapr);
843 }
844 
845 
846 static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
847 {
848     CPUState **rev;
849     CPUState *cs;
850     int n_cpus;
851     int cpus_offset;
852     int i;
853 
854     cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
855     _FDT(cpus_offset);
856     _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
857     _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
858 
859     /*
860      * We walk the CPUs in reverse order to ensure that CPU DT nodes
861      * created by fdt_add_subnode() end up in the right order in FDT
862      * so that the guest kernel enumerates the CPUs correctly.
863      *
864      * The CPU list cannot be traversed in reverse order, so we need
865      * to do extra work.
866      */
867     n_cpus = 0;
868     rev = NULL;
869     CPU_FOREACH(cs) {
870         rev = g_renew(CPUState *, rev, n_cpus + 1);
871         rev[n_cpus++] = cs;
872     }
873 
874     for (i = n_cpus - 1; i >= 0; i--) {
875         spapr_dt_one_cpu(fdt, spapr, rev[i], cpus_offset);
876     }
877 
878     g_free(rev);
879 }
880 
881 static int spapr_dt_rng(void *fdt)
882 {
883     int node;
884     int ret;
885 
886     node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
887     if (node <= 0) {
888         return -1;
889     }
890     ret = fdt_setprop_string(fdt, node, "device_type",
891                              "ibm,platform-facilities");
892     ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
893     ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);
894 
895     node = fdt_add_subnode(fdt, node, "ibm,random-v1");
896     if (node <= 0) {
897         return -1;
898     }
899     ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");
900 
901     return ret ? -1 : 0;
902 }
903 
904 static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
905 {
906     MachineState *ms = MACHINE(spapr);
907     int rtas;
908     GString *hypertas = g_string_sized_new(256);
909     GString *qemu_hypertas = g_string_sized_new(256);
910     uint32_t lrdr_capacity[] = {
911         0,
912         0,
913         cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
914         cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
915         cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
916     };
917 
918     /* Do we have device memory? */
919     if (MACHINE(spapr)->device_memory) {
920         uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
921             memory_region_size(&MACHINE(spapr)->device_memory->mr);
922 
923         lrdr_capacity[0] = cpu_to_be32(max_device_addr >> 32);
924         lrdr_capacity[1] = cpu_to_be32(max_device_addr & 0xffffffff);
925     }
926 
927     _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
928 
929     /* hypertas */
930     add_str(hypertas, "hcall-pft");
931     add_str(hypertas, "hcall-term");
932     add_str(hypertas, "hcall-dabr");
933     add_str(hypertas, "hcall-interrupt");
934     add_str(hypertas, "hcall-tce");
935     add_str(hypertas, "hcall-vio");
936     add_str(hypertas, "hcall-splpar");
937     add_str(hypertas, "hcall-join");
938     add_str(hypertas, "hcall-bulk");
939     add_str(hypertas, "hcall-set-mode");
940     add_str(hypertas, "hcall-sprg0");
941     add_str(hypertas, "hcall-copy");
942     add_str(hypertas, "hcall-debug");
943     add_str(hypertas, "hcall-vphn");
944     if (spapr_get_cap(spapr, SPAPR_CAP_RPT_INVALIDATE) == SPAPR_CAP_ON) {
945         add_str(hypertas, "hcall-rpt-invalidate");
946     }
947 
948     add_str(qemu_hypertas, "hcall-memop1");
949 
950     if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
951         add_str(hypertas, "hcall-multi-tce");
952     }
953 
954     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
955         add_str(hypertas, "hcall-hpt-resize");
956     }
957 
958     add_str(hypertas, "hcall-watchdog");
959 
960     _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
961                      hypertas->str, hypertas->len));
962     g_string_free(hypertas, TRUE);
963     _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
964                      qemu_hypertas->str, qemu_hypertas->len));
965     g_string_free(qemu_hypertas, TRUE);
966 
967     spapr_numa_write_rtas_dt(spapr, fdt, rtas);
968 
969     /*
970      * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
971      * and 16 bytes per CPU for system reset error log plus an extra 8 bytes.
972      *
973      * The system reset requirements are driven by existing Linux and PowerVM
974      * implementation which (contrary to PAPR) saves r3 in the error log
975      * structure like machine check, so Linux expects to find the saved r3
976      * value at the address in r3 upon FWNMI-enabled sreset interrupt (and
977      * does not look at the error value).
978      *
979      * System reset interrupts are not subject to interlock like machine
980      * check, so this memory area could be corrupted if the sreset is
981      * interrupted by a machine check (or vice versa) if it was shared. To
982      * prevent this, system reset uses per-CPU areas for the sreset save
983      * area. A system reset that interrupts a system reset handler could
984      * still overwrite this area, but Linux doesn't try to recover in that
985      * case anyway.
986      *
987      * The extra 8 bytes is required because Linux's FWNMI error log check
988      * is off-by-one.
989      *
990      * RTAS_MIN_SIZE is required for the RTAS blob itself.
991      */
992     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_MIN_SIZE +
993                           RTAS_ERROR_LOG_MAX +
994                           ms->smp.max_cpus * sizeof(uint64_t) * 2 +
995                           sizeof(uint64_t)));
996     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
997                           RTAS_ERROR_LOG_MAX));
998     _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
999                           RTAS_EVENT_SCAN_RATE));
1000 
1001     g_assert(msi_nonbroken);
1002     _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
1003 
1004     /*
1005      * According to PAPR, rtas ibm,os-term does not guarantee a return
1006      * back to the guest cpu.
1007      *
1008      * An additional ibm,extended-os-term property indicates that the
1009      * RTAS call will always return. Set this property.
1010      */
1011     _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));
1012 
1013     _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
1014                      lrdr_capacity, sizeof(lrdr_capacity)));
1015 
1016     spapr_dt_rtas_tokens(fdt, rtas);
1017 }
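/*
 * Example rtas-size computation (illustrative): with max_cpus = 8 the
 * property above comes to RTAS_MIN_SIZE + RTAS_ERROR_LOG_MAX +
 * 8 * 16 + 8 bytes: the RTAS blob itself, one machine check error log,
 * a 16-byte per-CPU sreset save area, and the extra 8 bytes for
 * Linux's off-by-one log check described in the comment above.
 */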
1018 
1019 /*
1020  * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
1021  * and the XIVE features that the guest may request and thus the valid
1022  * values for bytes 23..26 of option vector 5:
1023  */
1024 static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
1025                                           int chosen)
1026 {
1027     PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
1028 
1029     char val[2 * 4] = {
1030         23, 0x00, /* XICS / XIVE mode */
1031         24, 0x00, /* Hash/Radix, filled in below. */
1032         25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
1033         26, 0x40, /* Radix options: GTSE == yes. */
1034     };
1035 
1036     if (spapr->irq->xics && spapr->irq->xive) {
1037         val[1] = SPAPR_OV5_XIVE_BOTH;
1038     } else if (spapr->irq->xive) {
1039         val[1] = SPAPR_OV5_XIVE_EXPLOIT;
1040     } else {
1041         assert(spapr->irq->xics);
1042         val[1] = SPAPR_OV5_XIVE_LEGACY;
1043     }
1044 
1045     if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
1046                           first_ppc_cpu->compat_pvr)) {
1047         /*
1048          * If we're in a pre POWER9 compat mode then the guest should
1049          * do hash and use the legacy interrupt mode
1050          */
1051         val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
1052         val[3] = 0x00; /* Hash */
1053         spapr_check_mmu_mode(false);
1054     } else if (kvm_enabled()) {
1055         if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
1056             val[3] = 0x80; /* OV5_MMU_BOTH */
1057         } else if (kvmppc_has_cap_mmu_radix()) {
1058             val[3] = 0x40; /* OV5_MMU_RADIX_300 */
1059         } else {
1060             val[3] = 0x00; /* Hash */
1061         }
1062     } else {
1063         /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
1064         val[3] = 0xC0;
1065     }
1066     _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
1067                      val, sizeof(val)));
1068 }
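/*
 * val[] above is a list of { option-vector-5 byte index, supported
 * values } pairs: the property tells the guest which settings it may
 * legitimately request at client-architecture-support time for bytes
 * 23 (XICS/XIVE), 24 (hash/radix), 25 (hash options) and 26 (radix
 * options).
 */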
1069 
1070 static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
1071 {
1072     MachineState *machine = MACHINE(spapr);
1073     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
1074     int chosen;
1075 
1076     _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));
1077 
1078     if (reset) {
1079         const char *boot_device = spapr->boot_device;
1080         g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
1081         size_t cb = 0;
1082         g_autofree char *bootlist = get_boot_devices_list(&cb);
1083 
1084         if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
1085             _FDT(fdt_setprop_string(fdt, chosen, "bootargs",
1086                                     machine->kernel_cmdline));
1087         }
1088 
1089         if (spapr->initrd_size) {
1090             _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
1091                                   spapr->initrd_base));
1092             _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
1093                                   spapr->initrd_base + spapr->initrd_size));
1094         }
1095 
1096         if (spapr->kernel_size) {
1097             uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr),
1098                                   cpu_to_be64(spapr->kernel_size) };
1099 
1100             _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
1101                          &kprop, sizeof(kprop)));
1102             if (spapr->kernel_le) {
1103                 _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
1104             }
1105         }
1106         if (machine->boot_config.has_menu && machine->boot_config.menu) {
1107             _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", true)));
1108         }
1109         _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
1110         _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
1111         _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));
1112 
1113         if (cb && bootlist) {
1114             int i;
1115 
1116             for (i = 0; i < cb; i++) {
1117                 if (bootlist[i] == '\n') {
1118                     bootlist[i] = ' ';
1119                 }
1120             }
1121             _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
1122         }
1123 
1124         if (boot_device && strlen(boot_device)) {
1125             _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
1126         }
1127 
1128         if (spapr->want_stdout_path && stdout_path) {
1129             /*
1130              * "linux,stdout-path" and "stdout" properties are
1131              * deprecated by linux kernel. New platforms should only
1132              * use the "stdout-path" property. Set the new property
1133              * and continue using older property to remain compatible
1134              * with the existing firmware.
1135              */
1136             _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
1137             _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
1138         }
1139 
1140         /*
1141          * We can deal with BAR reallocation just fine, advertise it
1142          * to the guest
1143          */
1144         if (smc->linux_pci_probe) {
1145             _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
1146         }
1147 
1148         spapr_dt_ov5_platform_support(spapr, fdt, chosen);
1149     }
1150 
1151     _FDT(fdt_setprop(fdt, chosen, "rng-seed", spapr->fdt_rng_seed, 32));
1152 
1153     _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
1154 }
1155 
1156 static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
1157 {
1158     /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
1159      * KVM to work under pHyp with some guest co-operation */
1160     int hypervisor;
1161     uint8_t hypercall[16];
1162 
1163     _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
1164     /* indicate KVM hypercall interface */
1165     _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
1166     if (kvmppc_has_cap_fixup_hcalls()) {
1167         /*
1168          * Older KVM versions with older guest kernels were broken
1169          * with the magic page, don't allow the guest to map it.
1170          */
1171         if (!kvmppc_get_hypercall(cpu_env(first_cpu), hypercall,
1172                                   sizeof(hypercall))) {
1173             _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
1174                              hypercall, sizeof(hypercall)));
1175         }
1176     }
1177 }
1178 
1179 void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
1180 {
1181     MachineState *machine = MACHINE(spapr);
1182     MachineClass *mc = MACHINE_GET_CLASS(machine);
1183     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
1184     uint32_t root_drc_type_mask = 0;
1185     int ret;
1186     void *fdt;
1187     SpaprPhbState *phb;
1188     char *buf;
1189 
1190     fdt = g_malloc0(space);
1191     _FDT((fdt_create_empty_tree(fdt, space)));
1192 
1193     /* Root node */
1194     _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
1195     _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
1196     _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));
1197 
1198     /* Guest UUID & Name */
1199     buf = qemu_uuid_unparse_strdup(&qemu_uuid);
1200     _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
1201     if (qemu_uuid_set) {
1202         _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
1203     }
1204     g_free(buf);
1205 
1206     if (qemu_get_vm_name()) {
1207         _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
1208                                 qemu_get_vm_name()));
1209     }
1210 
1211     /* Host Model & Serial Number */
1212     if (spapr->host_model) {
1213         _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
1214     } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
1215         _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
1216         g_free(buf);
1217     }
1218 
1219     if (spapr->host_serial) {
1220         _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
1221     } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
1222         _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
1223         g_free(buf);
1224     }
1225 
1226     _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
1227     _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
1228 
1229     /* /interrupt controller */
1230     spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC);
1231 
1232     ret = spapr_dt_memory(spapr, fdt);
1233     if (ret < 0) {
1234         error_report("couldn't setup memory nodes in fdt");
1235         exit(1);
1236     }
1237 
1238     /* /vdevice */
1239     spapr_dt_vdevice(spapr->vio_bus, fdt);
1240 
1241     if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
1242         ret = spapr_dt_rng(fdt);
1243         if (ret < 0) {
1244             error_report("could not set up rng device in the fdt");
1245             exit(1);
1246         }
1247     }
1248 
1249     QLIST_FOREACH(phb, &spapr->phbs, list) {
1250         ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL);
1251         if (ret < 0) {
1252             error_report("couldn't setup PCI devices in fdt");
1253             exit(1);
1254         }
1255     }
1256 
1257     spapr_dt_cpus(fdt, spapr);
1258 
1259     /* ibm,drc-indexes and friends */
1260     root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
1261     if (smc->dr_phb_enabled) {
1262         root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB;
1263     }
1264     if (mc->nvdimm_supported) {
1265         root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM;
1266     }
1267     if (root_drc_type_mask) {
1268         _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask));
1269     }
1270 
1271     if (mc->has_hotpluggable_cpus) {
1272         int offset = fdt_path_offset(fdt, "/cpus");
1273         ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU);
1274         if (ret < 0) {
1275             error_report("Couldn't set up CPU DR device tree properties");
1276             exit(1);
1277         }
1278     }
1279 
1280     /* /event-sources */
1281     spapr_dt_events(spapr, fdt);
1282 
1283     /* /rtas */
1284     spapr_dt_rtas(spapr, fdt);
1285 
1286     /* /chosen */
1287     spapr_dt_chosen(spapr, fdt, reset);
1288 
1289     /* /hypervisor */
1290     if (kvm_enabled()) {
1291         spapr_dt_hypervisor(spapr, fdt);
1292     }
1293 
1294     /* Build memory reserve map */
1295     if (reset) {
1296         if (spapr->kernel_size) {
1297             _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr,
1298                                   spapr->kernel_size)));
1299         }
1300         if (spapr->initrd_size) {
1301             _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base,
1302                                   spapr->initrd_size)));
1303         }
1304     }
1305 
1306     /* NVDIMM devices */
1307     if (mc->nvdimm_supported) {
1308         spapr_dt_persistent_memory(spapr, fdt);
1309     }
1310 
1311     return fdt;
1312 }
1313 
1314 static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
1315 {
1316     SpaprMachineState *spapr = opaque;
1317 
1318     return (addr & 0x0fffffff) + spapr->kernel_addr;
1319 }
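/*
 * Used as the address translation callback when loading the kernel
 * ELF: only the low 256 MiB of the linked address is kept and rebased
 * onto spapr->kernel_addr, so e.g. a kernel linked at
 * 0xc000000000000000 still lands at the configured load address.
 */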
1320 
1321 static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
1322                                     PowerPCCPU *cpu)
1323 {
1324     CPUPPCState *env = &cpu->env;
1325 
1326     /* The TCG path should also be holding the BQL at this point */
1327     g_assert(bql_locked());
1328 
1329     g_assert(!vhyp_cpu_in_nested(cpu));
1330 
1331     if (FIELD_EX64(env->msr, MSR, PR)) {
1332         hcall_dprintf("Hypercall made with MSR[PR]=1\n");
1333         env->gpr[3] = H_PRIVILEGE;
1334     } else {
1335         env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
1336     }
1337 }
1338 
1339 struct LPCRSyncState {
1340     target_ulong value;
1341     target_ulong mask;
1342 };
1343 
1344 static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
1345 {
1346     struct LPCRSyncState *s = arg.host_ptr;
1347     PowerPCCPU *cpu = POWERPC_CPU(cs);
1348     CPUPPCState *env = &cpu->env;
1349     target_ulong lpcr;
1350 
1351     cpu_synchronize_state(cs);
1352     lpcr = env->spr[SPR_LPCR];
1353     lpcr &= ~s->mask;
1354     lpcr |= s->value;
1355     ppc_store_lpcr(cpu, lpcr);
1356 }
1357 
1358 void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
1359 {
1360     CPUState *cs;
1361     struct LPCRSyncState s = {
1362         .value = value,
1363         .mask = mask
1364     };
1365     CPU_FOREACH(cs) {
1366         run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
1367     }
1368 }
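/*
 * Minimal usage sketch (hypothetical values): to set and later clear
 * the interrupt little-endian bit on every vCPU one would do
 *
 *     spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE);
 *     spapr_set_all_lpcrs(0, LPCR_ILE);
 *
 * run_on_cpu() makes each update run on the target vCPU's own thread
 * with its register state synchronized first.
 */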
1369 
1370 /* May be used when the machine is not running */
1371 void spapr_init_all_lpcrs(target_ulong value, target_ulong mask)
1372 {
1373     CPUState *cs;
1374     CPU_FOREACH(cs) {
1375         PowerPCCPU *cpu = POWERPC_CPU(cs);
1376         CPUPPCState *env = &cpu->env;
1377         target_ulong lpcr;
1378 
1379         lpcr = env->spr[SPR_LPCR];
1380         lpcr = (lpcr & ~mask) | value;
1381         ppc_store_lpcr(cpu, lpcr);
1382     }
1383 }
1384 
1385 static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
1386                            target_ulong lpid, ppc_v3_pate_t *entry)
1387 {
1388     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1389     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
1390 
1391     if (!spapr_cpu->in_nested) {
1392         assert(lpid == 0);
1393 
1394         /* Copy PATE1:GR into PATE0:HR */
1395         entry->dw0 = spapr->patb_entry & PATE0_HR;
1396         entry->dw1 = spapr->patb_entry;
1397         return true;
1398     } else {
1399         if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
1400             return spapr_get_pate_nested_hv(spapr, cpu, lpid, entry);
1401         } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
1402             return spapr_get_pate_nested_papr(spapr, cpu, lpid, entry);
1403         } else {
1404             g_assert_not_reached();
1405         }
1406     }
1407 }
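/*
 * The masking above works because PATE1's GR bit and PATE0's HR bit
 * occupy the same bit position, so "patb_entry & PATE0_HR" lifts the
 * guest's radix flag from the second doubleword straight into dw0.
 */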
1408 
1409 static uint64_t *hpte_get_ptr(SpaprMachineState *s, unsigned index)
1410 {
1411     uint64_t *table = s->htab;
1412 
1413     return &table[2 * index];
1414 }
1415 
1416 static bool hpte_is_valid(SpaprMachineState *s, unsigned index)
1417 {
1418     return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_VALID;
1419 }
1420 
1421 static bool hpte_is_dirty(SpaprMachineState *s, unsigned index)
1422 {
1423     return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_HPTE_DIRTY;
1424 }
1425 
1426 static void hpte_set_clean(SpaprMachineState *s, unsigned index)
1427 {
1428     stq_be_p(hpte_get_ptr(s, index),
1429              ldq_be_p(hpte_get_ptr(s, index)) & ~HPTE64_V_HPTE_DIRTY);
1430 }
1431 
1432 static void hpte_set_dirty(SpaprMachineState *s, unsigned index)
1433 {
1434     stq_be_p(hpte_get_ptr(s, index),
1435              ldq_be_p(hpte_get_ptr(s, index)) | HPTE64_V_HPTE_DIRTY);
1436 }
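/*
 * Layout sketch for the accessors above (assuming HASH_PTE_SIZE_64 is
 * 16 bytes): every HPTE is two big-endian doublewords, so entry i lives
 * at table[2 * i] (dw0, holding the valid bit and the software-only
 * HPTE64_V_HPTE_DIRTY bit) and table[2 * i + 1] (dw1).  The dirty bit
 * is only consumed by the HPT migration code further down.
 */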
1437 
1438 /*
1439  * Get the fd to access the kernel htab, re-opening it if necessary
1440  */
1441 static int get_htab_fd(SpaprMachineState *spapr)
1442 {
1443     Error *local_err = NULL;
1444 
1445     if (spapr->htab_fd >= 0) {
1446         return spapr->htab_fd;
1447     }
1448 
1449     spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
1450     if (spapr->htab_fd < 0) {
1451         error_report_err(local_err);
1452     }
1453 
1454     return spapr->htab_fd;
1455 }
1456 
1457 void close_htab_fd(SpaprMachineState *spapr)
1458 {
1459     if (spapr->htab_fd >= 0) {
1460         close(spapr->htab_fd);
1461     }
1462     spapr->htab_fd = -1;
1463 }
1464 
1465 static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
1466 {
1467     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1468 
1469     return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
1470 }
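/*
 * Example: htab_shift = 27 (a 128 MiB HPT) with 128-byte PTEGs
 * (assuming HASH_PTEG_SIZE_64 == 8 * HASH_PTE_SIZE_64 == 128) yields a
 * mask of 2^27 / 2^7 - 1 = 0xfffff, i.e. 2^20 hash buckets.
 */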
1471 
1472 static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
1473 {
1474     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1475 
1476     assert(kvm_enabled());
1477 
1478     if (!spapr->htab) {
1479         return 0;
1480     }
1481 
1482     return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
1483 }
1484 
1485 static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
1486                                                 hwaddr ptex, int n)
1487 {
1488     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1489     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
1490 
1491     if (!spapr->htab) {
1492         /*
1493          * HTAB is controlled by KVM. Fetch into temporary buffer
1494          */
1495         ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
1496         kvmppc_read_hptes(hptes, ptex, n);
1497         return hptes;
1498     }
1499 
1500     /*
1501      * HTAB is controlled by QEMU. Just point to the internally
1502      * accessible PTEG.
1503      */
1504     return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
1505 }
1506 
1507 static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
1508                               const ppc_hash_pte64_t *hptes,
1509                               hwaddr ptex, int n)
1510 {
1511     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1512 
1513     if (!spapr->htab) {
1514         g_free((void *)hptes);
1515     }
1516 
1517     /* Nothing to do for a QEMU-managed HPT */
1518 }
1519 
1520 void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
1521                       uint64_t pte0, uint64_t pte1)
1522 {
1523     SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
1524     hwaddr offset = ptex * HASH_PTE_SIZE_64;
1525 
1526     if (!spapr->htab) {
1527         kvmppc_write_hpte(ptex, pte0, pte1);
1528     } else {
1529         if (pte0 & HPTE64_V_VALID) {
1530             stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1531             /*
1532              * When setting valid, we write PTE1 first. This ensures
1533              * proper synchronization with the reading code in
1534              * ppc_hash64_pteg_search()
1535              */
1536             smp_wmb();
1537             stq_p(spapr->htab + offset, pte0);
1538         } else {
1539             stq_p(spapr->htab + offset, pte0);
1540             /*
1541              * When clearing it we set PTE0 first. This ensures proper
1542              * synchronization with the reading code in
1543              * ppc_hash64_pteg_search()
1544              */
1545             smp_wmb();
1546             stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
1547         }
1548     }
1549 }
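/*
 * Both branches above follow the same rule: the doubleword that makes
 * the entry appear valid (pte0) is always written on the far side of
 * the barrier.  A concurrent ppc_hash64_pteg_search() therefore sees
 * either the old entry or a completely written new one, never a valid
 * pte0 paired with a stale pte1.
 */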
1550 
1551 static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1552                              uint64_t pte1)
1553 {
1554     hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
1555     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1556 
1557     if (!spapr->htab) {
1558         /* There should always be a hash table when this is called */
1559         error_report("spapr_hpte_set_c called with no hash table!");
1560         return;
1561     }
1562 
1563     /* The HW performs a non-atomic byte update */
1564     stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
1565 }
1566 
1567 static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1568                              uint64_t pte1)
1569 {
1570     hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
1571     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
1572 
1573     if (!spapr->htab) {
1574         /* There should always be a hash table when this is called */
1575         error_report("spapr_hpte_set_r called with no hash table!");
1576         return;
1577     }
1578 
1579     /* The HW performs a non-atomic byte update */
1580     stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
1581 }
1582 
1583 int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
1584 {
1585     int shift;
1586 
1587     /* We aim for a hash table of size 1/128 the size of RAM (rounded
1588      * up).  The PAPR recommendation is actually 1/64 of RAM size, but
1589      * that's much more than is needed for Linux guests */
1590     shift = ctz64(pow2ceil(ramsize)) - 7;
1591     shift = MAX(shift, 18); /* Minimum architected size */
1592     shift = MIN(shift, 46); /* Maximum architected size */
1593     return shift;
1594 }
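/*
 * Worked example: for 16 GiB of RAM, pow2ceil() returns 2^34, so
 * shift = 34 - 7 = 27, i.e. a 128 MiB hash table; the clamps keep the
 * result within the architected 2^18..2^46 range.
 */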
1595 
1596 void spapr_free_hpt(SpaprMachineState *spapr)
1597 {
1598     qemu_vfree(spapr->htab);
1599     spapr->htab = NULL;
1600     spapr->htab_shift = 0;
1601     close_htab_fd(spapr);
1602 }
1603 
1604 int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
1605 {
1606     ERRP_GUARD();
1607     long rc;
1608 
1609     /* Clean up any HPT info from a previous boot */
1610     spapr_free_hpt(spapr);
1611 
1612     rc = kvmppc_reset_htab(shift);
1613 
1614     if (rc == -EOPNOTSUPP) {
1615         error_setg(errp, "HPT not supported in nested guests");
1616         return -EOPNOTSUPP;
1617     }
1618 
1619     if (rc < 0) {
1620         /* kernel-side HPT needed, but couldn't allocate one */
1621         error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d",
1622                          shift);
1623         error_append_hint(errp, "Try smaller maxmem?\n");
1624         return -errno;
1625     } else if (rc > 0) {
1626         /* kernel-side HPT allocated */
1627         if (rc != shift) {
1628             error_setg(errp,
1629                        "Requested order %d HPT, but kernel allocated order %ld",
1630                        shift, rc);
1631             error_append_hint(errp, "Try smaller maxmem?\n");
1632             return -ENOSPC;
1633         }
1634 
1635         spapr->htab_shift = shift;
1636         spapr->htab = NULL;
1637     } else {
1638         /* kernel-side HPT not needed, allocate in userspace instead */
1639         size_t size = 1ULL << shift;
1640         int i;
1641 
1642         spapr->htab = qemu_memalign(size, size);
1643         memset(spapr->htab, 0, size);
1644         spapr->htab_shift = shift;
1645 
1646         for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
1647             hpte_set_dirty(spapr, i);
1648         }
1649     }
1650     /* We're setting up a hash table, so that means we're not radix */
1651     spapr->patb_entry = 0;
1652     spapr_init_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
1653     return 0;
1654 }
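/*
 * kvmppc_reset_htab() return convention relied on above: rc > 0 means
 * the kernel allocated an HPT of order rc (spapr->htab stays NULL and
 * the table is reached through the htab fd), rc == 0 means QEMU must
 * allocate and manage the table itself, and rc < 0 is an error.
 */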
1655 
1656 void spapr_setup_hpt(SpaprMachineState *spapr)
1657 {
1658     int hpt_shift;
1659 
1660     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
1661         hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1662     } else {
1663         uint64_t current_ram_size;
1664 
1665         current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
1666         hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
1667     }
1668     spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);
1669 
1670     if (kvm_enabled()) {
1671         hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift);
1672 
1673         /* Check our RMA fits in the possible VRMA */
1674         if (vrma_limit < spapr->rma_size) {
1675             error_report("Unable to create %" HWADDR_PRIu
1676                          "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB)",
1677                          spapr->rma_size / MiB, vrma_limit / MiB);
1678             exit(EXIT_FAILURE);
1679         }
1680     }
1681 }
1682 
1683 void spapr_check_mmu_mode(bool guest_radix)
1684 {
1685     if (guest_radix) {
1686         if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
1687             error_report("Guest requested unavailable MMU mode (radix).");
1688             exit(EXIT_FAILURE);
1689         }
1690     } else {
1691         if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
1692             && !kvmppc_has_cap_mmu_hash_v3()) {
1693             error_report("Guest requested unavailable MMU mode (hash).");
1694             exit(EXIT_FAILURE);
1695         }
1696     }
1697 }
1698 
1699 static void spapr_machine_reset(MachineState *machine, ResetType type)
1700 {
1701     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
1702     PowerPCCPU *first_ppc_cpu;
1703     hwaddr fdt_addr;
1704     void *fdt;
1705     int rc;
1706 
1707     if (type != RESET_TYPE_SNAPSHOT_LOAD) {
1708         /*
1709          * Record-replay snapshot load must not consume random data; it was
1710          * already replayed from the initial machine reset.
1711          */
1712         qemu_guest_getrandom_nofail(spapr->fdt_rng_seed, 32);
1713     }
1714 
1715     if (machine->cgs) {
1716         confidential_guest_kvm_reset(machine->cgs, &error_fatal);
1717     }
1718     spapr_caps_apply(spapr);
1719     spapr_nested_reset(spapr);
1720 
1721     first_ppc_cpu = POWERPC_CPU(first_cpu);
1722     if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
1723         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
1724                               spapr->max_compat_pvr)) {
1725         /*
1726          * If using KVM with radix mode available, VCPUs can be started
1727          * without a HPT because KVM will start them in radix mode.
1728          * Set the GR bit in PATE so that we know there is no HPT.
1729          */
1730         spapr->patb_entry = PATE1_GR;
1731         spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
1732     } else {
1733         spapr_setup_hpt(spapr);
1734     }
1735 
1736     qemu_devices_reset(type);
1737 
1738     spapr_ovec_cleanup(spapr->ov5_cas);
1739     spapr->ov5_cas = spapr_ovec_new();
1740 
1741     ppc_init_compat_all(spapr->max_compat_pvr, &error_fatal);
1742 
1743     /*
1744      * This fixes some of the default configuration of the XIVE
1745      * devices. It must be called after the machine devices have been reset.
1746      */
1747     spapr_irq_reset(spapr, &error_fatal);
1748 
1749     /*
1750      * There is no CAS under qtest. Simulate one to please the code that
1751      * depends on spapr->ov5_cas. This is especially needed to test device
1752      * unplug, so we do that before resetting the DRCs.
1753      */
1754     if (qtest_enabled()) {
1755         spapr_ovec_cleanup(spapr->ov5_cas);
1756         spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
1757     }
1758 
1759     spapr_nvdimm_finish_flushes();
1760 
1761     /* DRC reset may cause a device to be unplugged. This will cause troubles
1762      * if this device is used by another device (eg, a running vhost backend
1763      * will crash QEMU if the DIMM holding the vring goes away). To avoid such
1764      * situations, we reset DRCs after all devices have been reset.
1765      */
1766     spapr_drc_reset_all(spapr);
1767 
1768     spapr_clear_pending_events(spapr);
1769 
1770     /*
1771      * We place the device tree just below either the top of the RMA,
1772      * or just below 2GB, whichever is lower, so that it can be
1773      * processed with 32-bit real mode code if necessary
1774      */
1775     fdt_addr = MIN(spapr->rma_size, FDT_MAX_ADDR) - FDT_MAX_SIZE;
1776 
1777     fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE);
1778     if (spapr->vof) {
1779         spapr_vof_reset(spapr, fdt, &error_fatal);
1780         /*
1781          * Do not pack the FDT as the client may change properties.
1782          * VOF client does not expect the FDT so we do not load it to the VM.
1783          */
1784     } else {
1785         rc = fdt_pack(fdt);
1786         /* Should only fail if we've built a corrupted tree */
1787         assert(rc == 0);
1788 
1789         spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT,
1790                                   0, fdt_addr, 0);
1791         cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
1792     }
1793 
1794     g_free(spapr->fdt_blob);
1795     spapr->fdt_size = fdt_totalsize(fdt);
1796     spapr->fdt_initial_size = spapr->fdt_size;
1797     spapr->fdt_blob = fdt;
1798 
1799     /* Set machine->fdt for 'dumpdtb' QMP/HMP command */
1800     machine->fdt = fdt;
1801 
1802     /* Set up the entry state */
1803     first_ppc_cpu->env.gpr[5] = 0;
1804 
1805     spapr->fwnmi_system_reset_addr = -1;
1806     spapr->fwnmi_machine_check_addr = -1;
1807     spapr->fwnmi_machine_check_interlock = -1;
1808 
1809     /* Signal all vCPUs waiting on this condition */
1810     qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);
1811 
1812     migrate_del_blocker(&spapr->fwnmi_migration_blocker);
1813 }
1814 
1815 static void spapr_create_nvram(SpaprMachineState *spapr)
1816 {
1817     DeviceState *dev = qdev_new("spapr-nvram");
1818     DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1819 
1820     if (dinfo) {
1821         qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo),
1822                                 &error_fatal);
1823     }
1824 
1825     qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal);
1826 
1827     spapr->nvram = (struct SpaprNvram *)dev;
1828 }
1829 
1830 static void spapr_rtc_create(SpaprMachineState *spapr)
1831 {
1832     object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc,
1833                                        sizeof(spapr->rtc), TYPE_SPAPR_RTC,
1834                                        &error_fatal, NULL);
1835     qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal);
1836     object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
1837                               "date");
1838 }
1839 
1840 /* Returns whether we want to use VGA or not */
1841 static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1842 {
1843     vga_interface_created = true;
1844     switch (vga_interface_type) {
1845     case VGA_NONE:
1846         return false;
1847     case VGA_DEVICE:
1848         return true;
1849     case VGA_STD:
1850     case VGA_VIRTIO:
1851     case VGA_CIRRUS:
1852         return pci_vga_init(pci_bus) != NULL;
1853     default:
1854         error_setg(errp,
1855                    "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1856         return false;
1857     }
1858 }
1859 
1860 static int spapr_pre_load(void *opaque)
1861 {
1862     int rc;
1863 
1864     rc = spapr_caps_pre_load(opaque);
1865     if (rc) {
1866         return rc;
1867     }
1868 
1869     return 0;
1870 }
1871 
1872 static int spapr_post_load(void *opaque, int version_id)
1873 {
1874     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1875     int err = 0;
1876 
1877     err = spapr_caps_post_migration(spapr);
1878     if (err) {
1879         return err;
1880     }
1881 
1882     /*
1883      * In earlier versions, there was no separate qdev for the PAPR
1884      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1885      * So when migrating from those versions, poke the incoming offset
1886      * value into the RTC device
1887      */
1888     if (version_id < 3) {
1889         err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
1890         if (err) {
1891             return err;
1892         }
1893     }
1894 
1895     if (kvm_enabled() && spapr->patb_entry) {
1896         PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
1897         bool radix = !!(spapr->patb_entry & PATE1_GR);
1898         bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);
1899 
1900         /*
1901          * Update LPCR:HR and UPRT as they may not be set properly in
1902          * the stream
1903          */
1904         spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
1905                             LPCR_HR | LPCR_UPRT);
1906 
1907         err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
1908         if (err) {
1909             error_report("Process table config unsupported by the host");
1910             return -EINVAL;
1911         }
1912     }
1913 
1914     err = spapr_irq_post_load(spapr, version_id);
1915     if (err) {
1916         return err;
1917     }
1918 
1919     return err;
1920 }
1921 
1922 static int spapr_pre_save(void *opaque)
1923 {
1924     int rc;
1925 
1926     rc = spapr_caps_pre_save(opaque);
1927     if (rc) {
1928         return rc;
1929     }
1930 
1931     return 0;
1932 }
1933 
1934 static bool version_before_3(void *opaque, int version_id)
1935 {
1936     return version_id < 3;
1937 }
1938 
1939 static bool spapr_pending_events_needed(void *opaque)
1940 {
1941     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
1942     return !QTAILQ_EMPTY(&spapr->pending_events);
1943 }
1944 
1945 static const VMStateDescription vmstate_spapr_event_entry = {
1946     .name = "spapr_event_log_entry",
1947     .version_id = 1,
1948     .minimum_version_id = 1,
1949     .fields = (const VMStateField[]) {
1950         VMSTATE_UINT32(summary, SpaprEventLogEntry),
1951         VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
1952         VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
1953                                      NULL, extended_length),
1954         VMSTATE_END_OF_LIST()
1955     },
1956 };
1957 
1958 static const VMStateDescription vmstate_spapr_pending_events = {
1959     .name = "spapr_pending_events",
1960     .version_id = 1,
1961     .minimum_version_id = 1,
1962     .needed = spapr_pending_events_needed,
1963     .fields = (const VMStateField[]) {
1964         VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
1965                          vmstate_spapr_event_entry, SpaprEventLogEntry, next),
1966         VMSTATE_END_OF_LIST()
1967     },
1968 };
1969 
1970 static bool spapr_ov5_cas_needed(void *opaque)
1971 {
1972     SpaprMachineState *spapr = opaque;
1973     SpaprOptionVector *ov5_mask = spapr_ovec_new();
1974     bool cas_needed;
1975 
1976     /* Prior to the introduction of SpaprOptionVector, we had two option
1977      * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
1978      * Both of these options encode machine topology into the device-tree
1979      * in such a way that the now-booted OS should still be able to interact
1980      * appropriately with QEMU regardless of what options were actually
1981      * negotiated on the source side.
1982      *
1983      * As such, we can avoid migrating the CAS-negotiated options if these
1984      * are the only options available on the current machine/platform.
1985      * Since these are the only options available for pseries-2.7 and
1986      * earlier, this allows us to maintain old->new/new->old migration
1987      * compatibility.
1988      *
1989      * For QEMU 2.8+, there are additional CAS-negotiable options available
1990      * via default pseries-2.8 machines and explicit command-line parameters.
1991      * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
1992      * of the actual CAS-negotiated values to continue working properly. For
1993      * example, availability of memory unplug depends on knowing whether
1994      * OV5_HP_EVT was negotiated via CAS.
1995      *
1996      * Thus, for any cases where the set of available CAS-negotiable
1997      * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
1998      * include the CAS-negotiated options in the migration stream, unless
1999      * they affect boot-time behaviour only.
2000      */
2001     spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
2002     spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
2003     spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);
2004 
2005     /* We need extra information if we have any bits outside the mask
2006      * defined above */
2007     cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);
2008 
2009     spapr_ovec_cleanup(ov5_mask);
2010 
2011     return cas_needed;
2012 }
2013 
2014 static const VMStateDescription vmstate_spapr_ov5_cas = {
2015     .name = "spapr_option_vector_ov5_cas",
2016     .version_id = 1,
2017     .minimum_version_id = 1,
2018     .needed = spapr_ov5_cas_needed,
2019     .fields = (const VMStateField[]) {
2020         VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
2021                                  vmstate_spapr_ovec, SpaprOptionVector),
2022         VMSTATE_END_OF_LIST()
2023     },
2024 };
2025 
2026 static bool spapr_patb_entry_needed(void *opaque)
2027 {
2028     SpaprMachineState *spapr = opaque;
2029 
2030     return !!spapr->patb_entry;
2031 }
2032 
2033 static const VMStateDescription vmstate_spapr_patb_entry = {
2034     .name = "spapr_patb_entry",
2035     .version_id = 1,
2036     .minimum_version_id = 1,
2037     .needed = spapr_patb_entry_needed,
2038     .fields = (const VMStateField[]) {
2039         VMSTATE_UINT64(patb_entry, SpaprMachineState),
2040         VMSTATE_END_OF_LIST()
2041     },
2042 };
2043 
2044 static bool spapr_irq_map_needed(void *opaque)
2045 {
2046     SpaprMachineState *spapr = opaque;
2047 
2048     return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
2049 }
2050 
2051 static const VMStateDescription vmstate_spapr_irq_map = {
2052     .name = "spapr_irq_map",
2053     .version_id = 1,
2054     .minimum_version_id = 1,
2055     .needed = spapr_irq_map_needed,
2056     .fields = (const VMStateField[]) {
2057         VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
2058         VMSTATE_END_OF_LIST()
2059     },
2060 };
2061 
2062 static bool spapr_dtb_needed(void *opaque)
2063 {
2064     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);
2065 
2066     return smc->update_dt_enabled;
2067 }
2068 
2069 static int spapr_dtb_pre_load(void *opaque)
2070 {
2071     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2072 
2073     g_free(spapr->fdt_blob);
2074     spapr->fdt_blob = NULL;
2075     spapr->fdt_size = 0;
2076 
2077     return 0;
2078 }
2079 
2080 static const VMStateDescription vmstate_spapr_dtb = {
2081     .name = "spapr_dtb",
2082     .version_id = 1,
2083     .minimum_version_id = 1,
2084     .needed = spapr_dtb_needed,
2085     .pre_load = spapr_dtb_pre_load,
2086     .fields = (const VMStateField[]) {
2087         VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
2088         VMSTATE_UINT32(fdt_size, SpaprMachineState),
2089         VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
2090                                      fdt_size),
2091         VMSTATE_END_OF_LIST()
2092     },
2093 };
2094 
2095 static bool spapr_fwnmi_needed(void *opaque)
2096 {
2097     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2098 
2099     return spapr->fwnmi_machine_check_addr != -1;
2100 }
2101 
2102 static int spapr_fwnmi_pre_save(void *opaque)
2103 {
2104     SpaprMachineState *spapr = (SpaprMachineState *)opaque;
2105 
2106     /*
2107      * Check if machine check handling is in progress and print a
2108      * warning message.
2109      */
2110     if (spapr->fwnmi_machine_check_interlock != -1) {
2111         warn_report("A machine check is being handled during migration. The "
2112                     "handler may run and log a hardware error on the destination");
2113     }
2114 
2115     return 0;
2116 }
2117 
2118 static const VMStateDescription vmstate_spapr_fwnmi = {
2119     .name = "spapr_fwnmi",
2120     .version_id = 1,
2121     .minimum_version_id = 1,
2122     .needed = spapr_fwnmi_needed,
2123     .pre_save = spapr_fwnmi_pre_save,
2124     .fields = (const VMStateField[]) {
2125         VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
2126         VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
2127         VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
2128         VMSTATE_END_OF_LIST()
2129     },
2130 };
2131 
2132 static const VMStateDescription vmstate_spapr = {
2133     .name = "spapr",
2134     .version_id = 3,
2135     .minimum_version_id = 1,
2136     .pre_load = spapr_pre_load,
2137     .post_load = spapr_post_load,
2138     .pre_save = spapr_pre_save,
2139     .fields = (const VMStateField[]) {
2140         /* used to be @next_irq */
2141         VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
2142 
2143         /* RTC offset */
2144         VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),
2145 
2146         VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
2147         VMSTATE_END_OF_LIST()
2148     },
2149     .subsections = (const VMStateDescription * const []) {
2150         &vmstate_spapr_ov5_cas,
2151         &vmstate_spapr_patb_entry,
2152         &vmstate_spapr_pending_events,
2153         &vmstate_spapr_cap_htm,
2154         &vmstate_spapr_cap_vsx,
2155         &vmstate_spapr_cap_dfp,
2156         &vmstate_spapr_cap_cfpc,
2157         &vmstate_spapr_cap_sbbc,
2158         &vmstate_spapr_cap_ibs,
2159         &vmstate_spapr_cap_hpt_maxpagesize,
2160         &vmstate_spapr_irq_map,
2161         &vmstate_spapr_cap_nested_kvm_hv,
2162         &vmstate_spapr_dtb,
2163         &vmstate_spapr_cap_large_decr,
2164         &vmstate_spapr_cap_ccf_assist,
2165         &vmstate_spapr_cap_fwnmi,
2166         &vmstate_spapr_fwnmi,
2167         &vmstate_spapr_cap_rpt_invalidate,
2168         &vmstate_spapr_cap_ail_mode_3,
2169         &vmstate_spapr_cap_nested_papr,
2170         &vmstate_spapr_cap_dawr1,
2171         NULL
2172     }
2173 };
2174 
2175 static int htab_save_setup(QEMUFile *f, void *opaque, Error **errp)
2176 {
2177     SpaprMachineState *spapr = opaque;
2178 
2179     /* "Iteration" header */
2180     if (!spapr->htab_shift) {
2181         qemu_put_be32(f, -1);
2182     } else {
2183         qemu_put_be32(f, spapr->htab_shift);
2184     }
2185 
2186     if (spapr->htab) {
2187         spapr->htab_save_index = 0;
2188         spapr->htab_first_pass = true;
2189     } else {
2190         if (spapr->htab_shift) {
2191             assert(kvm_enabled());
2192         }
2193     }
2194 
2196     return 0;
2197 }
2198 
2199 static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
2200                             int chunkstart, int n_valid, int n_invalid)
2201 {
2202     qemu_put_be32(f, chunkstart);
2203     qemu_put_be16(f, n_valid);
2204     qemu_put_be16(f, n_invalid);
2205     qemu_put_buffer(f, (void *)hpte_get_ptr(spapr, chunkstart),
2206                     HASH_PTE_SIZE_64 * n_valid);
2207 }
2208 
2209 static void htab_save_end_marker(QEMUFile *f)
2210 {
2211     qemu_put_be32(f, 0);
2212     qemu_put_be16(f, 0);
2213     qemu_put_be16(f, 0);
2214 }
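/*
 * On-the-wire layout of one chunk, as emitted by htab_save_chunk()
 * above and parsed back in htab_load():
 *
 *     be32  index of the first HPTE covered by the chunk
 *     be16  n_valid    HPTEs whose raw contents follow
 *     be16  n_invalid  HPTEs the destination must clear
 *     n_valid * HASH_PTE_SIZE_64 bytes of HPTE data
 *
 * The end marker is the degenerate chunk 0/0/0.
 */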
2215 
2216 static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
2217                                  int64_t max_ns)
2218 {
2219     bool has_timeout = max_ns != -1;
2220     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2221     int index = spapr->htab_save_index;
2222     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2223 
2224     assert(spapr->htab_first_pass);
2225 
2226     do {
2227         int chunkstart;
2228 
2229         /* Consume invalid HPTEs */
2230         while ((index < htabslots)
2231                && !hpte_is_valid(spapr, index)) {
2232             hpte_set_clean(spapr, index);
2233             index++;
2234         }
2235 
2236         /* Consume valid HPTEs */
2237         chunkstart = index;
2238         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2239                && hpte_is_valid(spapr, index)) {
2240             hpte_set_clean(spapr, index);
2241             index++;
2242         }
2243 
2244         if (index > chunkstart) {
2245             int n_valid = index - chunkstart;
2246 
2247             htab_save_chunk(f, spapr, chunkstart, n_valid, 0);
2248 
2249             if (has_timeout &&
2250                 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2251                 break;
2252             }
2253         }
2254     } while ((index < htabslots) && !migration_rate_exceeded(f));
2255 
2256     if (index >= htabslots) {
2257         assert(index == htabslots);
2258         index = 0;
2259         spapr->htab_first_pass = false;
2260     }
2261     spapr->htab_save_index = index;
2262 }
2263 
2264 static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
2265                                 int64_t max_ns)
2266 {
2267     bool final = max_ns < 0;
2268     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
2269     int examined = 0, sent = 0;
2270     int index = spapr->htab_save_index;
2271     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2272 
2273     assert(!spapr->htab_first_pass);
2274 
2275     do {
2276         int chunkstart, invalidstart;
2277 
2278         /* Consume non-dirty HPTEs */
2279         while ((index < htabslots)
2280                && !hpte_is_dirty(spapr, index)) {
2281             index++;
2282             examined++;
2283         }
2284 
2285         chunkstart = index;
2286         /* Consume valid dirty HPTEs */
2287         while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
2288                && hpte_is_dirty(spapr, index)
2289                && hpte_is_valid(spapr, index)) {
2290             hpte_set_clean(spapr, index);
2291             index++;
2292             examined++;
2293         }
2294 
2295         invalidstart = index;
2296         /* Consume invalid dirty HPTEs */
2297         while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
2298                && hpte_is_dirty(spapr, index)
2299                && !hpte_is_valid(spapr, index)) {
2300             hpte_set_clean(spapr, index);
2301             index++;
2302             examined++;
2303         }
2304 
2305         if (index > chunkstart) {
2306             int n_valid = invalidstart - chunkstart;
2307             int n_invalid = index - invalidstart;
2308 
2309             htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
2310             sent += index - chunkstart;
2311 
2312             if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
2313                 break;
2314             }
2315         }
2316 
2317         if (examined >= htabslots) {
2318             break;
2319         }
2320 
2321         if (index >= htabslots) {
2322             assert(index == htabslots);
2323             index = 0;
2324         }
2325     } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final));
2326 
2327     if (index >= htabslots) {
2328         assert(index == htabslots);
2329         index = 0;
2330     }
2331 
2332     spapr->htab_save_index = index;
2333 
2334     return (examined >= htabslots) && (sent == 0) ? 1 : 0;
2335 }
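/*
 * The return value of 1 above means a complete sweep of the table
 * (examined >= htabslots) sent nothing, i.e. no HPTE was left dirty;
 * htab_save_iterate() passes this on to the migration core to signal
 * that the HPT stream has converged.
 */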
2336 
2337 #define MAX_ITERATION_NS    5000000 /* 5 ms */
2338 #define MAX_KVM_BUF_SIZE    2048
2339 
2340 static int htab_save_iterate(QEMUFile *f, void *opaque)
2341 {
2342     SpaprMachineState *spapr = opaque;
2343     int fd;
2344     int rc = 0;
2345 
2346     /* Iteration header */
2347     if (!spapr->htab_shift) {
2348         qemu_put_be32(f, -1);
2349         return 1;
2350     } else {
2351         qemu_put_be32(f, 0);
2352     }
2353 
2354     if (!spapr->htab) {
2355         assert(kvm_enabled());
2356 
2357         fd = get_htab_fd(spapr);
2358         if (fd < 0) {
2359             return fd;
2360         }
2361 
2362         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
2363         if (rc < 0) {
2364             return rc;
2365         }
2366     } else if (spapr->htab_first_pass) {
2367         htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
2368     } else {
2369         rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
2370     }
2371 
2372     htab_save_end_marker(f);
2373 
2374     return rc;
2375 }
2376 
2377 static int htab_save_complete(QEMUFile *f, void *opaque)
2378 {
2379     SpaprMachineState *spapr = opaque;
2380     int fd;
2381 
2382     /* Iteration header */
2383     if (!spapr->htab_shift) {
2384         qemu_put_be32(f, -1);
2385         return 0;
2386     } else {
2387         qemu_put_be32(f, 0);
2388     }
2389 
2390     if (!spapr->htab) {
2391         int rc;
2392 
2393         assert(kvm_enabled());
2394 
2395         fd = get_htab_fd(spapr);
2396         if (fd < 0) {
2397             return fd;
2398         }
2399 
2400         rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
2401         if (rc < 0) {
2402             return rc;
2403         }
2404     } else {
2405         if (spapr->htab_first_pass) {
2406             htab_save_first_pass(f, spapr, -1);
2407         }
2408         htab_save_later_pass(f, spapr, -1);
2409     }
2410 
2411     /* End marker */
2412     htab_save_end_marker(f);
2413 
2414     return 0;
2415 }
2416 
2417 static int htab_load(QEMUFile *f, void *opaque, int version_id)
2418 {
2419     SpaprMachineState *spapr = opaque;
2420     uint32_t section_hdr;
2421     int fd = -1;
2422     Error *local_err = NULL;
2423 
2424     if (version_id != 1) {
2425         error_report("htab_load() bad version");
2426         return -EINVAL;
2427     }
2428 
2429     section_hdr = qemu_get_be32(f);
2430 
2431     if (section_hdr == -1) {
2432         spapr_free_hpt(spapr);
2433         return 0;
2434     }
2435 
2436     if (section_hdr) {
2437         int ret;
2438 
2439         /* First section gives the htab size */
2440         ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
2441         if (ret < 0) {
2442             error_report_err(local_err);
2443             return ret;
2444         }
2445         return 0;
2446     }
2447 
2448     if (!spapr->htab) {
2449         assert(kvm_enabled());
2450 
2451         fd = kvmppc_get_htab_fd(true, 0, &local_err);
2452         if (fd < 0) {
2453             error_report_err(local_err);
2454             return fd;
2455         }
2456     }
2457 
2458     while (true) {
2459         uint32_t index;
2460         uint16_t n_valid, n_invalid;
2461 
2462         index = qemu_get_be32(f);
2463         n_valid = qemu_get_be16(f);
2464         n_invalid = qemu_get_be16(f);
2465 
2466         if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
2467             /* End of Stream */
2468             break;
2469         }
2470 
2471         if ((index + n_valid + n_invalid) >
2472             (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
2473             /* Bad index in stream */
2474             error_report(
2475                 "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
2476                 index, n_valid, n_invalid, spapr->htab_shift);
2477             return -EINVAL;
2478         }
2479 
2480         if (spapr->htab) {
2481             if (n_valid) {
2482                 qemu_get_buffer(f, (void *)hpte_get_ptr(spapr, index),
2483                                 HASH_PTE_SIZE_64 * n_valid);
2484             }
2485             if (n_invalid) {
2486                 memset(hpte_get_ptr(spapr, index + n_valid), 0,
2487                        HASH_PTE_SIZE_64 * n_invalid);
2488             }
2489         } else {
2490             int rc;
2491 
2492             assert(fd >= 0);
2493 
2494             rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
2495                                         &local_err);
2496             if (rc < 0) {
2497                 error_report_err(local_err);
2498                 return rc;
2499             }
2500         }
2501     }
2502 
2503     if (!spapr->htab) {
2504         assert(fd >= 0);
2505         close(fd);
2506     }
2507 
2508     return 0;
2509 }
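/*
 * htab_load() thus distinguishes three section headers: -1 (the source
 * had no HPT, so free ours), a non-zero shift (the first section,
 * sizing the table via spapr_reallocate_hpt()), and 0 (a sequence of
 * chunks terminated by the 0/0/0 end marker).
 */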
2510 
2511 static void htab_save_cleanup(void *opaque)
2512 {
2513     SpaprMachineState *spapr = opaque;
2514 
2515     close_htab_fd(spapr);
2516 }
2517 
2518 static SaveVMHandlers savevm_htab_handlers = {
2519     .save_setup = htab_save_setup,
2520     .save_live_iterate = htab_save_iterate,
2521     .save_live_complete_precopy = htab_save_complete,
2522     .save_cleanup = htab_save_cleanup,
2523     .load_state = htab_load,
2524 };
2525 
2526 static void spapr_boot_set(void *opaque, const char *boot_device,
2527                            Error **errp)
2528 {
2529     SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
2530 
2531     g_free(spapr->boot_device);
2532     spapr->boot_device = g_strdup(boot_device);
2533 }
2534 
2535 static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
2536 {
2537     MachineState *machine = MACHINE(spapr);
2538     uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2539     uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
2540     int i;
2541 
2542     g_assert(!nr_lmbs || machine->device_memory);
2543     for (i = 0; i < nr_lmbs; i++) {
2544         uint64_t addr;
2545 
2546         addr = i * lmb_size + machine->device_memory->base;
2547         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2548                                addr / lmb_size);
2549     }
2550 }
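/*
 * Example (illustrative command line): -m 4G,maxmem=8G creates
 * (8G - 4G) / 256M = 16 LMB DRCs, each identified by its block's
 * address divided by the 256 MiB block size.
 */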
2551 
2552 /*
2553  * If RAM size, maxmem size and individual node mem sizes aren't aligned
2554  * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
2555  * since we can't support such unaligned sizes with DRCONF_MEMORY.
2556  */
2557 static void spapr_validate_node_memory(MachineState *machine, Error **errp)
2558 {
2559     int i;
2560 
2561     if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2562         error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
2563                    " is not aligned to %" PRIu64 " MiB",
2564                    machine->ram_size,
2565                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2566         return;
2567     }
2568 
2569     if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2570         error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
2571                    " is not aligned to %" PRIu64 " MiB",
2572                    machine->maxram_size,
2573                    SPAPR_MEMORY_BLOCK_SIZE / MiB);
2574         return;
2575     }
2576 
2577     for (i = 0; i < machine->numa_state->num_nodes; i++) {
2578         if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
2579             error_setg(errp,
2580                        "Node %d memory size 0x%" PRIx64
2581                        " is not aligned to %" PRIu64 " MiB",
2582                        i, machine->numa_state->nodes[i].node_mem,
2583                        SPAPR_MEMORY_BLOCK_SIZE / MiB);
2584             return;
2585         }
2586     }
2587 }
2588 
2589 /* find cpu slot in machine->possible_cpus by core_id */
2590 static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
2591 {
2592     int index = id / ms->smp.threads;
2593 
2594     if (index >= ms->possible_cpus->len) {
2595         return NULL;
2596     }
2597     if (idx) {
2598         *idx = index;
2599     }
2600     return &ms->possible_cpus->cpus[index];
2601 }
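/*
 * Example: with smp.threads = 8, core id 16 maps to
 * possible_cpus->cpus[2]; any id inside the same core (16..23)
 * selects the same slot.
 */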
2602 
2603 static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
2604 {
2605     MachineState *ms = MACHINE(spapr);
2606     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2607     Error *local_err = NULL;
2608     bool vsmt_user = !!spapr->vsmt;
2609     int kvm_smt = kvmppc_smt_threads();
2610     int ret;
2611     unsigned int smp_threads = ms->smp.threads;
2612 
2613     if (tcg_enabled()) {
2614         if (smp_threads > 1 &&
2615             !ppc_type_check_compat(ms->cpu_type, CPU_POWERPC_LOGICAL_2_07, 0,
2616                                    spapr->max_compat_pvr)) {
2617             error_setg(errp, "TCG only supports SMT on POWER8 or newer CPUs");
2618             return;
2619         }
2620 
2621         if (smp_threads > 8) {
2622             error_setg(errp, "TCG cannot support more than 8 threads/core "
2623                        "on a pseries machine");
2624             return;
2625         }
2626     }
2627     if (!is_power_of_2(smp_threads)) {
2628         error_setg(errp, "Cannot support %d threads/core on a pseries "
2629                    "machine because it must be a power of 2", smp_threads);
2630         return;
2631     }
2632 
2633     /* Determine the VSMT mode to use: */
2634     if (vsmt_user) {
2635         if (spapr->vsmt < smp_threads) {
2636             error_setg(errp, "Cannot support VSMT mode %d"
2637                        " because it must be >= threads/core (%d)",
2638                        spapr->vsmt, smp_threads);
2639             return;
2640         }
2641         /* In this case, spapr->vsmt has been set by the command line */
2642     } else if (!smc->smp_threads_vsmt) {
2643         /*
2644          * Default VSMT value is tricky, because we need it to be as
2645          * consistent as possible (for migration), but this requires
2646          * changing it for at least some existing cases.  We pick 8 as
2647          * the value that we'd get with KVM on POWER8, the
2648          * overwhelmingly common case in production systems.
2649          */
2650         spapr->vsmt = MAX(8, smp_threads);
2651     } else {
2652         spapr->vsmt = smp_threads;
2653     }
2654 
2655     /* KVM: If necessary, set the SMT mode: */
2656     if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
2657         ret = kvmppc_set_smt_threads(spapr->vsmt);
2658         if (ret) {
2659             /* Looks like KVM isn't able to change VSMT mode */
2660             error_setg(&local_err,
2661                        "Failed to set KVM's VSMT mode to %d (errno %d)",
2662                        spapr->vsmt, ret);
2663             /* We can live with that if the default one is big enough
2664              * for the number of threads, and a submultiple of the one
2665              * we want.  In this case we'll waste some vcpu ids, but
2666              * behaviour will be correct */
2667             if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
2668                 warn_report_err(local_err);
2669             } else {
2670                 if (!vsmt_user) {
2671                     error_append_hint(&local_err,
2672                                       "On PPC, a VM with %d threads/core"
2673                                       " on a host with %d threads/core"
2674                                       " requires the use of VSMT mode %d.\n",
2675                                       smp_threads, kvm_smt, spapr->vsmt);
2676                 }
2677                 kvmppc_error_append_smt_possible_hint(&local_err);
2678                 error_propagate(errp, local_err);
2679             }
2680         }
2681     }
2682     /* else TCG: nothing to do currently */
2683 }
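/*
 * Example of the default path: smp_threads = 4 with no explicit vsmt
 * property gives spapr->vsmt = MAX(8, 4) = 8, the value KVM reports on
 * POWER8 hosts, so vCPU ids are spaced 8 apart regardless of the
 * guest's actual threads/core.
 */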
2684 
2685 static void spapr_init_cpus(SpaprMachineState *spapr)
2686 {
2687     MachineState *machine = MACHINE(spapr);
2688     MachineClass *mc = MACHINE_GET_CLASS(machine);
2689     const char *type = spapr_get_cpu_core_type(machine->cpu_type);
2690     const CPUArchIdList *possible_cpus;
2691     unsigned int smp_cpus = machine->smp.cpus;
2692     unsigned int smp_threads = machine->smp.threads;
2693     unsigned int max_cpus = machine->smp.max_cpus;
2694     int boot_cores_nr = smp_cpus / smp_threads;
2695     int i;
2696 
2697     possible_cpus = mc->possible_cpu_arch_ids(machine);
2698     if (mc->has_hotpluggable_cpus) {
2699         if (smp_cpus % smp_threads) {
2700             error_report("smp_cpus (%u) must be multiple of threads (%u)",
2701                          smp_cpus, smp_threads);
2702             exit(1);
2703         }
2704         if (max_cpus % smp_threads) {
2705             error_report("max_cpus (%u) must be multiple of threads (%u)",
2706                          max_cpus, smp_threads);
2707             exit(1);
2708         }
2709     } else {
2710         if (max_cpus != smp_cpus) {
2711             error_report("This machine version does not support CPU hotplug");
2712             exit(1);
2713         }
2714         boot_cores_nr = possible_cpus->len;
2715     }
2716 
2717     for (i = 0; i < possible_cpus->len; i++) {
2718         int core_id = i * smp_threads;
2719 
2720         if (mc->has_hotpluggable_cpus) {
2721             spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
2722                                    spapr_vcpu_id(spapr, core_id));
2723         }
2724 
2725         if (i < boot_cores_nr) {
2726             Object *core  = object_new(type);
2727             int nr_threads = smp_threads;
2728 
2729             /* Handle the partially filled core for older machine types */
2730             if ((i + 1) * smp_threads >= smp_cpus) {
2731                 nr_threads = smp_cpus - i * smp_threads;
2732             }
2733 
2734             object_property_set_int(core, "nr-threads", nr_threads,
2735                                     &error_fatal);
2736             object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id,
2737                                     &error_fatal);
2738             qdev_realize(DEVICE(core), NULL, &error_fatal);
2739 
2740             object_unref(core);
2741         }
2742     }
2743 }
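/*
 * Example (illustrative): -smp 8,threads=4,maxcpus=16 gives
 * possible_cpus->len = 4 cores at core ids 0, 4, 8 and 12; all four
 * get a CPU DRC, but only the first two (boot_cores_nr = 8 / 4) are
 * realized at boot, the rest staying empty for hotplug.
 */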
2744 
2745 static PCIHostState *spapr_create_default_phb(void)
2746 {
2747     DeviceState *dev;
2748 
2749     dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE);
2750     qdev_prop_set_uint32(dev, "index", 0);
2751     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
2752 
2753     return PCI_HOST_BRIDGE(dev);
2754 }
2755 
2756 static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
2757 {
2758     MachineState *machine = MACHINE(spapr);
2759     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
2760     hwaddr rma_size = machine->ram_size;
2761     hwaddr node0_size = spapr_node0_size(machine);
2762 
2763     /* RMA has to fit in the first NUMA node */
2764     rma_size = MIN(rma_size, node0_size);
2765 
2766     /*
2767      * VRMA access is via a special 1TiB SLB mapping, so the RMA can
2768      * never exceed that
2769      */
2770     rma_size = MIN(rma_size, 1 * TiB);
2771 
2772     /*
2773      * Clamp the RMA size based on machine type.  This is for
2774      * migration compatibility with older qemu versions, which limited
2775      * the RMA size for complicated and mostly bad reasons.
2776      */
2777     if (smc->rma_limit) {
2778         rma_size = MIN(rma_size, smc->rma_limit);
2779     }
2780 
2781     if (rma_size < MIN_RMA_SLOF) {
2782         error_setg(errp,
2783                    "pSeries SLOF firmware requires >= %" HWADDR_PRIx
2784                    "MiB guest RMA (Real Mode Area memory)",
2785                    MIN_RMA_SLOF / MiB);
2786         return 0;
2787     }
2788 
2789     return rma_size;
2790 }
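/*
 * Example: a -m 4G guest whose first NUMA node holds 2 GiB ends up
 * with a 2 GiB RMA; the 1 TiB clamp only bites for very large node 0
 * sizes, and smc->rma_limit only on older machine types kept for
 * migration compatibility.
 */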
2791 
2792 static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
2793 {
2794     MachineState *machine = MACHINE(spapr);
2795     int i;
2796 
2797     for (i = 0; i < machine->ram_slots; i++) {
2798         spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
2799     }
2800 }
2801 
2802 /* pSeries LPAR / sPAPR hardware init */
2803 static void spapr_machine_init(MachineState *machine)
2804 {
2805     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
2806     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2807     MachineClass *mc = MACHINE_GET_CLASS(machine);
2808     const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
2809     const char *bios_name = machine->firmware ?: bios_default;
2810     g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
2811     const char *kernel_filename = machine->kernel_filename;
2812     const char *initrd_filename = machine->initrd_filename;
2813     PCIHostState *phb;
2814     bool has_vga;
2815     int i;
2816     MemoryRegion *sysmem = get_system_memory();
2817     long load_limit, fw_size;
2818     Error *resize_hpt_err = NULL;
2819     NICInfo *nd;
2820 
2821     if (!filename) {
2822         error_report("Could not find LPAR firmware '%s'", bios_name);
2823         exit(1);
2824     }
2825     fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
2826     if (fw_size <= 0) {
2827         error_report("Could not load LPAR firmware '%s'", filename);
2828         exit(1);
2829     }
2830 
2831     /*
2832      * If Secure VM (PEF) support is configured, then initialize it
2833      */
2834     if (machine->cgs) {
2835         confidential_guest_kvm_init(machine->cgs, &error_fatal);
2836     }
2837 
2838     msi_nonbroken = true;
2839 
2840     QLIST_INIT(&spapr->phbs);
2841     QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2842 
2843     /* Determine capabilities to run with */
2844     spapr_caps_init(spapr);
2845 
2846     kvmppc_check_papr_resize_hpt(&resize_hpt_err);
2847     if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
2848         /*
2849          * If the user explicitly requested a mode we should either
2850          * supply it, or fail completely (which we do below).  But if
2851          * it's not set explicitly, we reset our mode to something
2852          * that works
2853          */
2854         if (resize_hpt_err) {
2855             spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2856             error_free(resize_hpt_err);
2857             resize_hpt_err = NULL;
2858         } else {
2859             spapr->resize_hpt = smc->resize_hpt_default;
2860         }
2861     }
2862 
2863     assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);
2864 
2865     if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
2866         /*
2867          * User requested HPT resize, but this host can't supply it.  Bail out
2868          */
2869         error_report_err(resize_hpt_err);
2870         exit(1);
2871     }
2872     error_free(resize_hpt_err);
2873 
2874     spapr->rma_size = spapr_rma_size(spapr, &error_fatal);
2875 
2876     /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
2877     /* Set up a load limit for the ramdisk, leaving room for SLOF and FDT */
2878 
2879     /*
2880      * VSMT must be set in order to be able to compute VCPU ids, i.e. to
2881      * call spapr_max_server_number() or spapr_vcpu_id().
2882      */
2883     spapr_set_vsmt_mode(spapr, &error_fatal);
2884 
2885     /* Set up Interrupt Controller before we create the VCPUs */
2886     spapr_irq_init(spapr, &error_fatal);
2887 
2888     /* Set up containers for ibm,client-architecture-support negotiated options
2889      */
2890     spapr->ov5 = spapr_ovec_new();
2891     spapr->ov5_cas = spapr_ovec_new();
2892 
2893     spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2894     spapr_validate_node_memory(machine, &error_fatal);
2895 
2896     spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2897 
2898     /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */
2899     if (!smc->pre_6_2_numa_affinity) {
2900         spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY);
2901     }
2902 
2903     /* advertise support for dedicated HP event source to guests */
2904     if (spapr->use_hotplug_event_source) {
2905         spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2906     }
2907 
2908     /* advertise support for HPT resizing */
2909     if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
2910         spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
2911     }
2912 
2913     /* advertise support for ibm,dynamic-memory-v2 */
2914     spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);
2915 
2916     /* advertise XIVE on POWER9 machines */
2917     if (spapr->irq->xive) {
2918         spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
2919     }
2920 
2921     qemu_guest_getrandom_nofail(&spapr->hashpkey_val,
2922                                 sizeof(spapr->hashpkey_val));
2923 
2924     /* init CPUs */
2925     spapr_init_cpus(spapr);
2926 
2927     /* Init numa_assoc_array */
2928     spapr_numa_associativity_init(spapr, machine);
2929 
2930     if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2931         ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
2932                               spapr->max_compat_pvr)) {
2933         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300);
2934         /* KVM and TCG always allow GTSE with radix... */
2935         spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2936     }
2937     /* ... but not with hash (currently). */
2938 
2939     if (kvm_enabled()) {
2940         /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2941         kvmppc_enable_logical_ci_hcalls();
2942         kvmppc_enable_set_mode_hcall();
2943 
2944         /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2945         kvmppc_enable_clear_ref_mod_hcalls();
2946 
2947         /* Enable H_PAGE_INIT */
2948         kvmppc_enable_h_page_init();
2949     }
2950 
2951     /* map RAM */
2952     memory_region_add_subregion(sysmem, 0, machine->ram);
2953 
2954     /* initialize hotplug memory address space */
2955     if (machine->ram_size < machine->maxram_size) {
2956         ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
2957         hwaddr device_mem_base;
2958 
2959         /*
2960          * Limit the number of hotpluggable memory slots to half the number of
2961          * slots that KVM supports, leaving the other half for PCI and other
2962          * devices. However, ensure the number of slots doesn't drop below 32.
2963          */
2964         int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2965                            SPAPR_MAX_RAM_SLOTS;
2966 
2967         if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2968             max_memslots = SPAPR_MAX_RAM_SLOTS;
2969         }
2970         if (machine->ram_slots > max_memslots) {
2971             error_report("Specified number of memory slots %"
2972                          PRIu64" exceeds max supported %d",
2973                          machine->ram_slots, max_memslots);
2974             exit(1);
2975         }
2976 
2977         device_mem_base = ROUND_UP(machine->ram_size, SPAPR_DEVICE_MEM_ALIGN);
2978         machine_memory_devices_init(machine, device_mem_base, device_mem_size);
2979     }
2980 
2981     spapr_create_lmb_dr_connectors(spapr);
2982 
2983     if (mc->nvdimm_supported) {
2984         spapr_create_nvdimm_dr_connectors(spapr);
2985     }
2986 
2987     /* Set up RTAS event infrastructure */
2988     spapr_events_init(spapr);
2989 
2990     /* Set up the RTC RTAS interfaces */
2991     spapr_rtc_create(spapr);
2992 
2993     /* Set up VIO bus */
2994     spapr->vio_bus = spapr_vio_bus_init();
2995 
2996     for (i = 0; serial_hd(i); i++) {
2997         spapr_vty_create(spapr->vio_bus, serial_hd(i));
2998     }
2999 
3000     /* We always have at least the nvram device on VIO */
3001     spapr_create_nvram(spapr);
3002 
3003     /*
3004      * Set up hotplug / dynamic-reconfiguration connectors. Top-level
3005      * connectors (described in the root DT node's "ibm,drc-types" property)
3006      * are pre-initialized here. Additional child connectors (such as
3007      * connectors for a PHB's PCI slots) are added as needed during their
3008      * parent's realization.
3009      */
3010     if (smc->dr_phb_enabled) {
3011         for (i = 0; i < SPAPR_MAX_PHBS; i++) {
3012             spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
3013         }
3014     }
3015 
3016     /* Set up PCI */
3017     spapr_pci_rtas_init();
3018 
3019     phb = spapr_create_default_phb();
3020 
3021     while ((nd = qemu_find_nic_info("spapr-vlan", true, "ibmveth"))) {
3022         spapr_vlan_create(spapr->vio_bus, nd);
3023     }
3024 
3025     pci_init_nic_devices(phb->bus, NULL);
3026 
3027     for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
3028         spapr_vscsi_create(spapr->vio_bus);
3029     }
3030 
3031     /* Graphics */
3032     has_vga = spapr_vga_init(phb->bus, &error_fatal);
3033     if (has_vga) {
3034         spapr->want_stdout_path = !machine->enable_graphics;
3035         machine->usb |= defaults_enabled() && !machine->usb_disabled;
3036     } else {
3037         spapr->want_stdout_path = true;
3038     }
3039 
3040     if (machine->usb) {
3041         pci_create_simple(phb->bus, -1, "nec-usb-xhci");
3042 
3043         if (has_vga) {
3044             USBBus *usb_bus;
3045 
3046             usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS,
3047                                                               &error_abort));
3048             usb_create_simple(usb_bus, "usb-kbd");
3049             usb_create_simple(usb_bus, "usb-mouse");
3050         }
3051     }
3052 
3053     if (kernel_filename) {
3054         uint64_t loaded_addr = 0;
3055 
3056         spapr->kernel_size = load_elf(kernel_filename, NULL,
3057                                       translate_kernel_address, spapr,
3058                                       NULL, &loaded_addr, NULL, NULL,
3059                                       ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
3060         if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
3061             spapr->kernel_size = load_elf(kernel_filename, NULL,
3062                                           translate_kernel_address, spapr,
3063                                           NULL, &loaded_addr, NULL, NULL,
3064                                           ELFDATA2LSB, PPC_ELF_MACHINE, 0, 0);
3065             spapr->kernel_le = spapr->kernel_size > 0;
3066         }
3067         if (spapr->kernel_size < 0) {
3068             error_report("error loading %s: %s", kernel_filename,
3069                          load_elf_strerror(spapr->kernel_size));
3070             exit(1);
3071         }
3072 
3073         if (spapr->kernel_addr != loaded_addr) {
3074             warn_report("spapr: kernel_addr changed from 0x%"PRIx64
3075                         " to 0x%"PRIx64,
3076                         spapr->kernel_addr, loaded_addr);
3077             spapr->kernel_addr = loaded_addr;
3078         }
3079 
3080         /* load initrd */
3081         if (initrd_filename) {
3082             /* Try to locate the initrd in the gap between the kernel
3083              * and the firmware. Add a bit of space just in case
3084              */
3085             spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size
3086                                   + 0x1ffff) & ~0xffff;
3087             spapr->initrd_size = load_image_targphys(initrd_filename,
3088                                                      spapr->initrd_base,
3089                                                      load_limit
3090                                                      - spapr->initrd_base);
3091             if (spapr->initrd_size < 0) {
3092                 error_report("could not load initial ram disk '%s'",
3093                              initrd_filename);
3094                 exit(1);
3095             }
3096         }
3097     }
3098 
3099     /* FIXME: Should register things through the MachineState's qdev
3100      * interface; this is a legacy from the sPAPREnvironment structure,
3101      * which predated MachineState but had a similar function */
3102     vmstate_register(NULL, 0, &vmstate_spapr, spapr);
3103     register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1,
3104                          &savevm_htab_handlers, spapr);
3105 
3106     qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine));
3107 
3108     qemu_register_boot_set(spapr_boot_set, spapr);
3109 
3110     /*
3111      * Nothing needs to be done to resume a suspended guest because
3112      * suspending does not change the machine state, so no need for
3113      * a ->wakeup method.
3114      */
3115     qemu_register_wakeup_support();
3116 
3117     if (kvm_enabled()) {
3118         /* to stop and start vmclock */
3119         qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
3120                                          &spapr->tb);
3121 
3122         kvmppc_spapr_enable_inkernel_multitce();
3123     }
3124 
3125     qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);
3126     if (spapr->vof) {
3127         spapr->vof->fw_size = fw_size; /* for claim() on itself */
3128         spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client);
3129     }
3130 
3131     spapr_watchdog_init(spapr);
3132 }
3133 
3134 #define DEFAULT_KVM_TYPE "auto"
3135 static int spapr_kvm_type(MachineState *machine, const char *vm_type)
3136 {
3137     /*
3138      * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
3139      * accommodate the 'HV' and 'PR' formats that exist in the
3140      * wild. The 'auto' mode is being introduced already as
3141      * lower-case, thus we don't need to bother checking for
3142      * "AUTO".
3143      */
3144     if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
3145         return 0;
3146     }
3147 
3148     if (!g_ascii_strcasecmp(vm_type, "hv")) {
3149         return 1;
3150     }
3151 
3152     if (!g_ascii_strcasecmp(vm_type, "pr")) {
3153         return 2;
3154     }
3155 
3156     error_report("Unknown kvm-type specified '%s'", vm_type);
3157     return -1;
3158 }
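
/*
 * Illustrative usage (not from the original source): the value parsed
 * here comes from the machine property on the command line, e.g.
 *
 *   qemu-system-ppc64 -machine pseries,accel=kvm,kvm-type=hv ...
 *
 * With the default 'auto', whichever of kvm_hv/kvm_pr the host has
 * loaded is used, kvm_hv taking precedence when both are available.
 */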
3159 
3160 /*
3161  * Implementation of an interface to adjust firmware path
3162  * for the bootindex property handling.
3163  */
3164 static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
3165                                    DeviceState *dev)
3166 {
3167 #define CAST(type, obj, name) \
3168     ((type *)object_dynamic_cast(OBJECT(obj), (name)))
3169     SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
3170     SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
3171     VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
3172     PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3173 
3174     if (d && bus) {
3175         void *spapr = CAST(void, bus->parent, "spapr-vscsi");
3176         VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
3177         USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
3178 
3179         if (spapr) {
3180             /*
3181              * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
3182              * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
3183              * 0x8000 | (target << 8) | (bus << 5) | lun
3184              * (see the "Logical unit addressing format" table in SAM5)
3185              */
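            /*
             * Worked example (added for illustration): target id 1,
             * channel 0, lun 0 gives id = 0x8000 | (1 << 8) = 0x8100,
             * so the returned path is "disk@8100000000000000".
             */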
3186             unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
3187             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3188                                    (uint64_t)id << 48);
3189         } else if (virtio) {
3190             /*
3191              * We use SRP luns of the form 01000000 | (target << 8) | lun
3192              * in the top 32 bits of the 64-bit LUN
3193              * Note: the quote above is from SLOF and it is wrong;
3194              * the actual binding is:
3195              * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
3196              */
3197             unsigned id = 0x1000000 | (d->id << 16) | d->lun;
3198             if (d->lun >= 256) {
3199                 /* Use the LUN "flat space addressing method" */
3200                 id |= 0x4000;
3201             }
3202             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3203                                    (uint64_t)id << 32);
3204         } else if (usb) {
3205             /*
3206              * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
3207              * in the top 32 bits of the 64-bit LUN
3208              */
3209             unsigned usb_port = atoi(usb->port->path);
3210             unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
3211             return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
3212                                    (uint64_t)id << 32);
3213         }
3214     }
3215 
3216     /*
3217      * SLOF probes the USB devices, and if it recognizes that the device is a
3218      * storage device, it changes its name to "storage" instead of "usb-host",
3219      * and additionally adds a child node for the SCSI LUN, so the correct
3220      * boot path in SLOF is something like ".../storage@1/disk@xxx" instead.
3221      */
3222     if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
3223         USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
3224         if (usb_device_is_scsi_storage(usbdev)) {
3225             return g_strdup_printf("storage@%s/disk", usbdev->port->path);
3226         }
3227     }
3228 
3229     if (phb) {
3230         /* Replace "pci" with "pci@800000020000000" */
3231         return g_strdup_printf("pci@%"PRIX64, phb->buid);
3232     }
3233 
3234     if (vsc) {
3235         /* Same logic as virtio above */
3236         unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
3237         return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
3238     }
3239 
3240     if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
3241         /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
3242         PCIDevice *pdev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
3243         return g_strdup_printf("pci@%x", PCI_SLOT(pdev->devfn));
3244     }
3245 
3246     if (pcidev) {
3247         return spapr_pci_fw_dev_name(pcidev);
3248     }
3249 
3250     return NULL;
3251 }
3252 
3253 static char *spapr_get_kvm_type(Object *obj, Error **errp)
3254 {
3255     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3256 
3257     return g_strdup(spapr->kvm_type);
3258 }
3259 
3260 static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
3261 {
3262     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3263 
3264     g_free(spapr->kvm_type);
3265     spapr->kvm_type = g_strdup(value);
3266 }
3267 
3268 static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
3269 {
3270     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3271 
3272     return spapr->use_hotplug_event_source;
3273 }
3274 
3275 static void spapr_set_modern_hotplug_events(Object *obj, bool value,
3276                                             Error **errp)
3277 {
3278     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3279 
3280     spapr->use_hotplug_event_source = value;
3281 }
3282 
3283 static bool spapr_get_msix_emulation(Object *obj, Error **errp)
3284 {
3285     return true;
3286 }
3287 
3288 static char *spapr_get_resize_hpt(Object *obj, Error **errp)
3289 {
3290     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3291 
3292     switch (spapr->resize_hpt) {
3293     case SPAPR_RESIZE_HPT_DEFAULT:
3294         return g_strdup("default");
3295     case SPAPR_RESIZE_HPT_DISABLED:
3296         return g_strdup("disabled");
3297     case SPAPR_RESIZE_HPT_ENABLED:
3298         return g_strdup("enabled");
3299     case SPAPR_RESIZE_HPT_REQUIRED:
3300         return g_strdup("required");
3301     }
3302     g_assert_not_reached();
3303 }
3304 
3305 static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
3306 {
3307     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3308 
3309     if (strcmp(value, "default") == 0) {
3310         spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
3311     } else if (strcmp(value, "disabled") == 0) {
3312         spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
3313     } else if (strcmp(value, "enabled") == 0) {
3314         spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
3315     } else if (strcmp(value, "required") == 0) {
3316         spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
3317     } else {
3318         error_setg(errp, "Bad value for \"resize-hpt\" property");
3319     }
3320 }
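
/*
 * Illustrative usage (not from the original source):
 *
 *   qemu-system-ppc64 -machine pseries,resize-hpt=required ...
 *
 * As the machine-init code earlier in this file shows, with 'enabled'
 * or 'required' machine setup aborts when the host cannot supply HPT
 * resizing, while 'default' quietly falls back to 'disabled'.
 */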
3321 
3322 static bool spapr_get_vof(Object *obj, Error **errp)
3323 {
3324     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3325 
3326     return spapr->vof != NULL;
3327 }
3328 
3329 static void spapr_set_vof(Object *obj, bool value, Error **errp)
3330 {
3331     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3332 
3333     if (spapr->vof) {
3334         vof_cleanup(spapr->vof);
3335         g_free(spapr->vof);
3336         spapr->vof = NULL;
3337     }
3338     if (!value) {
3339         return;
3340     }
3341     spapr->vof = g_malloc0(sizeof(*spapr->vof));
3342 }
3343 
3344 static char *spapr_get_ic_mode(Object *obj, Error **errp)
3345 {
3346     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3347 
3348     if (spapr->irq == &spapr_irq_xics_legacy) {
3349         return g_strdup("legacy");
3350     } else if (spapr->irq == &spapr_irq_xics) {
3351         return g_strdup("xics");
3352     } else if (spapr->irq == &spapr_irq_xive) {
3353         return g_strdup("xive");
3354     } else if (spapr->irq == &spapr_irq_dual) {
3355         return g_strdup("dual");
3356     }
3357     g_assert_not_reached();
3358 }
3359 
3360 static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
3361 {
3362     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3363 
3364     if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
3365         error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
3366         return;
3367     }
3368 
3369     /* The legacy IRQ backend cannot be set */
3370     if (strcmp(value, "xics") == 0) {
3371         spapr->irq = &spapr_irq_xics;
3372     } else if (strcmp(value, "xive") == 0) {
3373         spapr->irq = &spapr_irq_xive;
3374     } else if (strcmp(value, "dual") == 0) {
3375         spapr->irq = &spapr_irq_dual;
3376     } else {
3377         error_setg(errp, "Bad value for \"ic-mode\" property");
3378     }
3379 }
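
/*
 * Illustrative usage (not from the original source):
 *
 *   qemu-system-ppc64 -machine pseries,ic-mode=xive ...
 *
 * 'dual' keeps both the XICS and XIVE backends available and lets the
 * guest choose one during CAS negotiation.
 */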
3380 
3381 static char *spapr_get_host_model(Object *obj, Error **errp)
3382 {
3383     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3384 
3385     return g_strdup(spapr->host_model);
3386 }
3387 
3388 static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
3389 {
3390     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3391 
3392     g_free(spapr->host_model);
3393     spapr->host_model = g_strdup(value);
3394 }
3395 
3396 static char *spapr_get_host_serial(Object *obj, Error **errp)
3397 {
3398     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3399 
3400     return g_strdup(spapr->host_serial);
3401 }
3402 
3403 static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
3404 {
3405     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3406 
3407     g_free(spapr->host_serial);
3408     spapr->host_serial = g_strdup(value);
3409 }
3410 
3411 static void spapr_instance_init(Object *obj)
3412 {
3413     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3414     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
3415     MachineState *ms = MACHINE(spapr);
3416     MachineClass *mc = MACHINE_GET_CLASS(ms);
3417 
3418     /*
3419      * NVDIMM support went live in 5.1 without considering that, on
3420      * other architectures, the user needs to enable NVDIMM support
3421      * with the 'nvdimm' machine option, and that the default behavior
3422      * is NVDIMM support disabled. It is too late to roll back to the
3423      * standard behavior without breaking 5.1 guests.
3424      */
3425     if (mc->nvdimm_supported) {
3426         ms->nvdimms_state->is_enabled = true;
3427     }
3428 
3429     spapr->htab_fd = -1;
3430     spapr->use_hotplug_event_source = true;
3431     spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
3432     object_property_add_str(obj, "kvm-type",
3433                             spapr_get_kvm_type, spapr_set_kvm_type);
3434     object_property_set_description(obj, "kvm-type",
3435                                     "Specifies the KVM virtualization mode (auto,"
3436                                     " hv, pr). Defaults to 'auto'. This mode will use"
3437                                     " any available KVM module loaded in the host,"
3438                                     " where kvm_hv takes precedence if both kvm_hv and"
3439                                     " kvm_pr are loaded.");
3440     object_property_add_bool(obj, "modern-hotplug-events",
3441                             spapr_get_modern_hotplug_events,
3442                             spapr_set_modern_hotplug_events);
3443     object_property_set_description(obj, "modern-hotplug-events",
3444                                     "Use dedicated hotplug event mechanism in"
3445                                     " place of standard EPOW events when possible"
3446                                     " (required for memory hot-unplug support)");
3447     ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
3448                             "Maximum permitted CPU compatibility mode");
3449 
3450     object_property_add_str(obj, "resize-hpt",
3451                             spapr_get_resize_hpt, spapr_set_resize_hpt);
3452     object_property_set_description(obj, "resize-hpt",
3453                                     "Resizing of the Hash Page Table (enabled, disabled, required)");
3454     object_property_add_uint32_ptr(obj, "vsmt",
3455                                    &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
3456     object_property_set_description(obj, "vsmt",
3457                                     "Virtual SMT: KVM behaves as if this were"
3458                                     " the host's SMT mode");
3459 
3460     object_property_add_bool(obj, "vfio-no-msix-emulation",
3461                              spapr_get_msix_emulation, NULL);
3462 
3463     object_property_add_uint64_ptr(obj, "kernel-addr",
3464                                    &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
3465     object_property_set_description(obj, "kernel-addr",
3466                                     stringify(KERNEL_LOAD_ADDR)
3467                                     " for -kernel is the default");
3468     spapr->kernel_addr = KERNEL_LOAD_ADDR;
3469 
3470     object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof);
3471     object_property_set_description(obj, "x-vof",
3472                                     "Enable Virtual Open Firmware (experimental)");
3473 
3474     /* The machine class defines the default interrupt controller mode */
3475     spapr->irq = smc->irq;
3476     object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
3477                             spapr_set_ic_mode);
3478     object_property_set_description(obj, "ic-mode",
3479                  "Specifies the interrupt controller mode (xics, xive, dual)");
3480 
3481     object_property_add_str(obj, "host-model",
3482         spapr_get_host_model, spapr_set_host_model);
3483     object_property_set_description(obj, "host-model",
3484         "Host model to advertise in guest device tree");
3485     object_property_add_str(obj, "host-serial",
3486         spapr_get_host_serial, spapr_set_host_serial);
3487     object_property_set_description(obj, "host-serial",
3488         "Host serial number to advertise in guest device tree");
3489 }
3490 
3491 static void spapr_machine_finalizefn(Object *obj)
3492 {
3493     SpaprMachineState *spapr = SPAPR_MACHINE(obj);
3494 
3495     g_free(spapr->kvm_type);
3496 }
3497 
3498 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
3499 {
3500     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
3501     CPUPPCState *env = cpu_env(cs);
3502 
3503     cpu_synchronize_state(cs);
3504     /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
3505     if (spapr->fwnmi_system_reset_addr != -1) {
3506         uint64_t rtas_addr, addr;
3507 
3508         /* get rtas addr from fdt */
3509         rtas_addr = spapr_get_rtas_addr();
3510         if (!rtas_addr) {
3511             qemu_system_guest_panicked(NULL);
3512             return;
3513         }
3514 
3515         addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t)*2;
3516         stq_be_phys(&address_space_memory, addr, env->gpr[3]);
3517         stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
3518         env->gpr[3] = addr;
3519     }
3520     ppc_cpu_do_system_reset(cs);
3521     if (spapr->fwnmi_system_reset_addr != -1) {
3522         env->nip = spapr->fwnmi_system_reset_addr;
3523     }
3524 }
3525 
3526 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
3527 {
3528     CPUState *cs;
3529 
3530     CPU_FOREACH(cs) {
3531         async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
3532     }
3533 }
3534 
3535 int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3536                           void *fdt, int *fdt_start_offset, Error **errp)
3537 {
3538     uint64_t addr;
3539     uint32_t node;
3540 
3541     addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
3542     node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
3543                                     &error_abort);
3544     *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
3545                                              SPAPR_MEMORY_BLOCK_SIZE);
3546     return 0;
3547 }
3548 
3549 static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
3550                            bool dedicated_hp_event_source)
3551 {
3552     SpaprDrc *drc;
3553     uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
3554     int i;
3555     uint64_t addr = addr_start;
3556     bool hotplugged = spapr_drc_hotplugged(dev);
3557 
3558     for (i = 0; i < nr_lmbs; i++) {
3559         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3560                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3561         g_assert(drc);
3562 
3563         /*
3564          * memory_device_get_free_addr() provided a range of free addresses
3565          * that doesn't overlap with any existing mapping at pre-plug. The
3566          * corresponding LMB DRCs are thus assumed to be all attachable.
3567          */
3568         spapr_drc_attach(drc, dev);
3569         if (!hotplugged) {
3570             spapr_drc_reset(drc);
3571         }
3572         addr += SPAPR_MEMORY_BLOCK_SIZE;
3573     }
3574     /* send hotplug notification to the
3575      * guest only in case of hotplugged memory
3576      */
3577     if (hotplugged) {
3578         if (dedicated_hp_event_source) {
3579             drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3580                                   addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3581             g_assert(drc);
3582             spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3583                                                    nr_lmbs,
3584                                                    spapr_drc_index(drc));
3585         } else {
3586             spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
3587                                            nr_lmbs);
3588         }
3589     }
3590 }
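
/*
 * Worked example (added for illustration, assuming the usual 256 MiB
 * SPAPR_MEMORY_BLOCK_SIZE): plugging a 1 GiB DIMM gives nr_lmbs = 4,
 * so four consecutive LMB DRCs are attached and, when hotplugged, a
 * single "add by count" event covering all four is sent to the guest.
 */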
3591 
3592 static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3593 {
3594     SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
3595     PCDIMMDevice *dimm = PC_DIMM(dev);
3596     uint64_t size, addr;
3597     int64_t slot;
3598     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3599 
3600     size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
3601 
3602     pc_dimm_plug(dimm, MACHINE(ms));
3603 
3604     if (!is_nvdimm) {
3605         addr = object_property_get_uint(OBJECT(dimm),
3606                                         PC_DIMM_ADDR_PROP, &error_abort);
3607         spapr_add_lmbs(dev, addr, size,
3608                        spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
3609     } else {
3610         slot = object_property_get_int(OBJECT(dimm),
3611                                        PC_DIMM_SLOT_PROP, &error_abort);
3612         /* We should have valid slot number at this point */
3613         g_assert(slot >= 0);
3614         spapr_add_nvdimm(dev, slot);
3615     }
3616 }
3617 
3618 static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
3619                                   Error **errp)
3620 {
3621     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3622     bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
3623     PCDIMMDevice *dimm = PC_DIMM(dev);
3624     Error *local_err = NULL;
3625     uint64_t size;
3626     Object *memdev;
3627     hwaddr pagesize;
3628 
3629     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
3630     if (local_err) {
3631         error_propagate(errp, local_err);
3632         return;
3633     }
3634 
3635     if (is_nvdimm) {
3636         if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
3637             return;
3638         }
3639     } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
3640         error_setg(errp, "Hotplugged memory size must be a multiple of "
3641                    "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
3642         return;
3643     }
3644 
3645     memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
3646                                       &error_abort);
3647     pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
3648     if (!spapr_check_pagesize(spapr, pagesize, errp)) {
3649         return;
3650     }
3651 
3652     pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), errp);
3653 }
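
/*
 * Illustrative hotplug flow (not from the original source), via the
 * HMP monitor; the ids "mem1" and "dimm1" are arbitrary:
 *
 *   (qemu) object_add memory-backend-ram,id=mem1,size=1G
 *   (qemu) device_add pc-dimm,id=dimm1,memdev=mem1
 *
 * The pre-plug hook above validates the size and page size before
 * spapr_memory_plug() attaches the LMB DRCs.
 */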
3654 
3655 struct SpaprDimmState {
3656     PCDIMMDevice *dimm;
3657     uint32_t nr_lmbs;
3658     QTAILQ_ENTRY(SpaprDimmState) next;
3659 };
3660 
3661 static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
3662                                                        PCDIMMDevice *dimm)
3663 {
3664     SpaprDimmState *dimm_state = NULL;
3665 
3666     QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
3667         if (dimm_state->dimm == dimm) {
3668             break;
3669         }
3670     }
3671     return dimm_state;
3672 }
3673 
3674 static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
3675                                                       uint32_t nr_lmbs,
3676                                                       PCDIMMDevice *dimm)
3677 {
3678     SpaprDimmState *ds = NULL;
3679 
3680     /*
3681      * If this request is for a DIMM whose removal had failed earlier
3682      * (due to guest's refusal to remove the LMBs), we would have this
3683      * dimm already in the pending_dimm_unplugs list. In that
3684      * case don't add again.
3685      */
3686     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3687     if (!ds) {
3688         ds = g_new0(SpaprDimmState, 1);
3689         ds->nr_lmbs = nr_lmbs;
3690         ds->dimm = dimm;
3691         QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
3692     }
3693     return ds;
3694 }
3695 
3696 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
3697                                               SpaprDimmState *dimm_state)
3698 {
3699     QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
3700     g_free(dimm_state);
3701 }
3702 
3703 static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
3704                                                         PCDIMMDevice *dimm)
3705 {
3706     SpaprDrc *drc;
3707     uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
3708                                                   &error_abort);
3709     uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3710     uint32_t avail_lmbs = 0;
3711     uint64_t addr_start, addr;
3712     int i;
3713 
3714     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3715                                           &error_abort);
3716 
3717     addr = addr_start;
3718     for (i = 0; i < nr_lmbs; i++) {
3719         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3720                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3721         g_assert(drc);
3722         if (drc->dev) {
3723             avail_lmbs++;
3724         }
3725         addr += SPAPR_MEMORY_BLOCK_SIZE;
3726     }
3727 
3728     return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
3729 }
3730 
3731 void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev)
3732 {
3733     SpaprDimmState *ds;
3734     PCDIMMDevice *dimm;
3735     SpaprDrc *drc;
3736     uint32_t nr_lmbs;
3737     uint64_t size, addr_start, addr;
3738     int i;
3739 
3740     if (!dev) {
3741         return;
3742     }
3743 
3744     dimm = PC_DIMM(dev);
3745     ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3746 
3747     /*
3748      * 'ds == NULL' would mean that the DIMM doesn't have a pending
3749      * unplug state, but one of its DRCs is marked as unplug_requested.
3750      * This is bad and weird enough to g_assert() out.
3751      */
3752     g_assert(ds);
3753 
3754     spapr_pending_dimm_unplugs_remove(spapr, ds);
3755 
3756     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3757     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3758 
3759     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3760                                           &error_abort);
3761 
3762     addr = addr_start;
3763     for (i = 0; i < nr_lmbs; i++) {
3764         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3765                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3766         g_assert(drc);
3767 
3768         drc->unplug_requested = false;
3769         addr += SPAPR_MEMORY_BLOCK_SIZE;
3770     }
3771 
3772     /*
3773      * Tell QAPI that something happened and the memory
3774      * hotunplug wasn't successful.
3775      */
3776     qapi_event_send_device_unplug_guest_error(dev->id,
3777                                               dev->canonical_path);
3778 }
3779 
3780 /* Callback to be called during DRC release. */
3781 void spapr_lmb_release(DeviceState *dev)
3782 {
3783     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3784     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
3785     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3786 
3787     /* This information will get lost if a migration occurs
3788      * during the unplug process. In this case recover it. */
3789     if (ds == NULL) {
3790         ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
3791         g_assert(ds);
3792         /* The DRC being examined by the caller at least must be counted */
3793         g_assert(ds->nr_lmbs);
3794     }
3795 
3796     if (--ds->nr_lmbs) {
3797         return;
3798     }
3799 
3800     /*
3801      * Now that all the LMBs have been removed by the guest, call the
3802      * unplug handler chain. This can never fail.
3803      */
3804     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3805     object_unparent(OBJECT(dev));
3806 }
3807 
3808 static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3809 {
3810     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3811     SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
3812 
3813     /* We really shouldn't get this far without anything to unplug */
3814     g_assert(ds);
3815 
3816     pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
3817     qdev_unrealize(dev);
3818     spapr_pending_dimm_unplugs_remove(spapr, ds);
3819 }
3820 
3821 static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
3822                                         DeviceState *dev, Error **errp)
3823 {
3824     SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
3825     PCDIMMDevice *dimm = PC_DIMM(dev);
3826     uint32_t nr_lmbs;
3827     uint64_t size, addr_start, addr;
3828     int i;
3829     SpaprDrc *drc;
3830 
3831     if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
3832         error_setg(errp, "nvdimm device hot unplug is not supported yet.");
3833         return;
3834     }
3835 
3836     size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
3837     nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3838 
3839     addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3840                                           &error_abort);
3841 
3842     /*
3843      * An existing pending dimm state for this DIMM means that there is an
3844      * unplug operation in progress, waiting for the spapr_lmb_release
3845      * callback to complete the job (BQL can't cover that far). In this case,
3846      * bail out to avoid detaching DRCs that were already released.
3847      */
3848     if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
3849         error_setg(errp, "Memory unplug already in progress for device %s",
3850                    dev->id);
3851         return;
3852     }
3853 
3854     spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);
3855 
3856     addr = addr_start;
3857     for (i = 0; i < nr_lmbs; i++) {
3858         drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3859                               addr / SPAPR_MEMORY_BLOCK_SIZE);
3860         g_assert(drc);
3861 
3862         spapr_drc_unplug_request(drc);
3863         addr += SPAPR_MEMORY_BLOCK_SIZE;
3864     }
3865 
3866     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3867                           addr_start / SPAPR_MEMORY_BLOCK_SIZE);
3868     spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
3869                                               nr_lmbs, spapr_drc_index(drc));
3870 }
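
/*
 * Illustrative trigger (not from the original source): an HMP/QMP
 * "device_del dimm1" lands here. The removal completes asynchronously,
 * LMB by LMB, through spapr_lmb_release() as the guest gives them up.
 */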
3871 
3872 /* Callback to be called during DRC release. */
3873 void spapr_core_release(DeviceState *dev)
3874 {
3875     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
3876 
3877     /* Call the unplug handler chain. This can never fail. */
3878     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
3879     object_unparent(OBJECT(dev));
3880 }
3881 
3882 static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
3883 {
3884     MachineState *ms = MACHINE(hotplug_dev);
3885     CPUCore *cc = CPU_CORE(dev);
3886     CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
3887 
3888     assert(core_slot);
3889     core_slot->cpu = NULL;
3890     qdev_unrealize(dev);
3891 }
3892 
3893 static
3894 void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
3895                                Error **errp)
3896 {
3897     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3898     int index;
3899     SpaprDrc *drc;
3900     CPUCore *cc = CPU_CORE(dev);
3901 
3902     if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
3903         error_setg(errp, "Unable to find CPU core with core-id: %d",
3904                    cc->core_id);
3905         return;
3906     }
3907     if (index == 0) {
3908         error_setg(errp, "Boot CPU core may not be unplugged");
3909         return;
3910     }
3911 
3912     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3913                           spapr_vcpu_id(spapr, cc->core_id));
3914     g_assert(drc);
3915 
3916     if (!spapr_drc_unplug_requested(drc)) {
3917         spapr_drc_unplug_request(drc);
3918     }
3919 
3920     /*
3921      * spapr_hotplug_req_remove_by_index is left unguarded, out of the
3922      * "!spapr_drc_unplug_requested" check, to allow for multiple IRQ
3923      * pulses removing the same CPU. Otherwise, in a failed hotunplug
3924      * attempt (e.g. the kernel will refuse to remove the last online
3925      * CPU), we will never attempt it again because unplug_requested
3926      * will still be 'true' in that case.
3927      */
3928     spapr_hotplug_req_remove_by_index(drc);
3929 }
3930 
3931 int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
3932                            void *fdt, int *fdt_start_offset, Error **errp)
3933 {
3934     SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
3935     CPUState *cs = CPU(core->threads[0]);
3936     PowerPCCPU *cpu = POWERPC_CPU(cs);
3937     DeviceClass *dc = DEVICE_GET_CLASS(cs);
3938     int id = spapr_get_vcpu_id(cpu);
3939     g_autofree char *nodename = NULL;
3940     int offset;
3941 
3942     nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
3943     offset = fdt_add_subnode(fdt, 0, nodename);
3944 
3945     spapr_dt_cpu(cs, fdt, offset, spapr);
3946 
3947     /*
3948      * spapr_dt_cpu() does not fill the 'name' property in the
3949      * CPU node. The function is called during boot process, before
3950      * and after CAS, and overwriting the 'name' property written
3951      * by SLOF is not allowed.
3952      *
3953      * Write it manually after spapr_dt_cpu(). This makes the hotplug
3954      * CPUs more compatible with the coldplugged ones, which have
3955      * the 'name' property. Linux Kernel also relies on this
3956      * property to identify CPU nodes.
3957      */
3958     _FDT((fdt_setprop_string(fdt, offset, "name", nodename)));
3959 
3960     *fdt_start_offset = offset;
3961     return 0;
3962 }
3963 
3964 static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
3965 {
3966     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
3967     MachineClass *mc = MACHINE_GET_CLASS(spapr);
3968     SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
3969     CPUCore *cc = CPU_CORE(dev);
3970     SpaprDrc *drc;
3971     CPUArchId *core_slot;
3972     int index;
3973     bool hotplugged = spapr_drc_hotplugged(dev);
3974     int i;
3975 
3976     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
3977     g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */
3978 
3979     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
3980                           spapr_vcpu_id(spapr, cc->core_id));
3981 
3982     g_assert(drc || !mc->has_hotpluggable_cpus);
3983 
3984     if (drc) {
3985         /*
3986          * spapr_core_pre_plug() already guarantees that this is a brand new
3987          * core being plugged into a free slot. Nothing should already
3988          * be attached to the corresponding DRC.
3989          */
3990         spapr_drc_attach(drc, dev);
3991 
3992         if (hotplugged) {
3993             /*
3994              * Send hotplug notification interrupt to the guest only
3995              * in case of hotplugged CPUs.
3996              */
3997             spapr_hotplug_req_add_by_index(drc);
3998         } else {
3999             spapr_drc_reset(drc);
4000         }
4001     }
4002 
4003     core_slot->cpu = CPU(dev);
4004 
4005     /*
4006      * Set compatibility mode to match the boot CPU, which was either set
4007      * by the machine reset code or by CAS. This really shouldn't fail at
4008      * this point.
4009      */
4010     if (hotplugged) {
4011         for (i = 0; i < cc->nr_threads; i++) {
4012             ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
4013                            &error_abort);
4014         }
4015     }
4016 
4017 }
4018 
4019 static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4020                                 Error **errp)
4021 {
4022     MachineState *machine = MACHINE(OBJECT(hotplug_dev));
4023     MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
4024     CPUCore *cc = CPU_CORE(dev);
4025     const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
4026     const char *type = object_get_typename(OBJECT(dev));
4027     CPUArchId *core_slot;
4028     int index;
4029     unsigned int smp_threads = machine->smp.threads;
4030 
4031     if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
4032         error_setg(errp, "CPU hotplug not supported for this machine");
4033         return;
4034     }
4035 
4036     if (strcmp(base_core_type, type)) {
4037         error_setg(errp, "CPU core type should be %s", base_core_type);
4038         return;
4039     }
4040 
4041     if (cc->core_id % smp_threads) {
4042         error_setg(errp, "invalid core id %d", cc->core_id);
4043         return;
4044     }
4045 
4046     /*
4047      * In general we should have homogeneous threads-per-core, but old
4048      * (pre hotplug support) machine types allow the last core to have
4049      * reduced threads as a compatibility hack for when we allowed a
4050      * total vcpu count that was not a multiple of threads-per-core.
4051      */
4052     if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
4053         error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads,
4054                    smp_threads);
4055         return;
4056     }
4057 
4058     core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
4059     if (!core_slot) {
4060         error_setg(errp, "core id %d out of range", cc->core_id);
4061         return;
4062     }
4063 
4064     if (core_slot->cpu) {
4065         error_setg(errp, "core %d already populated", cc->core_id);
4066         return;
4067     }
4068 
4069     numa_cpu_pre_plug(core_slot, dev, errp);
4070 }
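
/*
 * Worked example (added for illustration): with -smp threads=8, valid
 * core-id values are multiples of 8. A hotplug request then looks like
 * (the exact core type name depends on the CPU model in use):
 *
 *   (qemu) device_add power9_v2.0-spapr-cpu-core,core-id=8
 */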
4071 
4072 int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
4073                           void *fdt, int *fdt_start_offset, Error **errp)
4074 {
4075     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
4076     int intc_phandle;
4077 
4078     intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
4079     if (intc_phandle <= 0) {
4080         return -1;
4081     }
4082 
4083     if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
4084         error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
4085         return -1;
4086     }
4087 
4088     /* generally SLOF creates these, for hotplug it's up to QEMU */
4089     _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));
4090 
4091     return 0;
4092 }
4093 
4094 static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4095                                Error **errp)
4096 {
4097     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4098     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4099     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4100     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
4101     SpaprDrc *drc;
4102 
4103     if (dev->hotplugged && !smc->dr_phb_enabled) {
4104         error_setg(errp, "PHB hotplug not supported for this machine");
4105         return false;
4106     }
4107 
4108     if (sphb->index == (uint32_t)-1) {
4109         error_setg(errp, "\"index\" for PAPR PHB is mandatory");
4110         return false;
4111     }
4112 
4113     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4114     if (drc && drc->dev) {
4115         error_setg(errp, "PHB %d already attached", sphb->index);
4116         return false;
4117     }
4118 
4119     /*
4120      * This will check that sphb->index doesn't exceed the maximum number of
4121      * PHBs for the current machine type.
4122      */
4123     return
4124         smc->phb_placement(spapr, sphb->index,
4125                            &sphb->buid, &sphb->io_win_addr,
4126                            &sphb->mem_win_addr, &sphb->mem64_win_addr,
4127                            windows_supported, sphb->dma_liobn,
4128                            errp);
4129 }
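
/*
 * Illustrative usage (not from the original source): an additional PHB
 * is hotplugged with the mandatory "index" property checked above:
 *
 *   (qemu) device_add spapr-pci-host-bridge,index=1
 */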
4130 
4131 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4132 {
4133     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4134     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
4135     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4136     SpaprDrc *drc;
4137     bool hotplugged = spapr_drc_hotplugged(dev);
4138 
4139     if (!smc->dr_phb_enabled) {
4140         return;
4141     }
4142 
4143     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4144     /* hotplug hooks should check it's enabled before getting this far */
4145     assert(drc);
4146 
4147     /* spapr_phb_pre_plug() already checked the DRC is attachable */
4148     spapr_drc_attach(drc, dev);
4149 
4150     if (hotplugged) {
4151         spapr_hotplug_req_add_by_index(drc);
4152     } else {
4153         spapr_drc_reset(drc);
4154     }
4155 }
4156 
4157 void spapr_phb_release(DeviceState *dev)
4158 {
4159     HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
4160 
4161     hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
4162     object_unparent(OBJECT(dev));
4163 }
4164 
4165 static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4166 {
4167     qdev_unrealize(dev);
4168 }
4169 
4170 static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
4171                                      DeviceState *dev, Error **errp)
4172 {
4173     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
4174     SpaprDrc *drc;
4175 
4176     drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
4177     assert(drc);
4178 
4179     if (!spapr_drc_unplug_requested(drc)) {
4180         spapr_drc_unplug_request(drc);
4181         spapr_hotplug_req_remove_by_index(drc);
4182     } else {
4183         error_setg(errp,
4184                    "PCI Host Bridge unplug already in progress for device %s",
4185                    dev->id);
4186     }
4187 }
4188 
4189 static
4190 bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
4191                               Error **errp)
4192 {
4193     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4194 
4195     if (spapr->tpm_proxy != NULL) {
4196         error_setg(errp, "Only one TPM proxy can be specified for this machine");
4197         return false;
4198     }
4199 
4200     return true;
4201 }
4202 
4203 static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
4204 {
4205     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4206     SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);
4207 
4208     /* Already checked in spapr_tpm_proxy_pre_plug() */
4209     g_assert(spapr->tpm_proxy == NULL);
4210 
4211     spapr->tpm_proxy = tpm_proxy;
4212 }
4213 
4214 static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
4215 {
4216     SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
4217 
4218     qdev_unrealize(dev);
4219     object_unparent(OBJECT(dev));
4220     spapr->tpm_proxy = NULL;
4221 }
4222 
4223 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
4224                                       DeviceState *dev, Error **errp)
4225 {
4226     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4227         spapr_memory_plug(hotplug_dev, dev);
4228     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4229         spapr_core_plug(hotplug_dev, dev);
4230     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4231         spapr_phb_plug(hotplug_dev, dev);
4232     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4233         spapr_tpm_proxy_plug(hotplug_dev, dev);
4234     }
4235 }
4236 
4237 static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
4238                                         DeviceState *dev, Error **errp)
4239 {
4240     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4241         spapr_memory_unplug(hotplug_dev, dev);
4242     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4243         spapr_core_unplug(hotplug_dev, dev);
4244     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4245         spapr_phb_unplug(hotplug_dev, dev);
4246     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4247         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4248     }
4249 }
4250 
4251 bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr)
4252 {
4253     return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) ||
4254         /*
4255          * CAS will process all pending unplug requests.
4256          *
4257          * HACK: a guest could theoretically have cleared all bits in OV5,
4258          * but none of the guests we care for do.
4259          */
4260         spapr_ovec_empty(spapr->ov5_cas);
4261 }
4262 
4263 static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
4264                                                 DeviceState *dev, Error **errp)
4265 {
4266     SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
4267     MachineClass *mc = MACHINE_GET_CLASS(sms);
4268     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
4269 
4270     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4271         if (spapr_memory_hot_unplug_supported(sms)) {
4272             spapr_memory_unplug_request(hotplug_dev, dev, errp);
4273         } else {
4274             error_setg(errp, "Memory hot unplug not supported for this guest");
4275         }
4276     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4277         if (!mc->has_hotpluggable_cpus) {
4278             error_setg(errp, "CPU hot unplug not supported on this machine");
4279             return;
4280         }
4281         spapr_core_unplug_request(hotplug_dev, dev, errp);
4282     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4283         if (!smc->dr_phb_enabled) {
4284             error_setg(errp, "PHB hot unplug not supported on this machine");
4285             return;
4286         }
4287         spapr_phb_unplug_request(hotplug_dev, dev, errp);
4288     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4289         spapr_tpm_proxy_unplug(hotplug_dev, dev);
4290     }
4291 }
4292 
4293 static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
4294                                           DeviceState *dev, Error **errp)
4295 {
4296     if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
4297         spapr_memory_pre_plug(hotplug_dev, dev, errp);
4298     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
4299         spapr_core_pre_plug(hotplug_dev, dev, errp);
4300     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
4301         spapr_phb_pre_plug(hotplug_dev, dev, errp);
4302     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
4303         spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
4304     }
4305 }
4306 
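/*
 * Pick the hotplug handler for a device: DIMMs, CPU cores, PHBs and the
 * TPM proxy are handled by the machine itself; a PCI device is handled
 * by the sPAPR PHB that owns its root bus; anything else has no handler.
 */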
static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
                                                 DeviceState *dev)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        return HOTPLUG_HANDLER(machine);
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pcidev = PCI_DEVICE(dev);
        PCIBus *root = pci_device_root_bus(pcidev);
        SpaprPhbState *phb =
            (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
                                                 TYPE_SPAPR_PCI_HOST_BRIDGE);

        if (phb) {
            return HOTPLUG_HANDLER(phb);
        }
    }
    return NULL;
}

static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* make sure possible_cpus is initialized */
    mc->possible_cpu_arch_ids(machine);
    /* get CPU core slot containing thread that matches cpu_index */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}

static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    return idx / ms->smp.cores % ms->numa_state->num_nodes;
}
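
/*
 * Worked example for the default node id above (illustrative): with
 * ms->smp.cores = 4 and two NUMA nodes, core slots 0..3 map to node 0,
 * slots 4..7 to node 1, and slots 8..11 wrap back to node 0.
 */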

static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    unsigned int smp_threads = machine->smp.threads;
    unsigned int smp_cpus = machine->smp.cpus;
    const char *core_type;
    int spapr_max_cores = machine->smp.max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    core_type = spapr_get_cpu_core_type(machine->cpu_type);
    if (!core_type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                             sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].type = core_type;
        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;
    }
    return machine->possible_cpus;
}
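
/*
 * Note: each possible-CPU slot describes a whole core; its arch_id (and
 * core_id property) is the cpu_index of the core's first thread, so with
 * smp.threads = 8 the slots get ids 0, 8, 16, ...
 */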

static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /*
     * New-style PHB window placement.
     *
     * Goals: Give each PHB a large (1TiB), naturally aligned 64-bit
     * MMIO window, in addition to a 2GiB 32-bit MMIO window and a
     * 64kiB PIO window.
     *
     * Some guest kernels can't work with MMIO windows above 1<<46
     * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB.
     *
     * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
     * PHB stacked together.  (32TiB+2GiB)..(32TiB+64GiB) contains the
     * 2GiB 32-bit MMIO windows for each PHB.  Then 33TiB..64TiB has
     * the 1TiB 64-bit MMIO windows for each PHB.
     */
    const uint64_t base_buid = 0x800000020000000ULL;
    int i;

    /* Sanity check natural alignments */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
    /* Sanity check bounds */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return false;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
    return true;
}
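
/*
 * Worked example (illustrative, assuming SPAPR_MAX_PHBS is 31 as the
 * layout comment above implies): index 0 yields
 *   pio    = 32TiB + 0 * 64kiB = 32TiB
 *   mmio32 = 32TiB + 1 * 2GiB  = 32TiB + 2GiB
 *   mmio64 = 32TiB + 1 * 1TiB  = 33TiB
 * while the last index, 30, yields mmio64 = 63TiB, whose 1TiB window
 * ends exactly at the 64TiB limit.
 */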

/*
 * XICSFabric callbacks: the XICS core uses these to find the machine's
 * (single) ICS for a given irq number and the ICP presenter of a vCPU.
 */
static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
}

static void spapr_ics_resend(XICSFabric *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    ics_resend(spapr->ics);
}

static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
{
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    return cpu ? spapr_cpu_state(cpu)->icp : NULL;
}

static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    spapr_irq_print_info(spapr, buf);
    g_string_append_printf(buf, "irqchip: %s\n",
                           kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
}

/*
 * This is a XIVE-only operation.
 */
static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
                           uint8_t nvt_blk, uint32_t nvt_idx,
                           bool crowd, bool cam_ignore, uint8_t priority,
                           uint32_t logic_serv, XiveTCTXMatch *match)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
    XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
    XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
    int count;

    count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore,
                           priority, logic_serv, match);
    if (count < 0) {
        return count;
    }

    /*
     * Once save/restore of the thread interrupt contexts is implemented
     * in the machine's CPU enter/exit handlers, and escalations are
     * handled in QEMU, we should be able to cope with non-dispatched
     * vCPUs.
     *
     * Until then, the sPAPR machine should always find at least one
     * matching context.
     */
    if (count == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
    }

    return count;
}

int spapr_get_vcpu_id(PowerPCCPU *cpu)
{
    return cpu->vcpu_id;
}

bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    MachineState *ms = MACHINE(spapr);
    int vcpu_id;

    vcpu_id = spapr_vcpu_id(spapr, cpu_index);

    if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
        error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
        error_append_hint(errp, "Adjust the number of cpus to %d "
                          "or try to raise the number of threads per core\n",
                          vcpu_id * ms->smp.threads / spapr->vsmt);
        return false;
    }

    cpu->vcpu_id = vcpu_id;
    return true;
}
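
/*
 * Note (assumption, see spapr_vcpu_id()): vcpu ids are spaced with a
 * stride of spapr->vsmt per core, so a guest with fewer threads per core
 * than VSMT leaves gaps in the id space; the hint above maps the first
 * invalid vcpu id back to a usable cpu count under that spacing.
 */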

PowerPCCPU *spapr_find_cpu(int vcpu_id)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        if (spapr_get_vcpu_id(cpu) == vcpu_id) {
            return cpu;
        }
    }

    return NULL;
}
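
/*
 * Note: this is a linear scan over all vCPUs. It backs guest-supplied
 * vcpu_id lookups such as spapr_icp_get() above, where the id may not
 * correspond to any existing CPU.
 */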

static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    return spapr_cpu->in_nested;
}

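/*
 * VPA dispatch counter convention, as assumed by the checks below: the
 * counter is incremented on every dispatch and every preemption, so it
 * is even while the vCPU is dispatched and odd while it is preempted.
 * A value found with the wrong parity is logged and corrected.
 */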
static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    /* These are only called by TCG, KVM maintains dispatch state */

    spapr_cpu->prod = false;
    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "dispatched partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}

static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "preempted partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}

static void spapr_machine_class_init(ObjectClass *oc, const void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
    XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
    VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";
    mc->ignore_boot_device_suffixes = true;

    /*
     * We set up the default / latest behaviour here.  The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->init = spapr_machine_init;
    mc->reset = spapr_machine_reset;
    mc->block_default_type = IF_SCSI;

    /*
     * KVM determines max cpus in kvm_init() using kvm_max_vcpus();
     * in TCG the limit is restricted by the range of CPU IPIs
     * available.
     */
    mc->max_cpus = SPAPR_IRQ_NR_IPIS;

    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * MiB;
    mc->default_ram_id = "ppc_spapr.ram";
    mc->default_display = "std";
    mc->kvm_type = spapr_kvm_type;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
    mc->pci_allow_0_address = true;
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;
    hc->unplug = spapr_machine_device_unplug;

    smc->update_dt_enabled = true;
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0");
    mc->has_hotpluggable_cpus = true;
    mc->nvdimm_supported = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->cpu_in_nested = spapr_cpu_in_nested;
    vhc->deliver_hv_excp = spapr_exit_nested;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->hpte_set_c = spapr_hpte_set_c;
    vhc->hpte_set_r = spapr_hpte_set_r;
    vhc->get_pate = spapr_get_pate;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    vhc->cpu_exec_enter = spapr_cpu_exec_enter;
    vhc->cpu_exec_exit = spapr_cpu_exec_exit;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;
    /* Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;
    mc->auto_enable_numa = true;

    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
    smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_NESTED_PAPR] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_ON;

    /*
     * This cap specifies whether AIL mode 3 is supported, i.e. whether
     * H_SET_MODE may set the Address Translation Mode resource to 3.
     * The default is modified by default_caps_with_cpu().
     */
    smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON;
    spapr_caps_add_properties(smc);
    smc->irq = &spapr_irq_dual;
    smc->dr_phb_enabled = true;
    smc->linux_pci_probe = true;
    smc->smp_threads_vsmt = true;
    smc->nr_xirqs = SPAPR_NR_XIRQS;
    xfc->match_nvt = spapr_match_nvt;
    vmc->client_architecture_support = spapr_vof_client_architecture_support;
    vmc->quiesce = spapr_vof_quiesce;
    vmc->setprop = spapr_vof_setprop;
}

static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(SpaprMachineState),
    .instance_init = spapr_instance_init,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size    = sizeof(SpaprMachineClass),
    .class_init    = spapr_machine_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { TYPE_XIVE_FABRIC },
        { TYPE_VOF_MACHINE_IF },
        { }
    },
};

static void spapr_machine_latest_class_options(MachineClass *mc)
{
    mc->alias = "pseries";
    mc->is_default = true;
}

#define DEFINE_SPAPR_MACHINE_IMPL(latest, ...)                       \
    static void MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__)(     \
        ObjectClass *oc,                                             \
        const void *data)                                            \
    {                                                                \
        MachineClass *mc = MACHINE_CLASS(oc);                        \
        MACHINE_VER_SYM(class_options, spapr, __VA_ARGS__)(mc);      \
        MACHINE_VER_DEPRECATION(__VA_ARGS__);                        \
        if (latest) {                                                \
            spapr_machine_latest_class_options(mc);                  \
        }                                                            \
    }                                                                \
    static const TypeInfo MACHINE_VER_SYM(info, spapr, __VA_ARGS__) = \
    {                                                                \
        .name = MACHINE_VER_TYPE_NAME("pseries", __VA_ARGS__),       \
        .parent = TYPE_SPAPR_MACHINE,                                \
        .class_init = MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__), \
    };                                                               \
    static void MACHINE_VER_SYM(register, spapr, __VA_ARGS__)(void)  \
    {                                                                \
        MACHINE_VER_DELETION(__VA_ARGS__);                           \
        type_register_static(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__));   \
    }                                                                \
    type_init(MACHINE_VER_SYM(register, spapr, __VA_ARGS__))

#define DEFINE_SPAPR_MACHINE_AS_LATEST(major, minor) \
    DEFINE_SPAPR_MACHINE_IMPL(true, major, minor)
#define DEFINE_SPAPR_MACHINE(major, minor) \
    DEFINE_SPAPR_MACHINE_IMPL(false, major, minor)
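
/*
 * Illustrative expansion (the actual names are generated by the
 * MACHINE_VER_* macros): DEFINE_SPAPR_MACHINE(9, 2) emits a class_init
 * wrapper that applies spapr_machine_9_2_class_options(), a TypeInfo
 * for the versioned "pseries-9.2" machine type with TYPE_SPAPR_MACHINE
 * as parent, and a type_init() registration hook.
 * DEFINE_SPAPR_MACHINE_AS_LATEST() additionally applies
 * spapr_machine_latest_class_options(), which makes that version the
 * "pseries" alias and the default machine.
 */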

/*
 * pseries-10.1
 */
static void spapr_machine_10_1_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE_AS_LATEST(10, 1);

/*
 * pseries-10.0
 */
static void spapr_machine_10_0_class_options(MachineClass *mc)
{
    spapr_machine_10_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len);
}

DEFINE_SPAPR_MACHINE(10, 0);

/*
 * pseries-9.2
 */
static void spapr_machine_9_2_class_options(MachineClass *mc)
{
    spapr_machine_10_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len);
}

DEFINE_SPAPR_MACHINE(9, 2);

/*
 * pseries-9.1
 */
static void spapr_machine_9_1_class_options(MachineClass *mc)
{
    spapr_machine_9_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
}

DEFINE_SPAPR_MACHINE(9, 1);

/*
 * pseries-9.0
 */
static void spapr_machine_9_0_class_options(MachineClass *mc)
{
    spapr_machine_9_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len);
}

DEFINE_SPAPR_MACHINE(9, 0);

/*
 * pseries-8.2
 */
static void spapr_machine_8_2_class_options(MachineClass *mc)
{
    spapr_machine_9_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2");
}

DEFINE_SPAPR_MACHINE(8, 2);

/*
 * pseries-8.1
 */
static void spapr_machine_8_1_class_options(MachineClass *mc)
{
    spapr_machine_8_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_1, hw_compat_8_1_len);
}

DEFINE_SPAPR_MACHINE(8, 1);

/*
 * pseries-8.0
 */
static void spapr_machine_8_0_class_options(MachineClass *mc)
{
    spapr_machine_8_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
}

DEFINE_SPAPR_MACHINE(8, 0);

/*
 * pseries-7.2
 */
static void spapr_machine_7_2_class_options(MachineClass *mc)
{
    spapr_machine_8_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
}

DEFINE_SPAPR_MACHINE(7, 2);

/*
 * pseries-7.1
 */
static void spapr_machine_7_1_class_options(MachineClass *mc)
{
    spapr_machine_7_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
}

DEFINE_SPAPR_MACHINE(7, 1);

/*
 * pseries-7.0
 */
static void spapr_machine_7_0_class_options(MachineClass *mc)
{
    spapr_machine_7_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
}

DEFINE_SPAPR_MACHINE(7, 0);

/*
 * pseries-6.2
 */
static void spapr_machine_6_2_class_options(MachineClass *mc)
{
    spapr_machine_7_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}

DEFINE_SPAPR_MACHINE(6, 2);

/*
 * pseries-6.1
 */
static void spapr_machine_6_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_6_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
    smc->pre_6_2_numa_affinity = true;
    mc->smp_props.prefer_sockets = true;
}

DEFINE_SPAPR_MACHINE(6, 1);

/*
 * pseries-6.0
 */
static void spapr_machine_6_0_class_options(MachineClass *mc)
{
    spapr_machine_6_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
}

DEFINE_SPAPR_MACHINE(6, 0);

/*
 * pseries-5.2
 */
static void spapr_machine_5_2_class_options(MachineClass *mc)
{
    spapr_machine_6_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
}

DEFINE_SPAPR_MACHINE(5, 2);

/*
 * pseries-5.1
 */
static void spapr_machine_5_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_5_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
    smc->pre_5_2_numa_associativity = true;
}

DEFINE_SPAPR_MACHINE(5, 1);

/*
 * pseries-5.0
 */
static void spapr_machine_5_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
    };

    spapr_machine_5_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->numa_mem_supported = true;
    smc->pre_5_1_assoc_refpoints = true;
}

DEFINE_SPAPR_MACHINE(5, 0);

/*
 * pseries-4.2
 */
static void spapr_machine_4_2_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_5_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF;
    smc->rma_limit = 16 * GiB;
    mc->nvdimm_supported = false;
}

DEFINE_SPAPR_MACHINE(4, 2);

/*
 * pseries-4.1
 */
static void spapr_machine_4_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        /* Only allow 4kiB and 64kiB IOMMU pagesizes */
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
    };

    spapr_machine_4_2_class_options(mc);
    smc->linux_pci_probe = false;
    smc->smp_threads_vsmt = false;
    compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(4, 1);

/*
 * pseries-4.0
 */
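/*
 * Retained as a distinct phb_placement hook for the pseries-4.0 compat
 * machines below; it is now a plain pass-through to spapr_phb_placement().
 */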
static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns, Error **errp)
{
    if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma,
                             liobns, errp)) {
        return false;
    }
    return true;
}

static void spapr_machine_4_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
    smc->phb_placement = phb_placement_4_0;
    smc->irq = &spapr_irq_xics;
    smc->pre_4_1_migration = true;
}

DEFINE_SPAPR_MACHINE(4, 0);

/*
 * pseries-3.1
 */
static void spapr_machine_3_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);

    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
    smc->update_dt_enabled = false;
    smc->dr_phb_enabled = false;
    smc->broken_host_serial_model = true;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
}

DEFINE_SPAPR_MACHINE(3, 1);

/*
 * pseries-3.0
 */

static void spapr_machine_3_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_3_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);

    smc->legacy_irq_allocation = true;
    smc->nr_xirqs = 0x400;
    smc->irq = &spapr_irq_xics_legacy;
}

DEFINE_SPAPR_MACHINE(3, 0);

static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)