#include "qemu/osdep.h"
#include "migration/vmstate.h"
#include "hw/acpi/cpu.h"
#include "hw/core/cpu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-acpi.h"
#include "trace.h"
#include "system/numa.h"

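/*
 * Layout of the CPU hotplug register block, as implemented by the
 * read/write handlers below (offsets relative to the block's base):
 *
 *  [0x0] WR: CPU selector (DWORD); on read this offset returns the high
 *            32 bits of the last command's result (DATA2)
 *  [0x4] RW: event/control flags for the selected CPU (byte)
 *  [0x5] WR: command (byte, one of the CPHP_*_CMD values)
 *  [0x8] RW: command data (DWORD)
 */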
#define ACPI_CPU_SELECTOR_OFFSET_WR 0
#define ACPI_CPU_FLAGS_OFFSET_RW 4
#define ACPI_CPU_CMD_OFFSET_WR 5
#define ACPI_CPU_CMD_DATA_OFFSET_RW 8
#define ACPI_CPU_CMD_DATA2_OFFSET_R 0

#define OVMF_CPUHP_SMI_CMD 4

enum {
    CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
    CPHP_OST_EVENT_CMD = 1,
    CPHP_OST_STATUS_CMD = 2,
    CPHP_GET_CPU_ID_CMD = 3,
    CPHP_CMD_MAX
};

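/* Build an ACPIOSTInfo element for one CPU slot; the caller owns the result */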
static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
{
    ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);

    info->slot_type = ACPI_SLOT_TYPE_CPU;
    info->slot = g_strdup_printf("%d", idx);
    info->source = cdev->ost_event;
    info->status = cdev->ost_status;
    if (cdev->cpu) {
        DeviceState *dev = DEVICE(cdev->cpu);
        if (dev->id) {
            info->device = g_strdup(dev->id);
        }
    }
    return info;
}

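/* Append an _OST status entry for every possible CPU to *list */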
void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
{
    ACPIOSTInfoList ***tail = list;
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        QAPI_LIST_APPEND(*tail, acpi_cpu_device_status(i, &cpu_st->devs[i]));
    }
}

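/*
 * Guest read handler. All reads are relative to the CPU slot picked via
 * the selector register; reads with an out-of-range selector return 0.
 */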
static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val = 0;
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;

    if (cpu_st->selector >= cpu_st->dev_count) {
        return val;
    }

    cdev = &cpu_st->devs[cpu_st->selector];
    switch (addr) {
    case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */
        val |= cdev->cpu ? 1 : 0;
        val |= cdev->is_inserting ? 2 : 0;
        val |= cdev->is_removing ? 4 : 0;
        val |= cdev->fw_remove ? 16 : 0;
        trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = cpu_st->selector;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id & 0xFFFFFFFF;
            break;
        default:
            break;
        }
        trace_cpuhp_acpi_read_cmd_data(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA2_OFFSET_R:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = 0;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id >> 32;
            break;
        default:
            break;
        }
        trace_cpuhp_acpi_read_cmd_data2(cpu_st->selector, val);
        break;
    default:
        break;
    }
    return val;
}

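/*
 * Guest write handler. Writes to the flags byte act on the selected CPU:
 * bit 1 clears a pending insert event, bit 2 clears a pending remove
 * event, bit 3 ejects the CPU, and bit 4 flags it for firmware-assisted
 * removal; only the first set bit (in that order) takes effect per write.
 */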
static void cpu_hotplug_wr(void *opaque, hwaddr addr, uint64_t data,
                           unsigned int size)
{
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;
    ACPIOSTInfo *info;

    assert(cpu_st->dev_count);

    if (addr) {
        if (cpu_st->selector >= cpu_st->dev_count) {
            trace_cpuhp_acpi_invalid_idx_selected(cpu_st->selector);
            return;
        }
    }

    switch (addr) {
    case ACPI_CPU_SELECTOR_OFFSET_WR: /* current CPU selector */
        cpu_st->selector = data;
        trace_cpuhp_acpi_write_idx(cpu_st->selector);
        break;
    case ACPI_CPU_FLAGS_OFFSET_RW: /* set is_* fields */
        cdev = &cpu_st->devs[cpu_st->selector];
        if (data & 2) { /* clear insert event */
            cdev->is_inserting = false;
            trace_cpuhp_acpi_clear_inserting_evt(cpu_st->selector);
        } else if (data & 4) { /* clear remove event */
            cdev->is_removing = false;
            trace_cpuhp_acpi_clear_remove_evt(cpu_st->selector);
        } else if (data & 8) { /* eject the selected CPU */
            DeviceState *dev = NULL;
            HotplugHandler *hotplug_ctrl = NULL;

            if (!cdev->cpu || cdev->cpu == first_cpu) {
                trace_cpuhp_acpi_ejecting_invalid_cpu(cpu_st->selector);
                break;
            }

            trace_cpuhp_acpi_ejecting_cpu(cpu_st->selector);
            dev = DEVICE(cdev->cpu);
            hotplug_ctrl = qdev_get_hotplug_handler(dev);
            hotplug_handler_unplug(hotplug_ctrl, dev, NULL);
            object_unparent(OBJECT(dev));
            cdev->fw_remove = false;
        } else if (data & 16) { /* request firmware-assisted removal */
            if (!cdev->cpu || cdev->cpu == first_cpu) {
                trace_cpuhp_acpi_fw_remove_invalid_cpu(cpu_st->selector);
                break;
            }
            trace_cpuhp_acpi_fw_remove_cpu(cpu_st->selector);
            cdev->fw_remove = true;
        }
        break;
    case ACPI_CPU_CMD_OFFSET_WR:
        trace_cpuhp_acpi_write_cmd(cpu_st->selector, data);
        if (data < CPHP_CMD_MAX) {
            cpu_st->command = data;
            if (cpu_st->command == CPHP_GET_NEXT_CPU_WITH_EVENT_CMD) {
                uint32_t iter = cpu_st->selector;

                do {
                    cdev = &cpu_st->devs[iter];
                    if (cdev->is_inserting || cdev->is_removing ||
                        cdev->fw_remove) {
                        cpu_st->selector = iter;
                        trace_cpuhp_acpi_cpu_has_events(cpu_st->selector,
                            cdev->is_inserting, cdev->is_removing);
                        break;
                    }
                    iter = iter + 1 < cpu_st->dev_count ? iter + 1 : 0;
                } while (iter != cpu_st->selector);
            }
        }
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_OST_EVENT_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_event = data;
            trace_cpuhp_acpi_write_ost_ev(cpu_st->selector, cdev->ost_event);
            break;
        }
        case CPHP_OST_STATUS_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_status = data;
            info = acpi_cpu_device_status(cpu_st->selector, cdev);
            qapi_event_send_acpi_device_ost(info);
            qapi_free_ACPIOSTInfo(info);
            trace_cpuhp_acpi_write_ost_status(cpu_st->selector,
                                              cdev->ost_status);
            break;
        }
        default:
            break;
        }
        break;
    default:
        break;
    }
}


static const MemoryRegionOps cpu_hotplug_ops = {
    .read = cpu_hotplug_rd,
    .write = cpu_hotplug_wr,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

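/*
 * Allocate hotplug state for all possible CPUs of the machine and map the
 * register block defined above at base_addr in the given container region.
 */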
void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
                         CPUHotplugState *state, hwaddr base_addr)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *id_list;
    int i;

    assert(mc->possible_cpu_arch_ids);
    id_list = mc->possible_cpu_arch_ids(machine);
    state->dev_count = id_list->len;
    state->devs = g_new0(typeof(*state->devs), state->dev_count);
    for (i = 0; i < id_list->len; i++) {
        state->devs[i].cpu = CPU(id_list->cpus[i].cpu);
        state->devs[i].arch_id = id_list->cpus[i].arch_id;
    }
    memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
                          "acpi-cpu-hotplug", ACPI_CPU_HOTPLUG_REG_LEN);
    memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
}

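/* Look up the AcpiCpuStatus slot for a CPU device by matching its arch_id */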
static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
{
    CPUState *cpu = CPU(dev);
    uint64_t cpu_arch_id = cpu->cc->get_arch_id(cpu);
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        if (cpu_arch_id == cpu_st->devs[i].arch_id) {
            return &cpu_st->devs[i];
        }
    }
    return NULL;
}

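/*
 * Plug callback: record the CPU in its slot; for CPUs hotplugged at
 * runtime also latch an insert event and signal ACPI_CPU_HOTPLUG_STATUS
 * so the guest runs the scan method.
 */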
void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
                      CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = CPU(dev);
    if (dev->hotplugged) {
        cdev->is_inserting = true;
        acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
    }
}

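/*
 * Unplug-request callback: latch a remove event and notify the guest; the
 * actual removal happens later, when the guest acks via _EJ0 (or firmware
 * eject) and the write handler above calls hotplug_handler_unplug().
 */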
void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                                CPUHotplugState *cpu_st,
                                DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->is_removing = true;
    acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
}

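/* Unplug callback: clear the slot's CPU pointer once the device is gone */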
void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
                        DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = NULL;
}

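/* Migration state: per-slot event/_OST flags plus the selector/command regs */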
static const VMStateDescription vmstate_cpuhp_sts = {
    .name = "CPU hotplug device state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
        VMSTATE_BOOL(is_removing, AcpiCpuStatus),
        VMSTATE_UINT32(ost_event, AcpiCpuStatus),
        VMSTATE_UINT32(ost_status, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_hotplug = {
    .name = "CPU hotplug state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(selector, CPUHotplugState),
        VMSTATE_UINT8(command, CPUHotplugState),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count,
                                             vmstate_cpuhp_sts, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};

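/* ACPI namespace object names (4 characters) used by the AML generated below */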
#define CPU_NAME_FMT      "C%.03X"
#define CPUHP_RES_DEVICE  "PRES"
#define CPU_LOCK          "CPLK"
#define CPU_STS_METHOD    "CSTA"
#define CPU_SCAN_METHOD   "CSCN"
#define CPU_NOTIFY_METHOD "CTFY"
#define CPU_EJECT_METHOD  "CEJ0"
#define CPU_OST_METHOD    "COST"
#define CPU_ADDED_LIST    "CNEW"
#define CPU_EJ_LIST       "CEJL"

#define CPU_ENABLED       "CPEN"
#define CPU_SELECTOR      "CSEL"
#define CPU_COMMAND       "CCMD"
#define CPU_DATA          "CDAT"
#define CPU_INSERT_EVENT  "CINS"
#define CPU_REMOVE_EVENT  "CRMV"
#define CPU_EJECT_EVENT   "CEJ0"
#define CPU_FW_EJECT_EVENT "CEJF"

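/*
 * Generate the \_SB AML that drives the register block above: the PRES
 * resource device describing the registers, a \_SB.CPUS container with one
 * object per possible CPU, and the CSCN scan method that
 * event_handler_method forwards to.
 */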
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
                    build_madt_cpu_fn build_madt_cpu, hwaddr base_addr,
                    const char *res_root,
                    const char *event_handler_method,
                    AmlRegionSpace rs)
{
    Aml *ifctx;
    Aml *field;
    Aml *method;
    Aml *cpu_ctrl_dev;
    Aml *cpus_dev;
    Aml *zero = aml_int(0);
    Aml *one = aml_int(1);
    Aml *sb_scope = aml_scope("_SB");
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
    char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root);

    cpu_ctrl_dev = aml_device("%s", cphp_res_path);
    {
        Aml *crs;

        aml_append(cpu_ctrl_dev,
            aml_name_decl("_HID", aml_eisaid("PNP0A06")));
        aml_append(cpu_ctrl_dev,
            aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
        aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));

        assert((rs == AML_SYSTEM_IO) || (rs == AML_SYSTEM_MEMORY));

        crs = aml_resource_template();
        if (rs == AML_SYSTEM_IO) {
            aml_append(crs, aml_io(AML_DECODE16, base_addr, base_addr, 1,
                                   ACPI_CPU_HOTPLUG_REG_LEN));
        } else if (rs == AML_SYSTEM_MEMORY) {
            aml_append(crs, aml_memory32_fixed(base_addr,
                               ACPI_CPU_HOTPLUG_REG_LEN, AML_READ_WRITE));
        }

        aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));

        /* declare CPU hotplug MMIO region with related access fields */
        aml_append(cpu_ctrl_dev,
            aml_operation_region("PRST", rs, aml_int(base_addr),
                                 ACPI_CPU_HOTPLUG_REG_LEN));

        field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
                          AML_WRITE_AS_ZEROS);
        aml_append(field, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW * 8));
        /* 1 if enabled, read only */
        aml_append(field, aml_named_field(CPU_ENABLED, 1));
        /* (read) 1 if it has an insert event. (write) 1 to clear event */
        aml_append(field, aml_named_field(CPU_INSERT_EVENT, 1));
        /* (read) 1 if it has a remove event. (write) 1 to clear event */
        aml_append(field, aml_named_field(CPU_REMOVE_EVENT, 1));
        /* initiates device eject, write only */
        aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
        /* tell firmware to do device eject, write only */
        aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1));
        aml_append(field, aml_reserved_field(3));
        aml_append(field, aml_named_field(CPU_COMMAND, 8));
        aml_append(cpu_ctrl_dev, field);

        field = aml_field("PRST", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
        /* CPU selector, write only */
        aml_append(field, aml_named_field(CPU_SELECTOR, 32));
        /* flags + cmd + 2byte align */
        aml_append(field, aml_reserved_field(4 * 8));
        aml_append(field, aml_named_field(CPU_DATA, 32));
        aml_append(cpu_ctrl_dev, field);

        if (opts.has_legacy_cphp) {
            method = aml_method("_INI", 0, AML_SERIALIZED);
            /*
             * Switch off the legacy CPU hotplug HW and use the new one.
             * On reboot the system is then in the new mode, and writing 0
             * to CPU_SELECTOR selects the BSP, which is a NOP at the time
             * _INI is called.
             */
            aml_append(method, aml_store(zero, aml_name(CPU_SELECTOR)));
            aml_append(cpu_ctrl_dev, method);
        }
    }
    aml_append(sb_scope, cpu_ctrl_dev);

    cpus_dev = aml_device("\\_SB.CPUS");
    {
        int i;
        Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
        Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
        Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
        Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
        Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
        Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
        Aml *rm_evt = aml_name("%s.%s", cphp_res_path, CPU_REMOVE_EVENT);
        Aml *ej_evt = aml_name("%s.%s", cphp_res_path, CPU_EJECT_EVENT);
        Aml *fw_ej_evt = aml_name("%s.%s", cphp_res_path, CPU_FW_EJECT_EVENT);

        aml_append(cpus_dev, aml_name_decl("_HID", aml_string("ACPI0010")));
        aml_append(cpus_dev, aml_name_decl("_CID", aml_eisaid("PNP0A05")));

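        /* CTFY: Notify the CPU object whose _UID matches Arg0 with event Arg1 */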
        method = aml_method(CPU_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
        for (i = 0; i < arch_ids->len; i++) {
            Aml *cpu = aml_name(CPU_NAME_FMT, i);
            Aml *uid = aml_arg(0);
            Aml *event = aml_arg(1);

            ifctx = aml_if(aml_equal(uid, aml_int(i)));
            {
                aml_append(ifctx, aml_notify(cpu, event));
            }
            aml_append(method, ifctx);
        }
        aml_append(cpus_dev, method);

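        /* CSTA: _STA helper, returns 0xF if the CPU in Arg0 is enabled, else 0 */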
        method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);
            Aml *sta = aml_local(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            aml_append(method, aml_store(zero, sta));
            ifctx = aml_if(aml_equal(is_enabled, one));
            {
                aml_append(ifctx, aml_store(aml_int(0xF), sta));
            }
            aml_append(method, ifctx);
            aml_append(method, aml_release(ctrl_lock));
            aml_append(method, aml_return(sta));
        }
        aml_append(cpus_dev, method);

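        /* CEJ0: eject CPU Arg0; hand off to firmware when fw_unplugs_cpu is set */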
        method = aml_method(CPU_EJECT_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            if (opts.fw_unplugs_cpu) {
                aml_append(method, aml_store(one, fw_ej_evt));
                aml_append(method, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
                           aml_name("%s", opts.smi_path)));
            } else {
                aml_append(method, aml_store(one, ej_evt));
            }
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);

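        /* CSCN: scan all CPUs for pending events and Notify OSPM in batches */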
        method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED);
        {
            const uint8_t max_cpus_per_pass = 255;
            Aml *while_ctx, *while_ctx2;
            Aml *has_event = aml_local(0);
            Aml *dev_chk = aml_int(1);
            Aml *eject_req = aml_int(3);
            Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
            Aml *num_added_cpus = aml_local(1);
            Aml *cpu_idx = aml_local(2);
            Aml *uid = aml_local(3);
            Aml *has_job = aml_local(4);
            Aml *new_cpus = aml_name(CPU_ADDED_LIST);
            Aml *ej_cpus = aml_name(CPU_EJ_LIST);
            Aml *num_ej_cpus = aml_local(5);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));

            /*
             * Windows versions newer than XP (including Windows 10 and
             * Windows Server 2019) do support VarPackageOp, but it is
             * crippled to hold the same number of elements as the old
             * PackageOp. For compatibility with Windows XP (so it won't
             * crash), use the ACPI 1.0 PackageOp, which can hold at most
             * 255 elements.
             *
             * Use named packages, as old Windows versions don't support
             * them in local variables.
             */
            aml_append(method, aml_name_decl(CPU_ADDED_LIST,
                                             aml_package(max_cpus_per_pass)));
            aml_append(method, aml_name_decl(CPU_EJ_LIST,
                                             aml_package(max_cpus_per_pass)));

            aml_append(method, aml_store(zero, uid));
            aml_append(method, aml_store(one, has_job));
            /*
             * CPU_ADDED_LIST can hold only a limited number of elements;
             * the outer loop processes CPUs in batches, which lets us
             * handle more CPUs than CPU_ADDED_LIST can hold.
             */
            while_ctx2 = aml_while(aml_equal(has_job, one));
            {
                aml_append(while_ctx2, aml_store(zero, has_job));

                aml_append(while_ctx2, aml_store(one, has_event));
                aml_append(while_ctx2, aml_store(zero, num_added_cpus));
                aml_append(while_ctx2, aml_store(zero, num_ej_cpus));

                /*
                 * Scan CPUs until no CPU with a pending event is left or
                 * CPU_ADDED_LIST's capacity is exhausted.
                 */
                while_ctx = aml_while(aml_land(aml_equal(has_event, one),
                                      aml_lless(uid, aml_int(arch_ids->len))));
                {
                    /*
                     * Clear the loop exit condition; the ins_evt/rm_evt
                     * checks below set it back to 1 if next_cpu_cmd returned
                     * a CPU with events.
                     */
                    aml_append(while_ctx, aml_store(zero, has_event));

                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));

                    /*
                     * Wrap-around case: the scan is complete, exit the loop.
                     * This happens because events are not cleared in the
                     * scan loop, so next_cpu_cmd keeps finding already
                     * processed CPUs.
                     */
                    ifctx = aml_if(aml_lless(cpu_data, uid));
                    {
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    /*
                     * if CPU_ADDED_LIST is full, exit inner loop and process
                     * collected CPUs
                     */
                    ifctx = aml_if(aml_lor(
                        aml_equal(num_added_cpus, aml_int(max_cpus_per_pass)),
                        aml_equal(num_ej_cpus, aml_int(max_cpus_per_pass))
                    ));
                    {
                        aml_append(ifctx, aml_store(one, has_job));
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    aml_append(while_ctx, aml_store(cpu_data, uid));
                    ifctx = aml_if(aml_equal(ins_evt, one));
                    {
                        /* cache added CPUs to Notify/Wakeup later */
                        aml_append(ifctx, aml_store(uid,
                            aml_index(new_cpus, num_added_cpus)));
                        aml_append(ifctx, aml_increment(num_added_cpus));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(while_ctx, ifctx);

                    ifctx = aml_if(aml_equal(rm_evt, one));
                    {
                        /* cache to be removed CPUs to Notify later */
                        aml_append(ifctx, aml_store(uid,
                            aml_index(ej_cpus, num_ej_cpus)));
                        aml_append(ifctx, aml_increment(num_ej_cpus));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(while_ctx, ifctx);
                    aml_append(while_ctx, aml_increment(uid));
                }
                aml_append(while_ctx2, while_ctx);

                /*
                 * In case FW negotiated ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT,
                 * make an upcall to FW so it can pull in new CPUs before
                 * the OS is notified and wakes them up.
                 */
                if (opts.smi_path) {
                    ifctx = aml_if(aml_lgreater(num_added_cpus, zero));
                    {
                        aml_append(ifctx, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
                            aml_name("%s", opts.smi_path)));
                    }
                    aml_append(while_ctx2, ifctx);
                }

                /* Notify OSPM about new CPUs and clear insert events */
                aml_append(while_ctx2, aml_store(zero, cpu_idx));
                while_ctx = aml_while(aml_lless(cpu_idx, num_added_cpus));
                {
                    aml_append(while_ctx,
                        aml_store(aml_derefof(aml_index(new_cpus, cpu_idx)),
                                  uid));
                    aml_append(while_ctx,
                        aml_call2(CPU_NOTIFY_METHOD, uid, dev_chk));
                    aml_append(while_ctx, aml_store(uid, aml_debug()));
                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(one, ins_evt));
                    aml_append(while_ctx, aml_increment(cpu_idx));
                }
                aml_append(while_ctx2, while_ctx);

                /*
                 * Notify OSPM about the CPUs to be removed and clear the
                 * remove flags.
                 */
                aml_append(while_ctx2, aml_store(zero, cpu_idx));
                while_ctx = aml_while(aml_lless(cpu_idx, num_ej_cpus));
                {
                    aml_append(while_ctx,
                        aml_store(aml_derefof(aml_index(ej_cpus, cpu_idx)),
                                  uid));
                    aml_append(while_ctx,
                        aml_call2(CPU_NOTIFY_METHOD, uid, eject_req));
                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(one, rm_evt));
                    aml_append(while_ctx, aml_increment(cpu_idx));
                }
                aml_append(while_ctx2, while_ctx);

                /*
                 * If another batch is needed, then it will resume scanning
                 * exactly at -- and not after -- the last CPU that's currently
                 * in CPU_ADDED_LIST. In other words, the last CPU in
                 * CPU_ADDED_LIST is going to be re-checked. That's OK: we've
                 * just cleared the insert event for *all* CPUs in
                 * CPU_ADDED_LIST, including the last one. So the scan will
                 * simply seek past it.
                 */
            }
            aml_append(method, while_ctx2);
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);

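        /* COST: forward _OST event (Arg1) and status (Arg2) for CPU Arg0 to QEMU */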
        method = aml_method(CPU_OST_METHOD, 4, AML_SERIALIZED);
        {
            Aml *uid = aml_arg(0);
            Aml *ev_cmd = aml_int(CPHP_OST_EVENT_CMD);
            Aml *st_cmd = aml_int(CPHP_OST_STATUS_CMD);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(uid, cpu_selector));
            aml_append(method, aml_store(ev_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(1), cpu_data));
            aml_append(method, aml_store(st_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(2), cpu_data));
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);

        /* build Processor object for each processor */
        for (i = 0; i < arch_ids->len; i++) {
            Aml *dev;
            Aml *uid = aml_int(i);
            GArray *madt_buf = g_array_new(0, 1, 1);
            int arch_id = arch_ids->cpus[i].arch_id;

            if (opts.acpi_1_compatible && arch_id < 255) {
                dev = aml_processor(i, 0, 0, CPU_NAME_FMT, i);
            } else {
                dev = aml_device(CPU_NAME_FMT, i);
                aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
                aml_append(dev, aml_name_decl("_UID", uid));
            }

            method = aml_method("_STA", 0, AML_SERIALIZED);
            aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid)));
            aml_append(dev, method);

            /* build _MAT object */
            build_madt_cpu(i, arch_ids, madt_buf, true); /* set enabled flag */
            aml_append(dev, aml_name_decl("_MAT",
                aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data)));
            g_array_free(madt_buf, true);

            if (CPU(arch_ids->cpus[i].cpu) != first_cpu) {
                method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
                aml_append(method, aml_call1(CPU_EJECT_METHOD, uid));
                aml_append(dev, method);
            }

            method = aml_method("_OST", 3, AML_SERIALIZED);
            aml_append(method,
                aml_call4(CPU_OST_METHOD, uid, aml_arg(0),
                          aml_arg(1), aml_arg(2))
            );
            aml_append(dev, method);

            /*
             * Linux guests discard SRAT info for non-present CPUs; as a
             * result, _PXM is required for all CPUs which might be
             * hot-plugged. For simplicity, add it for all CPUs.
             */
            if (arch_ids->cpus[i].props.has_node_id) {
                aml_append(dev, aml_name_decl("_PXM",
                           aml_int(arch_ids->cpus[i].props.node_id)));
            }

            aml_append(cpus_dev, dev);
        }
    }
    aml_append(sb_scope, cpus_dev);
    aml_append(table, sb_scope);

    method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
    aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD));
    aml_append(table, method);

    g_free(cphp_res_path);
}