1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * QEMU loongson 3a5000 develop board emulation 4 * 5 * Copyright (c) 2021 Loongson Technology Corporation Limited 6 */ 7 #include "qemu/osdep.h" 8 #include "qemu/units.h" 9 #include "qemu/datadir.h" 10 #include "qapi/error.h" 11 #include "hw/boards.h" 12 #include "hw/char/serial-mm.h" 13 #include "system/kvm.h" 14 #include "system/tcg.h" 15 #include "system/system.h" 16 #include "system/qtest.h" 17 #include "system/runstate.h" 18 #include "system/reset.h" 19 #include "system/rtc.h" 20 #include "hw/loongarch/virt.h" 21 #include "exec/address-spaces.h" 22 #include "hw/irq.h" 23 #include "net/net.h" 24 #include "hw/loader.h" 25 #include "elf.h" 26 #include "hw/intc/loongarch_ipi.h" 27 #include "hw/intc/loongarch_extioi.h" 28 #include "hw/intc/loongarch_pch_pic.h" 29 #include "hw/intc/loongarch_pch_msi.h" 30 #include "hw/pci-host/ls7a.h" 31 #include "hw/pci-host/gpex.h" 32 #include "hw/misc/unimp.h" 33 #include "hw/loongarch/fw_cfg.h" 34 #include "target/loongarch/cpu.h" 35 #include "hw/firmware/smbios.h" 36 #include "qapi/qapi-visit-common.h" 37 #include "hw/acpi/generic_event_device.h" 38 #include "hw/mem/nvdimm.h" 39 #include "hw/platform-bus.h" 40 #include "hw/display/ramfb.h" 41 #include "hw/mem/pc-dimm.h" 42 #include "system/tpm.h" 43 #include "system/block-backend.h" 44 #include "hw/block/flash.h" 45 #include "hw/virtio/virtio-iommu.h" 46 #include "qemu/error-report.h" 47 48 static void virt_get_veiointc(Object *obj, Visitor *v, const char *name, 49 void *opaque, Error **errp) 50 { 51 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); 52 OnOffAuto veiointc = lvms->veiointc; 53 54 visit_type_OnOffAuto(v, name, &veiointc, errp); 55 } 56 57 static void virt_set_veiointc(Object *obj, Visitor *v, const char *name, 58 void *opaque, Error **errp) 59 { 60 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); 61 62 visit_type_OnOffAuto(v, name, &lvms->veiointc, errp); 63 } 64 65 static PFlashCFI01 *virt_flash_create1(LoongArchVirtMachineState *lvms, 66 const char *name, 67 const char *alias_prop_name) 68 { 69 DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); 70 71 qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE); 72 qdev_prop_set_uint8(dev, "width", 4); 73 qdev_prop_set_uint8(dev, "device-width", 2); 74 qdev_prop_set_bit(dev, "big-endian", false); 75 qdev_prop_set_uint16(dev, "id0", 0x89); 76 qdev_prop_set_uint16(dev, "id1", 0x18); 77 qdev_prop_set_uint16(dev, "id2", 0x00); 78 qdev_prop_set_uint16(dev, "id3", 0x00); 79 qdev_prop_set_string(dev, "name", name); 80 object_property_add_child(OBJECT(lvms), name, OBJECT(dev)); 81 object_property_add_alias(OBJECT(lvms), alias_prop_name, 82 OBJECT(dev), "drive"); 83 return PFLASH_CFI01(dev); 84 } 85 86 static void virt_flash_create(LoongArchVirtMachineState *lvms) 87 { 88 lvms->flash[0] = virt_flash_create1(lvms, "virt.flash0", "pflash0"); 89 lvms->flash[1] = virt_flash_create1(lvms, "virt.flash1", "pflash1"); 90 } 91 92 static void virt_flash_map1(PFlashCFI01 *flash, 93 hwaddr base, hwaddr size, 94 MemoryRegion *sysmem) 95 { 96 DeviceState *dev = DEVICE(flash); 97 BlockBackend *blk; 98 hwaddr real_size = size; 99 100 blk = pflash_cfi01_get_blk(flash); 101 if (blk) { 102 real_size = blk_getlength(blk); 103 assert(real_size && real_size <= size); 104 } 105 106 assert(QEMU_IS_ALIGNED(real_size, VIRT_FLASH_SECTOR_SIZE)); 107 assert(real_size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX); 108 109 qdev_prop_set_uint32(dev, "num-blocks", real_size / VIRT_FLASH_SECTOR_SIZE); 110 sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); 111 memory_region_add_subregion(sysmem, base, 112 sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); 113 } 114 115 static void virt_flash_map(LoongArchVirtMachineState *lvms, 116 MemoryRegion *sysmem) 117 { 118 PFlashCFI01 *flash0 = lvms->flash[0]; 119 PFlashCFI01 *flash1 = lvms->flash[1]; 120 121 virt_flash_map1(flash0, VIRT_FLASH0_BASE, VIRT_FLASH0_SIZE, sysmem); 122 virt_flash_map1(flash1, VIRT_FLASH1_BASE, VIRT_FLASH1_SIZE, sysmem); 123 } 124 125 static void virt_build_smbios(LoongArchVirtMachineState *lvms) 126 { 127 MachineState *ms = MACHINE(lvms); 128 MachineClass *mc = MACHINE_GET_CLASS(lvms); 129 uint8_t *smbios_tables, *smbios_anchor; 130 size_t smbios_tables_len, smbios_anchor_len; 131 const char *product = "QEMU Virtual Machine"; 132 133 if (!lvms->fw_cfg) { 134 return; 135 } 136 137 smbios_set_defaults("QEMU", product, mc->name); 138 139 smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64, 140 NULL, 0, 141 &smbios_tables, &smbios_tables_len, 142 &smbios_anchor, &smbios_anchor_len, &error_fatal); 143 144 if (smbios_anchor) { 145 fw_cfg_add_file(lvms->fw_cfg, "etc/smbios/smbios-tables", 146 smbios_tables, smbios_tables_len); 147 fw_cfg_add_file(lvms->fw_cfg, "etc/smbios/smbios-anchor", 148 smbios_anchor, smbios_anchor_len); 149 } 150 } 151 152 static void virt_done(Notifier *notifier, void *data) 153 { 154 LoongArchVirtMachineState *lvms = container_of(notifier, 155 LoongArchVirtMachineState, machine_done); 156 virt_build_smbios(lvms); 157 virt_acpi_setup(lvms); 158 virt_fdt_setup(lvms); 159 } 160 161 static void virt_powerdown_req(Notifier *notifier, void *opaque) 162 { 163 LoongArchVirtMachineState *s; 164 165 s = container_of(notifier, LoongArchVirtMachineState, powerdown_notifier); 166 acpi_send_event(s->acpi_ged, ACPI_POWER_DOWN_STATUS); 167 } 168 169 static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type) 170 { 171 /* Ensure there are no duplicate entries. */ 172 for (unsigned i = 0; i < memmap_entries; i++) { 173 assert(memmap_table[i].address != address); 174 } 175 176 memmap_table = g_renew(struct memmap_entry, memmap_table, 177 memmap_entries + 1); 178 memmap_table[memmap_entries].address = cpu_to_le64(address); 179 memmap_table[memmap_entries].length = cpu_to_le64(length); 180 memmap_table[memmap_entries].type = cpu_to_le32(type); 181 memmap_table[memmap_entries].reserved = 0; 182 memmap_entries++; 183 } 184 185 static DeviceState *create_acpi_ged(DeviceState *pch_pic, 186 LoongArchVirtMachineState *lvms) 187 { 188 DeviceState *dev; 189 MachineState *ms = MACHINE(lvms); 190 uint32_t event = ACPI_GED_PWR_DOWN_EVT; 191 192 if (ms->ram_slots) { 193 event |= ACPI_GED_MEM_HOTPLUG_EVT; 194 } 195 dev = qdev_new(TYPE_ACPI_GED); 196 qdev_prop_set_uint32(dev, "ged-event", event); 197 sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); 198 199 /* ged event */ 200 sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, VIRT_GED_EVT_ADDR); 201 /* memory hotplug */ 202 sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, VIRT_GED_MEM_ADDR); 203 /* ged regs used for reset and power down */ 204 sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR); 205 206 sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, 207 qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - VIRT_GSI_BASE)); 208 return dev; 209 } 210 211 static DeviceState *create_platform_bus(DeviceState *pch_pic) 212 { 213 DeviceState *dev; 214 SysBusDevice *sysbus; 215 int i, irq; 216 MemoryRegion *sysmem = get_system_memory(); 217 218 dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); 219 dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); 220 qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS); 221 qdev_prop_set_uint32(dev, "mmio_size", VIRT_PLATFORM_BUS_SIZE); 222 sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); 223 224 sysbus = SYS_BUS_DEVICE(dev); 225 for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) { 226 irq = VIRT_PLATFORM_BUS_IRQ - VIRT_GSI_BASE + i; 227 sysbus_connect_irq(sysbus, i, qdev_get_gpio_in(pch_pic, irq)); 228 } 229 230 memory_region_add_subregion(sysmem, 231 VIRT_PLATFORM_BUS_BASEADDRESS, 232 sysbus_mmio_get_region(sysbus, 0)); 233 return dev; 234 } 235 236 static void virt_devices_init(DeviceState *pch_pic, 237 LoongArchVirtMachineState *lvms) 238 { 239 MachineClass *mc = MACHINE_GET_CLASS(lvms); 240 DeviceState *gpex_dev; 241 SysBusDevice *d; 242 PCIBus *pci_bus; 243 MemoryRegion *ecam_alias, *ecam_reg, *pio_alias, *pio_reg; 244 MemoryRegion *mmio_alias, *mmio_reg; 245 int i; 246 247 gpex_dev = qdev_new(TYPE_GPEX_HOST); 248 d = SYS_BUS_DEVICE(gpex_dev); 249 sysbus_realize_and_unref(d, &error_fatal); 250 pci_bus = PCI_HOST_BRIDGE(gpex_dev)->bus; 251 lvms->pci_bus = pci_bus; 252 253 /* Map only part size_ecam bytes of ECAM space */ 254 ecam_alias = g_new0(MemoryRegion, 1); 255 ecam_reg = sysbus_mmio_get_region(d, 0); 256 memory_region_init_alias(ecam_alias, OBJECT(gpex_dev), "pcie-ecam", 257 ecam_reg, 0, VIRT_PCI_CFG_SIZE); 258 memory_region_add_subregion(get_system_memory(), VIRT_PCI_CFG_BASE, 259 ecam_alias); 260 261 /* Map PCI mem space */ 262 mmio_alias = g_new0(MemoryRegion, 1); 263 mmio_reg = sysbus_mmio_get_region(d, 1); 264 memory_region_init_alias(mmio_alias, OBJECT(gpex_dev), "pcie-mmio", 265 mmio_reg, VIRT_PCI_MEM_BASE, VIRT_PCI_MEM_SIZE); 266 memory_region_add_subregion(get_system_memory(), VIRT_PCI_MEM_BASE, 267 mmio_alias); 268 269 /* Map PCI IO port space. */ 270 pio_alias = g_new0(MemoryRegion, 1); 271 pio_reg = sysbus_mmio_get_region(d, 2); 272 memory_region_init_alias(pio_alias, OBJECT(gpex_dev), "pcie-io", pio_reg, 273 VIRT_PCI_IO_OFFSET, VIRT_PCI_IO_SIZE); 274 memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE, 275 pio_alias); 276 277 for (i = 0; i < PCI_NUM_PINS; i++) { 278 sysbus_connect_irq(d, i, 279 qdev_get_gpio_in(pch_pic, 16 + i)); 280 gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i); 281 } 282 283 /* 284 * Create uart fdt node in reverse order so that they appear 285 * in the finished device tree lowest address first 286 */ 287 for (i = VIRT_UART_COUNT; i-- > 0;) { 288 hwaddr base = VIRT_UART_BASE + i * VIRT_UART_SIZE; 289 int irq = VIRT_UART_IRQ + i - VIRT_GSI_BASE; 290 serial_mm_init(get_system_memory(), base, 0, 291 qdev_get_gpio_in(pch_pic, irq), 292 115200, serial_hd(i), DEVICE_LITTLE_ENDIAN); 293 } 294 295 /* Network init */ 296 pci_init_nic_devices(pci_bus, mc->default_nic); 297 298 /* 299 * There are some invalid guest memory access. 300 * Create some unimplemented devices to emulate this. 301 */ 302 create_unimplemented_device("pci-dma-cfg", 0x1001041c, 0x4); 303 sysbus_create_simple("ls7a_rtc", VIRT_RTC_REG_BASE, 304 qdev_get_gpio_in(pch_pic, 305 VIRT_RTC_IRQ - VIRT_GSI_BASE)); 306 307 /* acpi ged */ 308 lvms->acpi_ged = create_acpi_ged(pch_pic, lvms); 309 /* platform bus */ 310 lvms->platform_bus_dev = create_platform_bus(pch_pic); 311 } 312 313 static void virt_cpu_irq_init(LoongArchVirtMachineState *lvms) 314 { 315 int num; 316 MachineState *ms = MACHINE(lvms); 317 MachineClass *mc = MACHINE_GET_CLASS(ms); 318 const CPUArchIdList *possible_cpus; 319 CPUState *cs; 320 Error *err = NULL; 321 322 /* cpu nodes */ 323 possible_cpus = mc->possible_cpu_arch_ids(ms); 324 for (num = 0; num < possible_cpus->len; num++) { 325 cs = possible_cpus->cpus[num].cpu; 326 if (cs == NULL) { 327 continue; 328 } 329 330 hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), DEVICE(cs), &err); 331 hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), DEVICE(cs), &err); 332 } 333 } 334 335 static void virt_irq_init(LoongArchVirtMachineState *lvms) 336 { 337 DeviceState *pch_pic, *pch_msi; 338 DeviceState *ipi, *extioi; 339 SysBusDevice *d; 340 int i, start, num; 341 342 /* 343 * Extended IRQ model. 344 * | 345 * +-----------+ +-------------|--------+ +-----------+ 346 * | IPI/Timer | --> | CPUINTC(0-3)|(4-255) | <-- | IPI/Timer | 347 * +-----------+ +-------------|--------+ +-----------+ 348 * ^ | 349 * | 350 * +---------+ 351 * | EIOINTC | 352 * +---------+ 353 * ^ ^ 354 * | | 355 * +---------+ +---------+ 356 * | PCH-PIC | | PCH-MSI | 357 * +---------+ +---------+ 358 * ^ ^ ^ 359 * | | | 360 * +--------+ +---------+ +---------+ 361 * | UARTs | | Devices | | Devices | 362 * +--------+ +---------+ +---------+ 363 * 364 * Virt extended IRQ model. 365 * 366 * +-----+ +---------------+ +-------+ 367 * | IPI |--> | CPUINTC(0-255)| <-- | Timer | 368 * +-----+ +---------------+ +-------+ 369 * ^ 370 * | 371 * +-----------+ 372 * | V-EIOINTC | 373 * +-----------+ 374 * ^ ^ 375 * | | 376 * +---------+ +---------+ 377 * | PCH-PIC | | PCH-MSI | 378 * +---------+ +---------+ 379 * ^ ^ ^ 380 * | | | 381 * +--------+ +---------+ +---------+ 382 * | UARTs | | Devices | | Devices | 383 * +--------+ +---------+ +---------+ 384 */ 385 386 /* Create IPI device */ 387 ipi = qdev_new(TYPE_LOONGARCH_IPI); 388 lvms->ipi = ipi; 389 sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); 390 391 /* IPI iocsr memory region */ 392 memory_region_add_subregion(&lvms->system_iocsr, SMP_IPI_MAILBOX, 393 sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0)); 394 memory_region_add_subregion(&lvms->system_iocsr, MAIL_SEND_ADDR, 395 sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1)); 396 397 /* Create EXTIOI device */ 398 extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); 399 lvms->extioi = extioi; 400 if (virt_is_veiointc_enabled(lvms)) { 401 qdev_prop_set_bit(extioi, "has-virtualization-extension", true); 402 } 403 sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); 404 memory_region_add_subregion(&lvms->system_iocsr, APIC_BASE, 405 sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0)); 406 if (virt_is_veiointc_enabled(lvms)) { 407 memory_region_add_subregion(&lvms->system_iocsr, EXTIOI_VIRT_BASE, 408 sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 1)); 409 } 410 411 virt_cpu_irq_init(lvms); 412 pch_pic = qdev_new(TYPE_LOONGARCH_PIC); 413 num = VIRT_PCH_PIC_IRQ_NUM; 414 qdev_prop_set_uint32(pch_pic, "pch_pic_irq_num", num); 415 d = SYS_BUS_DEVICE(pch_pic); 416 sysbus_realize_and_unref(d, &error_fatal); 417 memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE, 418 sysbus_mmio_get_region(d, 0)); 419 memory_region_add_subregion(get_system_memory(), 420 VIRT_IOAPIC_REG_BASE + PCH_PIC_ROUTE_ENTRY_OFFSET, 421 sysbus_mmio_get_region(d, 1)); 422 memory_region_add_subregion(get_system_memory(), 423 VIRT_IOAPIC_REG_BASE + PCH_PIC_INT_STATUS_LO, 424 sysbus_mmio_get_region(d, 2)); 425 426 /* Connect pch_pic irqs to extioi */ 427 for (i = 0; i < num; i++) { 428 qdev_connect_gpio_out(DEVICE(d), i, qdev_get_gpio_in(extioi, i)); 429 } 430 431 pch_msi = qdev_new(TYPE_LOONGARCH_PCH_MSI); 432 start = num; 433 num = EXTIOI_IRQS - start; 434 qdev_prop_set_uint32(pch_msi, "msi_irq_base", start); 435 qdev_prop_set_uint32(pch_msi, "msi_irq_num", num); 436 d = SYS_BUS_DEVICE(pch_msi); 437 sysbus_realize_and_unref(d, &error_fatal); 438 sysbus_mmio_map(d, 0, VIRT_PCH_MSI_ADDR_LOW); 439 for (i = 0; i < num; i++) { 440 /* Connect pch_msi irqs to extioi */ 441 qdev_connect_gpio_out(DEVICE(d), i, 442 qdev_get_gpio_in(extioi, i + start)); 443 } 444 445 virt_devices_init(pch_pic, lvms); 446 } 447 448 static void virt_firmware_init(LoongArchVirtMachineState *lvms) 449 { 450 char *filename = MACHINE(lvms)->firmware; 451 char *bios_name = NULL; 452 int bios_size, i; 453 BlockBackend *pflash_blk0; 454 MemoryRegion *mr; 455 456 lvms->bios_loaded = false; 457 458 /* Map legacy -drive if=pflash to machine properties */ 459 for (i = 0; i < ARRAY_SIZE(lvms->flash); i++) { 460 pflash_cfi01_legacy_drive(lvms->flash[i], 461 drive_get(IF_PFLASH, 0, i)); 462 } 463 464 virt_flash_map(lvms, get_system_memory()); 465 466 pflash_blk0 = pflash_cfi01_get_blk(lvms->flash[0]); 467 468 if (pflash_blk0) { 469 if (filename) { 470 error_report("cannot use both '-bios' and '-drive if=pflash'" 471 "options at once"); 472 exit(1); 473 } 474 lvms->bios_loaded = true; 475 return; 476 } 477 478 if (filename) { 479 bios_name = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename); 480 if (!bios_name) { 481 error_report("Could not find ROM image '%s'", filename); 482 exit(1); 483 } 484 485 mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(lvms->flash[0]), 0); 486 bios_size = load_image_mr(bios_name, mr); 487 if (bios_size < 0) { 488 error_report("Could not load ROM image '%s'", bios_name); 489 exit(1); 490 } 491 g_free(bios_name); 492 lvms->bios_loaded = true; 493 } 494 } 495 496 static MemTxResult virt_iocsr_misc_write(void *opaque, hwaddr addr, 497 uint64_t val, unsigned size, 498 MemTxAttrs attrs) 499 { 500 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(opaque); 501 uint64_t features; 502 503 switch (addr) { 504 case MISC_FUNC_REG: 505 if (!virt_is_veiointc_enabled(lvms)) { 506 return MEMTX_OK; 507 } 508 509 features = address_space_ldl(&lvms->as_iocsr, 510 EXTIOI_VIRT_BASE + EXTIOI_VIRT_CONFIG, 511 attrs, NULL); 512 if (val & BIT_ULL(IOCSRM_EXTIOI_EN)) { 513 features |= BIT(EXTIOI_ENABLE); 514 } 515 if (val & BIT_ULL(IOCSRM_EXTIOI_INT_ENCODE)) { 516 features |= BIT(EXTIOI_ENABLE_INT_ENCODE); 517 } 518 519 address_space_stl(&lvms->as_iocsr, 520 EXTIOI_VIRT_BASE + EXTIOI_VIRT_CONFIG, 521 features, attrs, NULL); 522 break; 523 default: 524 g_assert_not_reached(); 525 } 526 527 return MEMTX_OK; 528 } 529 530 static MemTxResult virt_iocsr_misc_read(void *opaque, hwaddr addr, 531 uint64_t *data, 532 unsigned size, MemTxAttrs attrs) 533 { 534 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(opaque); 535 uint64_t ret = 0; 536 int features; 537 538 switch (addr) { 539 case VERSION_REG: 540 ret = 0x11ULL; 541 break; 542 case FEATURE_REG: 543 ret = BIT(IOCSRF_MSI) | BIT(IOCSRF_EXTIOI) | BIT(IOCSRF_CSRIPI); 544 if (kvm_enabled()) { 545 ret |= BIT(IOCSRF_VM); 546 } 547 break; 548 case VENDOR_REG: 549 ret = 0x6e6f73676e6f6f4cULL; /* "Loongson" */ 550 break; 551 case CPUNAME_REG: 552 ret = 0x303030354133ULL; /* "3A5000" */ 553 break; 554 case MISC_FUNC_REG: 555 if (!virt_is_veiointc_enabled(lvms)) { 556 ret |= BIT_ULL(IOCSRM_EXTIOI_EN); 557 break; 558 } 559 560 features = address_space_ldl(&lvms->as_iocsr, 561 EXTIOI_VIRT_BASE + EXTIOI_VIRT_CONFIG, 562 attrs, NULL); 563 if (features & BIT(EXTIOI_ENABLE)) { 564 ret |= BIT_ULL(IOCSRM_EXTIOI_EN); 565 } 566 if (features & BIT(EXTIOI_ENABLE_INT_ENCODE)) { 567 ret |= BIT_ULL(IOCSRM_EXTIOI_INT_ENCODE); 568 } 569 break; 570 default: 571 g_assert_not_reached(); 572 } 573 574 *data = ret; 575 return MEMTX_OK; 576 } 577 578 static const MemoryRegionOps virt_iocsr_misc_ops = { 579 .read_with_attrs = virt_iocsr_misc_read, 580 .write_with_attrs = virt_iocsr_misc_write, 581 .endianness = DEVICE_LITTLE_ENDIAN, 582 .valid = { 583 .min_access_size = 4, 584 .max_access_size = 8, 585 }, 586 .impl = { 587 .min_access_size = 8, 588 .max_access_size = 8, 589 }, 590 }; 591 592 static void fw_cfg_add_memory(MachineState *ms) 593 { 594 hwaddr base, size, ram_size, gap; 595 int nb_numa_nodes, nodes; 596 NodeInfo *numa_info; 597 598 ram_size = ms->ram_size; 599 base = VIRT_LOWMEM_BASE; 600 gap = VIRT_LOWMEM_SIZE; 601 nodes = nb_numa_nodes = ms->numa_state->num_nodes; 602 numa_info = ms->numa_state->nodes; 603 if (!nodes) { 604 nodes = 1; 605 } 606 607 /* add fw_cfg memory map of node0 */ 608 if (nb_numa_nodes) { 609 size = numa_info[0].node_mem; 610 } else { 611 size = ram_size; 612 } 613 614 if (size >= gap) { 615 memmap_add_entry(base, gap, 1); 616 size -= gap; 617 base = VIRT_HIGHMEM_BASE; 618 } 619 620 if (size) { 621 memmap_add_entry(base, size, 1); 622 base += size; 623 } 624 625 if (nodes < 2) { 626 return; 627 } 628 629 /* add fw_cfg memory map of other nodes */ 630 if (numa_info[0].node_mem < gap && ram_size > gap) { 631 /* 632 * memory map for the maining nodes splited into two part 633 * lowram: [base, +(gap - numa_info[0].node_mem)) 634 * highram: [VIRT_HIGHMEM_BASE, +(ram_size - gap)) 635 */ 636 memmap_add_entry(base, gap - numa_info[0].node_mem, 1); 637 size = ram_size - gap; 638 base = VIRT_HIGHMEM_BASE; 639 } else { 640 size = ram_size - numa_info[0].node_mem; 641 } 642 643 if (size) { 644 memmap_add_entry(base, size, 1); 645 } 646 } 647 648 static void virt_init(MachineState *machine) 649 { 650 LoongArchCPU *lacpu; 651 const char *cpu_model = machine->cpu_type; 652 MemoryRegion *address_space_mem = get_system_memory(); 653 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); 654 int i; 655 hwaddr base, size, ram_size = machine->ram_size; 656 const CPUArchIdList *possible_cpus; 657 MachineClass *mc = MACHINE_GET_CLASS(machine); 658 CPUState *cpu; 659 660 if (!cpu_model) { 661 cpu_model = LOONGARCH_CPU_TYPE_NAME("la464"); 662 } 663 664 /* Create IOCSR space */ 665 memory_region_init_io(&lvms->system_iocsr, OBJECT(machine), NULL, 666 machine, "iocsr", UINT64_MAX); 667 address_space_init(&lvms->as_iocsr, &lvms->system_iocsr, "IOCSR"); 668 memory_region_init_io(&lvms->iocsr_mem, OBJECT(machine), 669 &virt_iocsr_misc_ops, 670 machine, "iocsr_misc", 0x428); 671 memory_region_add_subregion(&lvms->system_iocsr, 0, &lvms->iocsr_mem); 672 673 /* Init CPUs */ 674 possible_cpus = mc->possible_cpu_arch_ids(machine); 675 for (i = 0; i < possible_cpus->len; i++) { 676 cpu = cpu_create(machine->cpu_type); 677 cpu->cpu_index = i; 678 machine->possible_cpus->cpus[i].cpu = cpu; 679 lacpu = LOONGARCH_CPU(cpu); 680 lacpu->phy_id = machine->possible_cpus->cpus[i].arch_id; 681 lacpu->env.address_space_iocsr = &lvms->as_iocsr; 682 } 683 fw_cfg_add_memory(machine); 684 685 /* Node0 memory */ 686 size = ram_size; 687 base = VIRT_LOWMEM_BASE; 688 if (size > VIRT_LOWMEM_SIZE) { 689 size = VIRT_LOWMEM_SIZE; 690 } 691 692 memory_region_init_alias(&lvms->lowmem, NULL, "loongarch.lowram", 693 machine->ram, base, size); 694 memory_region_add_subregion(address_space_mem, base, &lvms->lowmem); 695 base += size; 696 if (ram_size - size) { 697 base = VIRT_HIGHMEM_BASE; 698 memory_region_init_alias(&lvms->highmem, NULL, "loongarch.highram", 699 machine->ram, VIRT_LOWMEM_BASE + size, ram_size - size); 700 memory_region_add_subregion(address_space_mem, base, &lvms->highmem); 701 base += ram_size - size; 702 } 703 704 /* initialize device memory address space */ 705 if (machine->ram_size < machine->maxram_size) { 706 ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; 707 708 if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) { 709 error_report("unsupported amount of memory slots: %"PRIu64, 710 machine->ram_slots); 711 exit(EXIT_FAILURE); 712 } 713 714 if (QEMU_ALIGN_UP(machine->maxram_size, 715 TARGET_PAGE_SIZE) != machine->maxram_size) { 716 error_report("maximum memory size must by aligned to multiple of " 717 "%d bytes", TARGET_PAGE_SIZE); 718 exit(EXIT_FAILURE); 719 } 720 machine_memory_devices_init(machine, base, device_mem_size); 721 } 722 723 /* load the BIOS image. */ 724 virt_firmware_init(lvms); 725 726 /* fw_cfg init */ 727 lvms->fw_cfg = virt_fw_cfg_init(ram_size, machine); 728 rom_set_fw(lvms->fw_cfg); 729 if (lvms->fw_cfg != NULL) { 730 fw_cfg_add_file(lvms->fw_cfg, "etc/memmap", 731 memmap_table, 732 sizeof(struct memmap_entry) * (memmap_entries)); 733 } 734 735 /* Initialize the IO interrupt subsystem */ 736 virt_irq_init(lvms); 737 lvms->machine_done.notify = virt_done; 738 qemu_add_machine_init_done_notifier(&lvms->machine_done); 739 /* connect powerdown request */ 740 lvms->powerdown_notifier.notify = virt_powerdown_req; 741 qemu_register_powerdown_notifier(&lvms->powerdown_notifier); 742 743 lvms->bootinfo.ram_size = ram_size; 744 loongarch_load_kernel(machine, &lvms->bootinfo); 745 } 746 747 static void virt_get_acpi(Object *obj, Visitor *v, const char *name, 748 void *opaque, Error **errp) 749 { 750 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); 751 OnOffAuto acpi = lvms->acpi; 752 753 visit_type_OnOffAuto(v, name, &acpi, errp); 754 } 755 756 static void virt_set_acpi(Object *obj, Visitor *v, const char *name, 757 void *opaque, Error **errp) 758 { 759 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); 760 761 visit_type_OnOffAuto(v, name, &lvms->acpi, errp); 762 } 763 764 static void virt_initfn(Object *obj) 765 { 766 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); 767 768 if (tcg_enabled()) { 769 lvms->veiointc = ON_OFF_AUTO_OFF; 770 } 771 lvms->acpi = ON_OFF_AUTO_AUTO; 772 lvms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); 773 lvms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); 774 virt_flash_create(lvms); 775 } 776 777 static void virt_get_topo_from_index(MachineState *ms, 778 LoongArchCPUTopo *topo, int index) 779 { 780 topo->socket_id = index / (ms->smp.cores * ms->smp.threads); 781 topo->core_id = index / ms->smp.threads % ms->smp.cores; 782 topo->thread_id = index % ms->smp.threads; 783 } 784 785 static unsigned int topo_align_up(unsigned int count) 786 { 787 g_assert(count >= 1); 788 count -= 1; 789 return BIT(count ? 32 - clz32(count) : 0); 790 } 791 792 /* 793 * LoongArch Reference Manual Vol1, Chapter 7.4.12 CPU Identity 794 * For CPU architecture, bit0 .. bit8 is valid for CPU id, max cpuid is 512 795 * However for IPI/Eiointc interrupt controller, max supported cpu id for 796 * irq routingis 256 797 * 798 * Here max cpu id is 256 for virt machine 799 */ 800 static int virt_get_arch_id_from_topo(MachineState *ms, LoongArchCPUTopo *topo) 801 { 802 int arch_id, threads, cores, sockets; 803 804 threads = topo_align_up(ms->smp.threads); 805 cores = topo_align_up(ms->smp.cores); 806 sockets = topo_align_up(ms->smp.sockets); 807 if ((threads * cores * sockets) > 256) { 808 error_report("Exceeding max cpuid 256 with sockets[%d] cores[%d]" 809 " threads[%d]", ms->smp.sockets, ms->smp.cores, 810 ms->smp.threads); 811 exit(1); 812 } 813 814 arch_id = topo->thread_id + topo->core_id * threads; 815 arch_id += topo->socket_id * threads * cores; 816 return arch_id; 817 } 818 819 static bool memhp_type_supported(DeviceState *dev) 820 { 821 /* we only support pc dimm now */ 822 return object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) && 823 !object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); 824 } 825 826 static void virt_mem_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 827 Error **errp) 828 { 829 pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), errp); 830 } 831 832 static void virt_device_pre_plug(HotplugHandler *hotplug_dev, 833 DeviceState *dev, Error **errp) 834 { 835 if (memhp_type_supported(dev)) { 836 virt_mem_pre_plug(hotplug_dev, dev, errp); 837 } 838 } 839 840 static void virt_mem_unplug_request(HotplugHandler *hotplug_dev, 841 DeviceState *dev, Error **errp) 842 { 843 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev); 844 845 /* the acpi ged is always exist */ 846 hotplug_handler_unplug_request(HOTPLUG_HANDLER(lvms->acpi_ged), dev, 847 errp); 848 } 849 850 static void virt_device_unplug_request(HotplugHandler *hotplug_dev, 851 DeviceState *dev, Error **errp) 852 { 853 if (memhp_type_supported(dev)) { 854 virt_mem_unplug_request(hotplug_dev, dev, errp); 855 } 856 } 857 858 static void virt_mem_unplug(HotplugHandler *hotplug_dev, 859 DeviceState *dev, Error **errp) 860 { 861 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev); 862 863 hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, errp); 864 pc_dimm_unplug(PC_DIMM(dev), MACHINE(lvms)); 865 qdev_unrealize(dev); 866 } 867 868 static void virt_device_unplug(HotplugHandler *hotplug_dev, 869 DeviceState *dev, Error **errp) 870 { 871 if (memhp_type_supported(dev)) { 872 virt_mem_unplug(hotplug_dev, dev, errp); 873 } 874 } 875 876 static void virt_mem_plug(HotplugHandler *hotplug_dev, 877 DeviceState *dev, Error **errp) 878 { 879 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev); 880 881 pc_dimm_plug(PC_DIMM(dev), MACHINE(lvms)); 882 hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged), 883 dev, &error_abort); 884 } 885 886 static void virt_device_plug_cb(HotplugHandler *hotplug_dev, 887 DeviceState *dev, Error **errp) 888 { 889 LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev); 890 MachineClass *mc = MACHINE_GET_CLASS(lvms); 891 PlatformBusDevice *pbus; 892 893 if (device_is_dynamic_sysbus(mc, dev)) { 894 if (lvms->platform_bus_dev) { 895 pbus = PLATFORM_BUS_DEVICE(lvms->platform_bus_dev); 896 platform_bus_link_device(pbus, SYS_BUS_DEVICE(dev)); 897 } 898 } else if (memhp_type_supported(dev)) { 899 virt_mem_plug(hotplug_dev, dev, errp); 900 } 901 } 902 903 static HotplugHandler *virt_get_hotplug_handler(MachineState *machine, 904 DeviceState *dev) 905 { 906 MachineClass *mc = MACHINE_GET_CLASS(machine); 907 908 if (device_is_dynamic_sysbus(mc, dev) || 909 object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) || 910 memhp_type_supported(dev)) { 911 return HOTPLUG_HANDLER(machine); 912 } 913 return NULL; 914 } 915 916 static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) 917 { 918 int n, arch_id; 919 unsigned int max_cpus = ms->smp.max_cpus; 920 LoongArchCPUTopo topo; 921 922 if (ms->possible_cpus) { 923 assert(ms->possible_cpus->len == max_cpus); 924 return ms->possible_cpus; 925 } 926 927 ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + 928 sizeof(CPUArchId) * max_cpus); 929 ms->possible_cpus->len = max_cpus; 930 for (n = 0; n < ms->possible_cpus->len; n++) { 931 virt_get_topo_from_index(ms, &topo, n); 932 arch_id = virt_get_arch_id_from_topo(ms, &topo); 933 ms->possible_cpus->cpus[n].type = ms->cpu_type; 934 ms->possible_cpus->cpus[n].arch_id = arch_id; 935 ms->possible_cpus->cpus[n].vcpus_count = 1; 936 ms->possible_cpus->cpus[n].props.has_socket_id = true; 937 ms->possible_cpus->cpus[n].props.socket_id = topo.socket_id; 938 ms->possible_cpus->cpus[n].props.has_core_id = true; 939 ms->possible_cpus->cpus[n].props.core_id = topo.core_id; 940 ms->possible_cpus->cpus[n].props.has_thread_id = true; 941 ms->possible_cpus->cpus[n].props.thread_id = topo.thread_id; 942 } 943 return ms->possible_cpus; 944 } 945 946 static CpuInstanceProperties virt_cpu_index_to_props(MachineState *ms, 947 unsigned cpu_index) 948 { 949 MachineClass *mc = MACHINE_GET_CLASS(ms); 950 const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); 951 952 assert(cpu_index < possible_cpus->len); 953 return possible_cpus->cpus[cpu_index].props; 954 } 955 956 static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx) 957 { 958 int64_t socket_id; 959 960 if (ms->numa_state->num_nodes) { 961 socket_id = ms->possible_cpus->cpus[idx].props.socket_id; 962 return socket_id % ms->numa_state->num_nodes; 963 } else { 964 return 0; 965 } 966 } 967 968 static void virt_class_init(ObjectClass *oc, void *data) 969 { 970 MachineClass *mc = MACHINE_CLASS(oc); 971 HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); 972 973 mc->init = virt_init; 974 mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464"); 975 mc->default_ram_id = "loongarch.ram"; 976 mc->desc = "QEMU LoongArch Virtual Machine"; 977 mc->max_cpus = LOONGARCH_MAX_CPUS; 978 mc->is_default = 1; 979 mc->default_kernel_irqchip_split = false; 980 mc->block_default_type = IF_VIRTIO; 981 mc->default_boot_order = "c"; 982 mc->no_cdrom = 1; 983 mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids; 984 mc->cpu_index_to_instance_props = virt_cpu_index_to_props; 985 mc->get_default_cpu_node_id = virt_get_default_cpu_node_id; 986 mc->numa_mem_supported = true; 987 mc->auto_enable_numa_with_memhp = true; 988 mc->auto_enable_numa_with_memdev = true; 989 mc->get_hotplug_handler = virt_get_hotplug_handler; 990 mc->default_nic = "virtio-net-pci"; 991 hc->plug = virt_device_plug_cb; 992 hc->pre_plug = virt_device_pre_plug; 993 hc->unplug_request = virt_device_unplug_request; 994 hc->unplug = virt_device_unplug; 995 996 object_class_property_add(oc, "acpi", "OnOffAuto", 997 virt_get_acpi, virt_set_acpi, 998 NULL, NULL); 999 object_class_property_set_description(oc, "acpi", 1000 "Enable ACPI"); 1001 object_class_property_add(oc, "v-eiointc", "OnOffAuto", 1002 virt_get_veiointc, virt_set_veiointc, 1003 NULL, NULL); 1004 object_class_property_set_description(oc, "v-eiointc", 1005 "Enable Virt Extend I/O Interrupt Controller."); 1006 machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); 1007 #ifdef CONFIG_TPM 1008 machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); 1009 #endif 1010 } 1011 1012 static const TypeInfo virt_machine_types[] = { 1013 { 1014 .name = TYPE_LOONGARCH_VIRT_MACHINE, 1015 .parent = TYPE_MACHINE, 1016 .instance_size = sizeof(LoongArchVirtMachineState), 1017 .class_init = virt_class_init, 1018 .instance_init = virt_initfn, 1019 .interfaces = (InterfaceInfo[]) { 1020 { TYPE_HOTPLUG_HANDLER }, 1021 { } 1022 }, 1023 } 1024 }; 1025 1026 DEFINE_TYPES(virt_machine_types) 1027