/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}

static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}

static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint64_t bus_offset,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;

    if (kvm_enabled()) {
        table = kvmppc_create_spapr_tce(liobn, page_shift, bus_offset, nb_table,
                                        fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_malloc0(nb_table * sizeof(uint64_t));
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}

static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}

/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(IOMMUMemoryRegion *iommu,
                                               hwaddr addr,
                                               IOMMUAccessFlags flag)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check if we are in bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
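/*
 * Worked example for spapr_tce_translate_iommu() above (illustrative
 * values only): with page_shift == 12 (4KiB IOMMU pages), an access at
 * addr == 0x12345 looks up table[0x12].  If that TCE is 0xabcde007,
 * the low bits decode to SPAPR_TCE_RW, so the result is
 * iova == 0x12000, translated_addr == 0xabcde000, addr_mask == 0xfff
 * and perm == IOMMU_RW.
 */
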
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

static void spapr_tce_table_pre_save(void *opaque)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->mig_table = tcet->table;
    tcet->mig_nb_table = tcet->nb_table;

    trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table,
                               tcet->bus_offset, tcet->page_shift);
}

static uint64_t spapr_tce_get_min_page_size(IOMMUMemoryRegion *iommu)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);

    return 1ULL << tcet->page_shift;
}

static void spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    struct sPAPRTCETable *tbl = container_of(iommu, sPAPRTCETable, iommu);

    if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, true);
    } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, false);
    }
}

static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
    uint32_t old_nb_table = tcet->nb_table;
    uint64_t old_bus_offset = tcet->bus_offset;
    uint32_t old_page_shift = tcet->page_shift;

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    if (tcet->mig_nb_table != tcet->nb_table) {
        spapr_tce_table_disable(tcet);
    }

    if (tcet->mig_nb_table) {
        if (!tcet->nb_table) {
            spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset,
                                   tcet->mig_nb_table);
        }

        memcpy(tcet->table, tcet->mig_table,
               tcet->nb_table * sizeof(tcet->table[0]));

        /* mig_table was allocated with g_malloc() by the VMState loader,
         * so it must be released with g_free(), not free() */
        g_free(tcet->mig_table);
        tcet->mig_table = NULL;
    }

    trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table,
                                tcet->bus_offset, tcet->page_shift);

    return 0;
}

static bool spapr_tce_table_ex_needed(void *opaque)
{
    sPAPRTCETable *tcet = opaque;

    return tcet->bus_offset || tcet->page_shift != 0xC;
}

static const VMStateDescription vmstate_spapr_tce_table_ex = {
    .name = "spapr_iommu_ex",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_tce_table_ex_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(bus_offset, sPAPRTCETable),
        VMSTATE_UINT32(page_shift, sPAPRTCETable),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_tce_table_pre_save,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable, NULL),

        /* IOMMU state */
        VMSTATE_UINT32(mig_nb_table, sPAPRTCETable),
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32_ALLOC(mig_table, sPAPRTCETable, mig_nb_table, 0,
                                    vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_tce_table_ex,
        NULL
    }
};

static void spapr_tce_table_realize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    Object *tcetobj = OBJECT(tcet);
    gchar *tmp;

    tcet->fd = -1;
    tcet->need_vfio = false;
    tmp = g_strdup_printf("tce-root-%x", tcet->liobn);
    memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);
    g_free(tmp);

    tmp = g_strdup_printf("tce-iommu-%x", tcet->liobn);
    memory_region_init_iommu(&tcet->iommu, sizeof(tcet->iommu),
                             TYPE_SPAPR_IOMMU_MEMORY_REGION,
                             tcetobj, tmp, 0);
    g_free(tmp);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);
}

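/*
 * Note on the VFIO transition below: a KVM-accelerated table
 * (tcet->fd >= 0) is updated entirely in the kernel, so H_PUT_TCE
 * would bypass QEMU and the IOMMU notifiers would never fire.  When a
 * notifier (e.g. VFIO) attaches, the entries are therefore copied into
 * a plain userspace buffer and the in-kernel table is dropped.
 */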
void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    void *newtable;

    if (need_vfio == tcet->need_vfio) {
        /* Nothing to do */
        return;
    }

    if (!need_vfio) {
        /* FIXME: We don't support transition back to KVM accelerated
         * TCEs yet */
        return;
    }

    tcet->need_vfio = true;

    if (tcet->fd < 0) {
        /* Table is already in userspace, nothing to do */
        return;
    }

    newtable = g_malloc(table_size);
    memcpy(newtable, tcet->table, table_size);

    kvmppc_remove_spapr_tce(tcet->table, tcet->fd, tcet->nb_table);

    tcet->fd = -1;
    tcet->table = newtable;
}

sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
{
    sPAPRTCETable *tcet;
    gchar *tmp;

    if (spapr_tce_find_by_liobn(liobn)) {
        error_report("Attempted to create TCE table with duplicate"
                     " LIOBN 0x%x", liobn);
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;

    tmp = g_strdup_printf("tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);
    g_free(tmp);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

void spapr_tce_table_enable(sPAPRTCETable *tcet,
                            uint32_t page_shift, uint64_t bus_offset,
                            uint32_t nb_table)
{
    if (tcet->nb_table) {
        warn_report("trying to enable already enabled TCE table");
        return;
    }

    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_set_size(MEMORY_REGION(&tcet->iommu),
                           (uint64_t)tcet->nb_table << tcet->page_shift);
    memory_region_add_subregion(&tcet->root, tcet->bus_offset,
                                MEMORY_REGION(&tcet->iommu));
}

void spapr_tce_table_disable(sPAPRTCETable *tcet)
{
    if (!tcet->nb_table) {
        return;
    }

    memory_region_del_subregion(&tcet->root, MEMORY_REGION(&tcet->iommu));
    memory_region_set_size(MEMORY_REGION(&tcet->iommu), 0);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
    tcet->table = NULL;
    tcet->bus_offset = 0;
    tcet->page_shift = 0;
    tcet->nb_table = 0;
}

static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    QLIST_REMOVE(tcet, list);

    spapr_tce_table_disable(tcet);
}

MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->root;
}

static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    if (tcet->nb_table) {
        memset(tcet->table, 0, table_size);
    }
}

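/*
 * Write one TCE and notify IOMMU listeners (e.g. a VFIO mirror of this
 * table) about the changed mapping.  All of the H_PUT_TCE-style
 * hypercalls below funnel through this helper.
 */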
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("put_tce_emu on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace the last successful entry or the first problematic one */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }
    return ret;
}

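/*
 * H_STUFF_TCE writes the same TCE value into npages consecutive
 * entries; guests typically use it with a zero value to clear a range
 * of mappings in a single hypercall.
 */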
static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}

static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}

static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("get_tce_emu on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}

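/*
 * Encode a DMA window property (e.g. "ibm,my-dma-window" or
 * "ibm,dma-window") as five cells: the 32-bit LIOBN, the 64-bit bus
 * offset of the window and the 64-bit window size, two cells each
 * (the high size cell is always zero because the size argument is
 * 32 bits).
 */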
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;
    /* Reason: This is just an internal device for handling the hypercalls */
    dc->user_creatable = false;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static const TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
};

static void spapr_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = spapr_tce_translate_iommu;
    imrc->get_min_page_size = spapr_tce_get_min_page_size;
    imrc->notify_flag_changed = spapr_tce_notify_flag_changed;
}

static const TypeInfo spapr_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SPAPR_IOMMU_MEMORY_REGION,
    .class_init = spapr_iommu_memory_region_class_init,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
    type_register_static(&spapr_iommu_memory_region_info);
}

type_init(register_types);

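/*
 * Typical usage, as a sketch only (liobn and window_size stand in for
 * caller-provided values):
 *
 *     sPAPRTCETable *tcet = spapr_tce_new_table(DEVICE(owner), liobn);
 *     spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, 0,
 *                            window_size >> SPAPR_TCE_PAGE_SHIFT);
 *     address_space_init(&as, spapr_tce_get_iommu(tcet), "iommu-as");
 *
 * after which DMA through the returned region is translated by the
 * guest-managed table via the H_PUT_TCE family of hypercalls.
 * SPAPR_TCE_PAGE_SHIFT (12) comes from hw/ppc/spapr.h.
 */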