// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

#include "dma-iommu.h"

static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	struct zpci_iommu_ctrs	ctrs;
	unsigned long		*dma_table;
	spinlock_t		list_lock;
	struct rcu_head		rcu;
	u8			origin_type;
};

static struct iommu_domain blocking_domain;

static inline unsigned int calc_rfx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rsx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rtx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_sx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_px(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}

static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
{
	*entry &= ZPCI_PTE_FLAG_MASK;
	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}

static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rso & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RFX;
}

static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RSX;
}

static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RTX;
}

static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
{
	*entry &= ZPCI_STE_FLAG_MASK;
	*entry |= (pto & ZPCI_STE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_SX;
}

static inline void validate_rf_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RFX;
}

static inline void validate_rs_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RSX;
}

static inline void validate_rt_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RTX;
}

static inline void validate_st_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry |= ZPCI_TABLE_VALID;
}

static inline void invalidate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_INVALID;
}

static inline void validate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_VALID;
}

static inline void entry_set_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_PROTECTED;
}

static inline void entry_clr_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_UNPROTECTED;
}

static inline int reg_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static inline int pt_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static inline unsigned long *get_rf_rso(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rs_rto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rt_sto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_st_pto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
	else
		return NULL;
}

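/*
 * Slab caches for the CPU-side DMA translation tables: one for the
 * region/segment tables and one for the page tables, each created with the
 * size and alignment the hardware expects for its table origins.
 */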
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
						   ZPCI_TABLE_SIZE,
						   ZPCI_TABLE_ALIGN,
						   0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
						 ZPCI_PT_SIZE,
						 ZPCI_PT_ALIGN,
						 0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

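/*
 * Free a translation-table hierarchy bottom-up: a segment-table entry points
 * to a page table, a region-third-table entry to a segment table, and so on.
 * Only entries marked valid are followed; each table is then returned to its
 * slab cache.
 */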
static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_free_rt_table(unsigned long entry)
{
	unsigned long *rto = get_rs_rto(entry);
	int rtx;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(rto[rtx]))
			dma_free_seg_table(rto[rtx]);

	dma_free_cpu_table(rto);
}

static void dma_free_rs_table(unsigned long entry)
{
	unsigned long *rso = get_rf_rso(entry);
	int rsx;

	for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
		if (reg_entry_isvalid(rso[rsx]))
			dma_free_rt_table(rso[rsx]);

	dma_free_cpu_table(rso);
}

static void dma_cleanup_tables(struct s390_domain *domain)
{
	int rtx, rsx, rfx;

	if (!domain->dma_table)
		return;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
			if (reg_entry_isvalid(domain->dma_table[rfx]))
				dma_free_rs_table(domain->dma_table[rfx]);
		break;
	case ZPCI_TABLE_TYPE_RSX:
		for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
			if (reg_entry_isvalid(domain->dma_table[rsx]))
				dma_free_rt_table(domain->dma_table[rsx]);
		break;
	case ZPCI_TABLE_TYPE_RTX:
		for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
			if (reg_entry_isvalid(domain->dma_table[rtx]))
				dma_free_seg_table(domain->dma_table[rtx]);
		break;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return;
	}

	dma_free_cpu_table(domain->dma_table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

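/*
 * The dma_walk_*() and dma_get_*_origin() helpers below populate missing
 * lower-level tables locklessly: a new table is allocated, a matching entry
 * is prepared, and cmpxchg() installs it only if the slot is still invalid.
 * If another CPU won the race, the freshly allocated table is freed again
 * and the winner's table is used instead.
 */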
static unsigned long *dma_walk_rs_table(unsigned long *rso,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rsx = calc_rsx(dma_addr);
	unsigned long old_rse, rse;
	unsigned long *rsep, *rto;

	rsep = &rso[rsx];
	rse = READ_ONCE(*rsep);
	if (reg_entry_isvalid(rse)) {
		rto = get_rs_rto(rse);
	} else {
		rto = dma_alloc_cpu_table(gfp);
		if (!rto)
			return NULL;

		set_rs_rto(&rse, virt_to_phys(rto));
		validate_rs_entry(&rse);
		entry_clr_protected(&rse);

		old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
		if (old_rse != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rto);
			rto = get_rs_rto(old_rse);
		}
	}
	return rto;
}

static unsigned long *dma_walk_rf_table(unsigned long *rfo,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rfx = calc_rfx(dma_addr);
	unsigned long old_rfe, rfe;
	unsigned long *rfep, *rso;

	rfep = &rfo[rfx];
	rfe = READ_ONCE(*rfep);
	if (reg_entry_isvalid(rfe)) {
		rso = get_rf_rso(rfe);
	} else {
		rso = dma_alloc_cpu_table(gfp);
		if (!rso)
			return NULL;

		set_rf_rso(&rfe, virt_to_phys(rso));
		validate_rf_entry(&rfe);
		entry_clr_protected(&rfe);

		old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
		if (old_rfe != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rso);
			rso = get_rf_rso(old_rfe);
		}
	}

	if (!rso)
		return NULL;

	return dma_walk_rs_table(rso, dma_addr, gfp);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table(gfp);
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table(gfp);
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}

static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
					     dma_addr_t dma_addr, gfp_t gfp)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RSX:
		return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
					 dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned long *rto, *sto, *pto;
	unsigned int rtx, sx, px;

	rto = dma_walk_region_tables(domain, dma_addr, gfp);
	if (!rto)
		return NULL;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx], gfp);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return zdev->pft != PCI_FUNC_TYPE_ISM;
	default:
		return false;
	}
}

static inline u64 max_tbl_size(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_TABLE_SIZE_RT - 1;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_TABLE_SIZE_RS - 1;
	case ZPCI_TABLE_TYPE_RFX:
		return U64_MAX;
	default:
		return 0;
	}
}

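/*
 * A paging domain's root table type is picked as the smallest hierarchy that
 * covers the device's DMA aperture: region-third if it fits, otherwise
 * region-second or region-first when the device's DT support mask (dtsm)
 * allows it, falling back to region-third with a shrunken aperture.
 */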
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	u64 aperture_size;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	aperture_size = min(s390_iommu_aperture,
			    zdev->end_dma - zdev->start_dma + 1);
	if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
	} else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
		   (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
	} else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
	} else {
		/* Assume RTX available */
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
		aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
	}
	zdev->end_dma = zdev->start_dma + aperture_size - 1;

	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

	dma_cleanup_tables(s390_domain);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	rcu_read_lock();
	WARN_ON(!list_empty(&s390_domain->devices));
	rcu_read_unlock();

	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}

static void zdev_s390_domain_update(struct zpci_dev *zdev,
				    struct iommu_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->dom_lock, flags);
	zdev->s390_domain = domain;
	spin_unlock_irqrestore(&zdev->dom_lock, flags);
}

static u64 get_iota_region_flag(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_IOTA_RTTO_FLAG;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_IOTA_RSTO_FLAG;
	case ZPCI_TABLE_TYPE_RFX:
		return ZPCI_IOTA_RFTO_FLAG;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return 0;
	}
}

static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
				      struct iommu_domain *domain, u8 *status)
{
	struct s390_domain *s390_domain;
	int rc = 0;
	u64 iota;

	switch (domain->type) {
	case IOMMU_DOMAIN_IDENTITY:
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, 0, status);
		break;
	case IOMMU_DOMAIN_BLOCKED:
		/* Nothing to do in this case */
		break;
	default:
		s390_domain = to_s390_domain(domain);
		iota = virt_to_phys(s390_domain->dma_table) |
		       get_iota_region_flag(s390_domain);
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, iota, status);
	}

	return rc;
}

int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&zdev->dom_lock, flags);

	rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);

	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	return rc;
}

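/*
 * Attaching the blocking domain detaches the device from its current
 * paging or identity domain: the device is dropped from that domain's
 * device list, its I/O address translation is unregistered, and DMA
 * stays blocked until another domain is attached.
 */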
static int blocking_domain_attach_device(struct iommu_domain *domain,
					 struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	unsigned long flags;

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	s390_domain = to_s390_domain(zdev->s390_domain);
	if (zdev->dma_table) {
		spin_lock_irqsave(&s390_domain->list_lock, flags);
		list_del_rcu(&zdev->iommu_list);
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
	}

	zpci_unregister_ioat(zdev, 0);
	zdev->dma_table = NULL;
	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;
	u8 status;
	int cc;

	if (!zdev)
		return -ENODEV;

	if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
		domain->geometry.aperture_end < zdev->start_dma))
		return -EINVAL;

	blocking_domain_attach_device(&blocking_domain, dev);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return -EIO;
	zdev->dma_table = s390_domain->dma_table;
	zdev_s390_domain_update(zdev, domain);

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}

static void s390_iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_resv_region *region;
	u64 max_size, end_resv;
	unsigned long flags;

	if (zdev->start_dma) {
		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
						 IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}

	spin_lock_irqsave(&zdev->dom_lock, flags);
	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
		spin_unlock_irqrestore(&zdev->dom_lock, flags);
		return;
	}

	max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	if (zdev->end_dma < max_size) {
		end_resv = max_size - zdev->end_dma;
		region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
						 0, IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}
}

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	if (zdev->start_dma > zdev->end_dma)
		return ERR_PTR(-EINVAL);

	if (zdev->tlb_refresh)
		dev->iommu->shadow_on_flush = 1;

	/* Start with DMA blocked */
	spin_lock_init(&zdev->dom_lock);
	zdev_s390_domain_update(zdev, &blocking_domain);

	return &zdev->iommu_dev;
}

static int zpci_refresh_all(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
				  zdev->end_dma - zdev->start_dma + 1);
}

static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.global_rpcits);
		zpci_refresh_all(zdev);
	}
	rcu_read_unlock();
}

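/*
 * IOTLB maintenance uses the RPCIT-based refresh (zpci_refresh_trans()):
 * flush_iotlb_all above refreshes each attached device's entire aperture,
 * while iotlb_sync below refreshes only the gathered IOVA range.
 */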
static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = gather->end - gather->start + 1;
	struct zpci_dev *zdev;

	/* If gather was never added to, there is nothing to flush */
	if (!gather->end)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.sync_rpcits);
		zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
				   size);
	}
	rcu_read_unlock();
}

static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		if (!zdev->tlb_refresh)
			continue;
		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
		ret = zpci_refresh_trans((u64)zdev->fh << 32,
					 iova, size);
		/*
		 * let the hypervisor discover invalidated entries
		 * allowing it to free IOVAs and unpin pages
		 */
		if (ret == -ENOMEM) {
			ret = zpci_refresh_all(zdev);
			if (ret)
				break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
				     phys_addr_t pa, dma_addr_t dma_addr,
				     unsigned long nr_pages, int flags,
				     gfp_t gfp)
{
	phys_addr_t page_addr = pa & PAGE_MASK;
	unsigned long *entry;
	unsigned long i;
	int rc;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (unlikely(!entry)) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	return 0;

undo_cpu_trans:
	while (i-- > 0) {
		dma_addr -= PAGE_SIZE;
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (!entry)
			break;
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
	}

	return rc;
}

static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
				       dma_addr_t dma_addr, unsigned long nr_pages)
{
	unsigned long *entry;
	unsigned long i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
		if (unlikely(!entry)) {
			rc = -EINVAL;
			break;
		}
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
		dma_addr += PAGE_SIZE;
	}

	return rc;
}

static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount,
				int prot, gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
				       pgcount, flags, gfp);
	if (!rc) {
		*mapped = size;
		atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
	}

	return rc;
}

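/*
 * For iova_to_phys the walk starts at the domain's root table and descends
 * according to origin_type: the helpers below peel off the region-first and
 * region-second levels when present, so the common lookup always continues
 * from a region-third table.
 */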
static unsigned long *get_rso_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rfo;
	unsigned long rfe;
	unsigned int rfx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		rfo = domain->dma_table;
		rfx = calc_rfx(iova);
		rfe = READ_ONCE(rfo[rfx]);
		if (!reg_entry_isvalid(rfe))
			return NULL;
		return get_rf_rso(rfe);
	case ZPCI_TABLE_TYPE_RSX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static unsigned long *get_rto_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rso;
	unsigned long rse;
	unsigned int rsx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
	case ZPCI_TABLE_TYPE_RSX:
		rso = get_rso_from_iova(domain, iova);
		rsx = calc_rsx(iova);
		rse = READ_ONCE(rso[rsx]);
		if (!reg_entry_isvalid(rse))
			return NULL;
		return get_rs_rto(rse);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *rto, *sto, *pto;
	unsigned long ste, pte, rte;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rto = get_rto_from_iova(s390_domain, iova);
	if (!rto)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);

	rte = READ_ONCE(rto[rtx]);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
		ste = READ_ONCE(sto[sx]);
		if (reg_entry_isvalid(ste)) {
			pto = get_st_pto(ste);
			pte = READ_ONCE(pto[px]);
			if (pt_entry_isvalid(pte))
				phys = pte & ZPCI_PTE_ADDR_MASK;
		}
	}

	return phys;
}

static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	iommu_iotlb_gather_add_range(gather, iova, size);
	atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);

	return size;
}

struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain;

	lockdep_assert_held(&zdev->dom_lock);

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return NULL;

	s390_domain = to_s390_domain(zdev->s390_domain);
	return &s390_domain->ctrs;
}

int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	if (zdev->rtr_avail) {
		rc = iommu_device_register(&zdev->iommu_dev,
					   &s390_iommu_rtr_ops, NULL);
	} else {
		rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops,
					   NULL);
	}
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict")) {
		pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
		iommu_set_dma_strict();
	}
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);

static int __init s390_iommu_init(void)
{
	int rc;

	iommu_dma_forcedac = true;
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	rc = dma_alloc_cpu_table_caches();
	if (rc)
		return rc;

	return rc;
}
subsys_initcall(s390_iommu_init);

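/*
 * Identity domain support: attaching it re-registers the device's I/O
 * address translation with a zero IOTA, i.e. without a translation table.
 * It is only exposed via s390_iommu_rtr_ops, which zpci_init_iommu()
 * selects when zdev->rtr_avail is set.
 */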
static int s390_attach_dev_identity(struct iommu_domain *domain,
				    struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	u8 status;
	int cc;

	blocking_domain_attach_device(&blocking_domain, dev);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);

	/*
	 * If the device is undergoing error recovery the reset code
	 * will re-establish the new domain.
	 */
	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return -EIO;

	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static const struct iommu_domain_ops s390_identity_ops = {
	.attach_dev = s390_attach_dev_identity,
};

static struct iommu_domain s390_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &s390_identity_ops,
};

static struct iommu_domain blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &(const struct iommu_domain_ops) {
		.attach_dev	= blocking_domain_attach_device,
	}
};

#define S390_IOMMU_COMMON_OPS() \
	.blocked_domain = &blocking_domain, \
	.release_domain = &blocking_domain, \
	.capable = s390_iommu_capable, \
	.domain_alloc_paging = s390_domain_alloc_paging, \
	.probe_device = s390_iommu_probe_device, \
	.device_group = generic_device_group, \
	.pgsize_bitmap = SZ_4K, \
	.get_resv_regions = s390_iommu_get_resv_regions, \
	.default_domain_ops = &(const struct iommu_domain_ops) { \
		.attach_dev = s390_iommu_attach_device, \
		.map_pages = s390_iommu_map_pages, \
		.unmap_pages = s390_iommu_unmap_pages, \
		.flush_iotlb_all = s390_iommu_flush_iotlb_all, \
		.iotlb_sync = s390_iommu_iotlb_sync, \
		.iotlb_sync_map = s390_iommu_iotlb_sync_map, \
		.iova_to_phys = s390_iommu_iova_to_phys, \
		.free = s390_domain_free, \
	}

static const struct iommu_ops s390_iommu_ops = {
	S390_IOMMU_COMMON_OPS()
};

static const struct iommu_ops s390_iommu_rtr_ops = {
	.identity_domain = &s390_identity_domain,
	S390_IOMMU_COMMON_OPS()
};