/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Prem Mallappa <pmallapp@broadcom.com>
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "exec/target_page.h"
#include "hw/core/cpu.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/jhash.h"
#include "qemu/module.h"

#include "qemu/error-report.h"
#include "hw/arm/smmu-common.h"
#include "smmu-internal.h"

/* IOTLB Management */

static guint smmu_iotlb_key_hash(gconstpointer v)
{
    SMMUIOTLBKey *key = (SMMUIOTLBKey *)v;
    uint32_t a, b, c;

    /* Jenkins hash */
    a = b = c = JHASH_INITVAL + sizeof(*key);
    a += key->asid + key->vmid + key->level + key->tg;
    b += extract64(key->iova, 0, 32);
    c += extract64(key->iova, 32, 32);

    __jhash_mix(a, b, c);
    __jhash_final(a, b, c);

    return c;
}

static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
{
    SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2;

    return (k1->asid == k2->asid) && (k1->iova == k2->iova) &&
           (k1->level == k2->level) && (k1->tg == k2->tg) &&
           (k1->vmid == k2->vmid);
}

SMMUIOTLBKey smmu_get_iotlb_key(int asid, int vmid, uint64_t iova,
                                uint8_t tg, uint8_t level)
{
    SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = iova,
                        .tg = tg, .level = level};

    return key;
}

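/*
 * The IOTLB is keyed by (ASID, VMID, IOVA, TG, level), so a lookup has
 * to probe every level at which a translation may have been cached.
 * As an illustration (values not normative): with a 4KB granule
 * (granule_sz = 12, so stride = 9 and tg = 1) and tsz = 16,
 * inputsize = 48 and the walk starts at level 4 - (48 - 4) / 9 = 0;
 * the loop below then probes subpage sizes 1 << 39, 1 << 30, 1 << 21
 * and 1 << 12, from largest to smallest.
 */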
static SMMUTLBEntry *smmu_iotlb_lookup_all_levels(SMMUState *bs,
                                                  SMMUTransCfg *cfg,
                                                  SMMUTransTableInfo *tt,
                                                  hwaddr iova)
{
    uint8_t tg = (tt->granule_sz - 10) / 2;
    uint8_t inputsize = 64 - tt->tsz;
    uint8_t stride = tt->granule_sz - 3;
    uint8_t level = 4 - (inputsize - 4) / stride;
    SMMUTLBEntry *entry = NULL;

    while (level <= 3) {
        uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz);
        uint64_t mask = subpage_size - 1;
        SMMUIOTLBKey key;

        key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid,
                                 iova & ~mask, tg, level);
        entry = g_hash_table_lookup(bs->iotlb, &key);
        if (entry) {
            break;
        }
        level++;
    }
    return entry;
}

/**
 * smmu_iotlb_lookup - Look up a TLB entry.
 * @bs: SMMU state which includes the TLB instance
 * @cfg: Configuration of the translation
 * @tt: Translation table info (granule and tsz)
 * @iova: IOVA address to lookup
 *
 * returns a valid entry on success, otherwise NULL.
 * In case of nested translation, tt can be updated to include
 * the granule of the found entry as it might differ from
 * the IOVA granule.
 */
SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
                                SMMUTransTableInfo *tt, hwaddr iova)
{
    SMMUTLBEntry *entry = NULL;

    entry = smmu_iotlb_lookup_all_levels(bs, cfg, tt, iova);
    /*
     * For nested translation, also try the stage 2 granule: the TLB
     * inserts entries with the stage 2 granule when the stage 2 entry
     * is the smaller one.
     */
    if (!entry && (cfg->stage == SMMU_NESTED) &&
        (cfg->s2cfg.granule_sz != tt->granule_sz)) {
        tt->granule_sz = cfg->s2cfg.granule_sz;
        entry = smmu_iotlb_lookup_all_levels(bs, cfg, tt, iova);
    }

    if (entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova,
                                    cfg->iotlb_hits, cfg->iotlb_misses,
                                    100 * cfg->iotlb_hits /
                                    (cfg->iotlb_hits + cfg->iotlb_misses));
    } else {
        cfg->iotlb_misses++;
        trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova,
                                     cfg->iotlb_hits, cfg->iotlb_misses,
                                     100 * cfg->iotlb_hits /
                                     (cfg->iotlb_hits + cfg->iotlb_misses));
    }
    return entry;
}

void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
{
    SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1);
    uint8_t tg = (new->granule - 10) / 2;

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                              tg, new->level);
    trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                            tg, new->level);
    g_hash_table_insert(bs->iotlb, key, new);
}

void smmu_iotlb_inv_all(SMMUState *s)
{
    trace_smmu_iotlb_inv_all();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean smmu_hash_remove_by_asid_vmid(gpointer key, gpointer value,
                                              gpointer user_data)
{
    SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;

    return (SMMU_IOTLB_ASID(*iotlb_key) == info->asid) &&
           (SMMU_IOTLB_VMID(*iotlb_key) == info->vmid);
}

static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
                                         gpointer user_data)
{
    int vmid = *(int *)user_data;
    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;

    return SMMU_IOTLB_VMID(*iotlb_key) == vmid;
}

static gboolean smmu_hash_remove_by_vmid_s1(gpointer key, gpointer value,
                                            gpointer user_data)
{
    int vmid = *(int *)user_data;
    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;

    return (SMMU_IOTLB_VMID(*iotlb_key) == vmid) &&
           (SMMU_IOTLB_ASID(*iotlb_key) >= 0);
}

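/*
 * The removal predicates below match when the invalidation range and a
 * cached range overlap in either direction: the invalidated address
 * falls inside the cached block, or the cached block starts inside the
 * invalidated range. Illustration: a TLBI of one 4KB page at 0x234000
 * removes a cached 2MB block at 0x200000 via the first test, while a
 * TLBI of 512 pages starting at 0x200000 removes a cached 4KB entry at
 * 0x234000 via the second.
 */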
static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
                                                   gpointer user_data)
{
    SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
    IOMMUTLBEntry *entry = &iter->entry;
    SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
    SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;

    if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
        return false;
    }
    if (info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) {
        return false;
    }
    return ((info->iova & ~entry->addr_mask) == entry->iova) ||
           ((entry->iova & ~info->mask) == info->iova);
}

static gboolean smmu_hash_remove_by_vmid_ipa(gpointer key, gpointer value,
                                             gpointer user_data)
{
    SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
    IOMMUTLBEntry *entry = &iter->entry;
    SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
    SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;

    if (SMMU_IOTLB_ASID(iotlb_key) >= 0) {
        /* This is a stage-1 address. */
        return false;
    }
    if (info->vmid != SMMU_IOTLB_VMID(iotlb_key)) {
        return false;
    }
    return ((info->iova & ~entry->addr_mask) == entry->iova) ||
           ((entry->iova & ~info->mask) == info->iova);
}

static gboolean
smmu_hash_remove_by_sid_range(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmu_config_cache_inv(sid);
    return true;
}

void smmu_configs_inv_sid_range(SMMUState *s, SMMUSIDRange sid_range)
{
    trace_smmu_configs_inv_sid_range(sid_range.start, sid_range.end);
    g_hash_table_foreach_remove(s->configs, smmu_hash_remove_by_sid_range,
                                &sid_range);
}

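/*
 * TG encoding used by range invalidations (matching the SMMUv3 TG
 * field): 1 = 4KB, 2 = 16KB, 3 = 64KB, hence granule_sz = tg * 2 + 10
 * below, e.g. tg = 2 gives a 1 << 14 (16KB) leaf size; tg = 0 means
 * the leaf size is unspecified and 4KB is assumed.
 */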
void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
                         uint8_t tg, uint64_t num_pages, uint8_t ttl)
{
    /* if tg is not set we use 4KB range invalidation */
    uint8_t granule = tg ? tg * 2 + 10 : 12;

    if (ttl && (num_pages == 1) && (asid >= 0)) {
        SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl);

        if (g_hash_table_remove(s->iotlb, &key)) {
            return;
        }
        /*
         * if the entry is not found, let's see if it does not
         * belong to a larger IOTLB entry
         */
    }

    SMMUIOTLBPageInvInfo info = {
        .asid = asid, .iova = iova,
        .vmid = vmid,
        .mask = (num_pages << granule) - 1};

    g_hash_table_foreach_remove(s->iotlb,
                                smmu_hash_remove_by_asid_vmid_iova,
                                &info);
}

/*
 * Similar to smmu_iotlb_inv_iova(), but for stage 2. The ASID is always
 * -1 here; in stage 1 invalidation, ASID = -1 means "don't care".
 */
void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
                        uint64_t num_pages, uint8_t ttl)
{
    uint8_t granule = tg ? tg * 2 + 10 : 12;
    int asid = -1;

    if (ttl && (num_pages == 1)) {
        SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, ipa, tg, ttl);

        if (g_hash_table_remove(s->iotlb, &key)) {
            return;
        }
    }

    SMMUIOTLBPageInvInfo info = {
        .iova = ipa,
        .vmid = vmid,
        .mask = (num_pages << granule) - 1};

    g_hash_table_foreach_remove(s->iotlb,
                                smmu_hash_remove_by_vmid_ipa,
                                &info);
}

void smmu_iotlb_inv_asid_vmid(SMMUState *s, int asid, int vmid)
{
    SMMUIOTLBPageInvInfo info = {
        .asid = asid,
        .vmid = vmid,
    };

    trace_smmu_iotlb_inv_asid_vmid(asid, vmid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid_vmid, &info);
}

void smmu_iotlb_inv_vmid(SMMUState *s, int vmid)
{
    trace_smmu_iotlb_inv_vmid(vmid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid);
}

inline void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
{
    trace_smmu_iotlb_inv_vmid_s1(vmid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid_s1, &vmid);
}

/* VMSAv8-64 Translation */

/**
 * get_pte - Get the content of a page table entry located at
 * @base_addr[@index]
 */
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
                   SMMUPTWEventInfo *info)
{
    int ret;
    dma_addr_t addr = baseaddr + index * sizeof(*pte);

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);

    if (ret != MEMTX_OK) {
        info->type = SMMU_PTW_ERR_WALK_EABT;
        info->addr = addr;
        return -EINVAL;
    }
    trace_smmu_get_pte(baseaddr, index, addr, *pte);
    return 0;
}

/* VMSAv8-64 Translation Table Format Descriptor Decoding */

/**
 * get_page_pte_address - returns the L3 descriptor output address,
 * i.e. the page frame
 * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format
 */
static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}

/**
 * get_table_pte_address - return table descriptor output address,
 * i.e. address of next level table
 * ARM ARM Figure D4-16 VMSAv8-64 level 0, level 1, and level 2 descriptor formats
 */
static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}

/**
 * get_block_pte_address - return block descriptor output address and block size
 * ARM ARM Figure D4-16 VMSAv8-64 level 0, level 1, and level 2 descriptor formats
 */
static inline hwaddr get_block_pte_address(uint64_t pte, int level,
                                           int granule_sz, uint64_t *bsz)
{
    int n = level_shift(level, granule_sz);

    *bsz = 1ULL << n;
    return PTE_ADDRESS(pte, n);
}

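/*
 * select_tt() mirrors the AArch64 VMSAv8 rules for choosing a
 * translation table: IOVA bit 55 picks the TTBR0 (upper bits all zero)
 * or TTBR1 (upper bits all one) candidate region, and Top Byte Ignore,
 * when enabled, excludes bits 63:56 from that check. E.g. with
 * tsz = 16 and TBI0 set, any IOVA whose bits 55:48 are zero selects
 * tt[0] regardless of its top byte.
 */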
SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
{
    bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi);
    uint8_t tbi_byte = tbi * 8;

    if (cfg->tt[0].tsz &&
        !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        return &cfg->tt[0];
    } else if (cfg->tt[1].tsz &&
               sextract64(iova, 64 - cfg->tt[1].tsz,
                          cfg->tt[1].tsz - tbi_byte) == -1) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        return &cfg->tt[1];
    } else if (!cfg->tt[0].tsz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        return &cfg->tt[0];
    } else if (!cfg->tt[1].tsz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        return &cfg->tt[1];
    }
    /* in the gap between the two regions, this is a Translation fault */
    return NULL;
}

/* Translate stage-1 table address using stage-2 page table. */
static inline int translate_table_addr_ipa(SMMUState *bs,
                                           dma_addr_t *table_addr,
                                           SMMUTransCfg *cfg,
                                           SMMUPTWEventInfo *info)
{
    dma_addr_t addr = *table_addr;
    SMMUTLBEntry *cached_entry;
    int asid;

    /*
     * The translation table walks performed from TTB0 or TTB1 are always
     * performed in IPA space if stage 2 translations are enabled.
     */
    asid = cfg->asid;
    cfg->stage = SMMU_STAGE_2;
    cfg->asid = -1;
    cached_entry = smmu_translate(bs, cfg, addr, IOMMU_RO, info);
    cfg->asid = asid;
    cfg->stage = SMMU_NESTED;

    if (cached_entry) {
        *table_addr = CACHED_ENTRY_TO_ADDR(cached_entry, addr);
        return 0;
    }

    info->stage = SMMU_STAGE_2;
    info->addr = addr;
    info->is_ipa_descriptor = true;
    return -EINVAL;
}

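/*
 * Illustration of the level arithmetic used by both walkers (assuming
 * a 4KB granule, i.e. granule_sz = 12 and stride = 9): level_shift()
 * yields 39/30/21/12 for levels 0..3, so a level 1 block maps 1GB, a
 * level 2 block maps 2MB and a level 3 page maps 4KB.
 */
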
/**
 * smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA
 * @bs: smmu state which includes TLB instance
 * @cfg: translation config
 * @iova: iova to translate
 * @perm: access type
 * @tlbe: SMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
                          dma_addr_t iova, IOMMUAccessFlags perm,
                          SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    dma_addr_t baseaddr, indexmask;
    SMMUStage stage = cfg->stage;
    SMMUTransTableInfo *tt = select_tt(cfg, iova);
    uint8_t level, granule_sz, inputsize, stride;

    if (!tt || tt->disabled) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    granule_sz = tt->granule_sz;
    stride = VMSA_STRIDE(granule_sz);
    inputsize = 64 - tt->tsz;
    level = 4 - (inputsize - 4) / stride;
    indexmask = VMSA_IDXMSK(inputsize, stride, level);

    baseaddr = extract64(tt->ttb, 0, cfg->oas);
    baseaddr &= ~indexmask;

    while (level < VMSA_LEVELS) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
            goto error;
        }
        trace_smmu_ptw_level(stage, level, iova, subpage_size,
                             baseaddr, offset, pte);

        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            break;
        }

        if (is_table_pte(pte, level)) {
            ap = PTE_APTABLE(pte);

            if (is_permission_fault(ap, perm) && !tt->had) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }
            baseaddr = get_table_pte_address(pte, granule_sz);
            if (cfg->stage == SMMU_NESTED) {
                if (translate_table_addr_ipa(bs, &baseaddr, cfg, info)) {
                    goto error;
                }
            }
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, iova,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, iova, gpa,
                                     block_size >> 20);
        }

        /*
         * QEMU does not currently implement HTTU, so if AFFD and PTE.AF
         * are 0 we take an Access flag fault. (5.4. Context Descriptor)
         * An Access flag fault takes priority over a Permission fault.
         */
        if (!PTE_AF(pte) && !cfg->affd) {
            info->type = SMMU_PTW_ERR_ACCESS;
            goto error;
        }

        ap = PTE_AP(pte);
        if (is_permission_fault(ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }

        /*
         * The address output from the translation causes a stage 1 Address
         * Size fault if it exceeds the range of the effective IPA size for
         * the given CD.
         */
        if (gpa >= (1ULL << cfg->oas)) {
            info->type = SMMU_PTW_ERR_ADDR_SIZE;
            goto error;
        }

        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = iova & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->parent_perm = PTE_AP_TO_PERM(ap);
        tlbe->entry.perm = tlbe->parent_perm;
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    info->stage = SMMU_STAGE_1;
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}

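/*
 * Stage 2 may lower the initial lookup level by concatenating up to 16
 * top-level tables (controlled by SL0/S2T0SZ). Each concatenated table
 * holds 1 << stride PTEs, i.e. one granule's worth, so with a 4KB
 * granule the table at index idx starts at VTTB + idx * 4KB; this is
 * what the baseaddr computation in smmu_ptw_64_s2() encodes.
 */
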
/**
 * smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given IPA
 * for stage-2.
 * @cfg: translation config
 * @ipa: ipa to translate
 * @perm: access type
 * @tlbe: SMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
                          dma_addr_t ipa, IOMMUAccessFlags perm,
                          SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    const SMMUStage stage = SMMU_STAGE_2;
    int granule_sz = cfg->s2cfg.granule_sz;
    /* ARM DDI0487I.a: Table D8-7. */
    int inputsize = 64 - cfg->s2cfg.tsz;
    int level = get_start_level(cfg->s2cfg.sl0, granule_sz);
    int stride = VMSA_STRIDE(granule_sz);
    int idx = pgd_concat_idx(level, granule_sz, ipa);
    /*
     * Get the ttb from the concatenated structure.
     * The offset is idx * (size of each ttb), i.e.
     * idx * (number of PTEs) * sizeof(pte).
     */
    uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, cfg->s2cfg.eff_ps) +
                        (1 << stride) * idx * sizeof(uint64_t);
    dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level);

    baseaddr &= ~indexmask;

    /*
     * On input, a stage 2 Translation fault occurs if the IPA is outside the
     * range configured by the relevant S2T0SZ field of the STE.
     */
    if (ipa >= (1ULL << inputsize)) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error_ipa;
    }

    while (level < VMSA_LEVELS) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t s2ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
            goto error;
        }
        trace_smmu_ptw_level(stage, level, ipa, subpage_size,
                             baseaddr, offset, pte);
        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            break;
        }

        if (is_table_pte(pte, level)) {
            baseaddr = get_table_pte_address(pte, granule_sz);
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, ipa,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, ipa, gpa,
                                     block_size >> 20);
        }

        /*
         * If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry)
         * An Access fault takes priority over a Permission fault.
         */
        if (!PTE_AF(pte) && !cfg->s2cfg.affd) {
            info->type = SMMU_PTW_ERR_ACCESS;
            goto error_ipa;
        }

        s2ap = PTE_AP(pte);
        if (is_permission_fault_s2(s2ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error_ipa;
        }

        /*
         * The address output from the translation causes a stage 2 Address
         * Size fault if it exceeds the effective PA output range.
         */
        if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) {
            info->type = SMMU_PTW_ERR_ADDR_SIZE;
            goto error_ipa;
        }

        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = ipa & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->parent_perm = s2ap;
        tlbe->entry.perm = tlbe->parent_perm;
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    info->type = SMMU_PTW_ERR_TRANSLATION;

error_ipa:
    info->addr = ipa;
error:
    info->stage = SMMU_STAGE_2;
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}

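/*
 * Illustration of the combination performed below: if stage 1 maps a
 * 2MB block but stage 2 backs it with 4KB pages, the combined entry is
 * shrunk to 4KB so that it never spans more than one stage 2 mapping.
 * entry.perm keeps the stage 1 rights while parent_perm records the
 * stage 2 rights, and both are checked on a TLB hit.
 */
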
/*
 * Combine S1 and S2 TLB entries into a single entry.
 * As a result the S1 entry is overwritten with the combined data.
 */
static void combine_tlb(SMMUTLBEntry *tlbe, SMMUTLBEntry *tlbe_s2,
                        dma_addr_t iova, SMMUTransCfg *cfg)
{
    if (tlbe_s2->entry.addr_mask < tlbe->entry.addr_mask) {
        tlbe->entry.addr_mask = tlbe_s2->entry.addr_mask;
        tlbe->granule = tlbe_s2->granule;
        tlbe->level = tlbe_s2->level;
    }

    tlbe->entry.translated_addr = CACHED_ENTRY_TO_ADDR(tlbe_s2,
                                    tlbe->entry.translated_addr);

    tlbe->entry.iova = iova & ~tlbe->entry.addr_mask;
    /* parent_perm has s2 perm while perm keeps s1 perm. */
    tlbe->parent_perm = tlbe_s2->entry.perm;
}

/**
 * smmu_ptw - Walk the page tables for an IOVA, according to @cfg
 *
 * @bs: smmu state which includes TLB instance
 * @cfg: translation configuration
 * @iova: iova to translate
 * @perm: tentative access type
 * @tlbe: returned entry
 * @info: ptw event handle
 *
 * return 0 on success
 */
int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
             IOMMUAccessFlags perm, SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    int ret;
    SMMUTLBEntry tlbe_s2;
    dma_addr_t ipa;

    if (cfg->stage == SMMU_STAGE_1) {
        return smmu_ptw_64_s1(bs, cfg, iova, perm, tlbe, info);
    } else if (cfg->stage == SMMU_STAGE_2) {
        /*
         * If stage 1 is bypassed (or unimplemented), the input address is
         * passed directly to stage 2 as an IPA. If the input address of a
         * transaction exceeds the size of the IAS, a stage 1 Address Size
         * fault occurs. For AA64, IAS = OAS according to (IHI 0070.E.a)
         * "3.4 Address sizes".
         */
        if (iova >= (1ULL << cfg->oas)) {
            info->type = SMMU_PTW_ERR_ADDR_SIZE;
            info->stage = SMMU_STAGE_1;
            tlbe->entry.perm = IOMMU_NONE;
            return -EINVAL;
        }

        return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info);
    }

    /* SMMU_NESTED. */
    ret = smmu_ptw_64_s1(bs, cfg, iova, perm, tlbe, info);
    if (ret) {
        return ret;
    }

    ipa = CACHED_ENTRY_TO_ADDR(tlbe, iova);
    ret = smmu_ptw_64_s2(cfg, ipa, perm, &tlbe_s2, info);
    if (ret) {
        return ret;
    }

    combine_tlb(tlbe, &tlbe_s2, iova, cfg);
    return 0;
}

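/*
 * Common translation entry point: consult the IOTLB first and, on a
 * hit, re-check write permission against both stages (entry.perm for
 * stage 1, parent_perm for stage 2); on a miss, run the full walk via
 * smmu_ptw() and cache the result.
 */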
SMMUTLBEntry *smmu_translate(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t addr,
                             IOMMUAccessFlags flag, SMMUPTWEventInfo *info)
{
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    int status;

    /*
     * Combined attributes used for TLB lookup, holds the attributes for
     * the input stage.
     */
    SMMUTransTableInfo tt_combined;

    if (cfg->stage == SMMU_STAGE_2) {
        /* Stage2. */
        tt_combined.granule_sz = cfg->s2cfg.granule_sz;
        tt_combined.tsz = cfg->s2cfg.tsz;
    } else {
        /* Select stage1 translation table. */
        tt = select_tt(cfg, addr);
        if (!tt) {
            info->type = SMMU_PTW_ERR_TRANSLATION;
            info->stage = SMMU_STAGE_1;
            return NULL;
        }
        tt_combined.granule_sz = tt->granule_sz;
        tt_combined.tsz = tt->tsz;
    }

    cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm &
            cached_entry->parent_perm & IOMMU_WO)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            info->stage = !(cached_entry->entry.perm & IOMMU_WO) ?
                          SMMU_STAGE_1 :
                          SMMU_STAGE_2;
            return NULL;
        }
        return cached_entry;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);
    status = smmu_ptw(bs, cfg, addr, flag, cached_entry, info);
    if (status) {
        g_free(cached_entry);
        return NULL;
    }
    smmu_iotlb_insert(bs, cfg, cached_entry);
    return cached_entry;
}

/**
 * The bus number is used for lookup when an SID based invalidation occurs.
 * In that case we lazily populate the SMMUPciBus array from the bus hash
 * table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus
 * numbers may not always be initialized yet.
 */
SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num)
{
    SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num];
    GHashTableIter iter;

    if (smmu_pci_bus) {
        return smmu_pci_bus;
    }

    g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) {
        if (pci_bus_num(smmu_pci_bus->bus) == bus_num) {
            s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus;
            return smmu_pci_bus;
        }
    }

    return NULL;
}

static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn)
{
    SMMUState *s = opaque;
    SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus);
    SMMUDevice *sdev;
    static unsigned int index;

    if (!sbus) {
        sbus = g_malloc0(sizeof(SMMUPciBus) +
                         sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++);

        sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1);

        sdev->smmu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu),
                                 s->mrtypename,
                                 OBJECT(s), name, UINT64_MAX);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu), name);
        trace_smmu_add_mr(name);
        g_free(name);
    }

    return &sdev->as;
}

static const PCIIOMMUOps smmu_ops = {
    .get_address_space = smmu_find_add_as,
};

SMMUDevice *smmu_find_sdev(SMMUState *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    SMMUPciBus *smmu_bus;

    bus_n = PCI_BUS_NUM(sid);
    smmu_bus = smmu_find_smmu_pcibus(s, bus_n);
    if (smmu_bus) {
        devfn = SMMU_PCI_DEVFN(sid);
        return smmu_bus->pbdev[devfn];
    }
    return NULL;
}

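/*
 * For PCI devices the StreamID is assumed to be the requester ID,
 * sid = (bus << 8) | devfn, which PCI_BUS_NUM() and SMMU_PCI_DEVFN()
 * decompose above. For example, sid 0x208 names devfn 8 (device 1,
 * function 0) on bus 2.
 */
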
/* Unmap all notifiers attached to @mr */
static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
{
    IOMMUNotifier *n;

    trace_smmu_inv_notifiers_mr(mr->parent_obj.name);
    IOMMU_NOTIFIER_FOREACH(n, mr) {
        memory_region_unmap_iommu_notifier_range(n);
    }
}

/* Unmap all notifiers of all MRs */
void smmu_inv_notifiers_all(SMMUState *s)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        smmu_inv_notifiers_mr(&sdev->iommu);
    }
}

static void smmu_base_realize(DeviceState *dev, Error **errp)
{
    SMMUState *s = ARM_SMMU(dev);
    SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev);
    Error *local_err = NULL;

    sbc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal,
                                     g_free, g_free);
    s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, &smmu_ops, s);
    } else {
        error_setg(errp, "SMMU is not attached to any PCI bus!");
    }
}

/*
 * Make sure the IOMMU is reset in the 'exit' phase, after all
 * outstanding DMA requests have been quiesced during the 'enter'
 * or 'hold' reset phases.
 */
static void smmu_base_reset_exit(Object *obj, ResetType type)
{
    SMMUState *s = ARM_SMMU(obj);

    memset(s->smmu_pcibus_by_bus_num, 0, sizeof(s->smmu_pcibus_by_bus_num));

    g_hash_table_remove_all(s->configs);
    g_hash_table_remove_all(s->iotlb);
}

static const Property smmu_dev_properties[] = {
    DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
    DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus,
                     TYPE_PCI_BUS, PCIBus *),
};

static void smmu_base_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass);

    device_class_set_props(dc, smmu_dev_properties);
    device_class_set_parent_realize(dc, smmu_base_realize,
                                    &sbc->parent_realize);
    rc->phases.exit = smmu_base_reset_exit;
}

static const TypeInfo smmu_base_info = {
    .name = TYPE_ARM_SMMU,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SMMUState),
    .class_data = NULL,
    .class_size = sizeof(SMMUBaseClass),
    .class_init = smmu_base_class_init,
    .abstract = true,
};

static void smmu_base_register_types(void)
{
    type_register_static(&smmu_base_info);
}

type_init(smmu_base_register_types)