Lines matching "dma" and "requests"
2 * QEMU emulation of an Intel IOMMU (VT-d)
3 * (DMA Remapping device)
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
30 #include "hw/qdev-properties.h"
32 #include "hw/i386/apic-msidef.h"
33 #include "hw/i386/x86-iommu.h"
34 #include "hw/pci-host/q35.h"
36 #include "system/dma.h"
45 ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK)
47 ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK)
50 #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT)
52 (4 + (((pe)->val[2] >> 2) & VTD_SM_PASID_ENTRY_FLPM))
54 (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))
90 error_report("We need to set caching-mode=on for intel-iommu to enable " in vtd_panic_require_caching_mode()
98 stq_le_p(&s->csr[addr], val); in vtd_define_quad()
99 stq_le_p(&s->wmask[addr], wmask); in vtd_define_quad()
100 stq_le_p(&s->w1cmask[addr], w1cmask); in vtd_define_quad()
105 stq_le_p(&s->womask[addr], mask); in vtd_define_quad_wo()
111 stl_le_p(&s->csr[addr], val); in vtd_define_long()
112 stl_le_p(&s->wmask[addr], wmask); in vtd_define_long()
113 stl_le_p(&s->w1cmask[addr], w1cmask); in vtd_define_long()
118 stl_le_p(&s->womask[addr], mask); in vtd_define_long_wo()
124 uint64_t oldval = ldq_le_p(&s->csr[addr]); in vtd_set_quad()
125 uint64_t wmask = ldq_le_p(&s->wmask[addr]); in vtd_set_quad()
126 uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]); in vtd_set_quad()
127 stq_le_p(&s->csr[addr], in vtd_set_quad()
133 uint32_t oldval = ldl_le_p(&s->csr[addr]); in vtd_set_long()
134 uint32_t wmask = ldl_le_p(&s->wmask[addr]); in vtd_set_long()
135 uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]); in vtd_set_long()
136 stl_le_p(&s->csr[addr], in vtd_set_long()
142 uint64_t val = ldq_le_p(&s->csr[addr]); in vtd_get_quad()
143 uint64_t womask = ldq_le_p(&s->womask[addr]); in vtd_get_quad()
149 uint32_t val = ldl_le_p(&s->csr[addr]); in vtd_get_long()
150 uint32_t womask = ldl_le_p(&s->womask[addr]); in vtd_get_long()
157 return ldq_le_p(&s->csr[addr]); in vtd_get_quad_raw()
162 return ldl_le_p(&s->csr[addr]); in vtd_get_long_raw()
167 stq_le_p(&s->csr[addr], val); in vtd_set_quad_raw()
173 uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask; in vtd_set_clear_mask_long()
174 stl_le_p(&s->csr[addr], new_val); in vtd_set_clear_mask_long()
181 uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask; in vtd_set_clear_mask_quad()
182 stq_le_p(&s->csr[addr], new_val); in vtd_set_clear_mask_quad()
188 qemu_mutex_lock(&s->iommu_lock); in vtd_iommu_lock()
193 qemu_mutex_unlock(&s->iommu_lock); in vtd_iommu_unlock()
200 if (s->scalable_mode) { in vtd_update_scalable_state()
201 s->root_scalable = val & VTD_RTADDR_SMT; in vtd_update_scalable_state()
209 if (s->ecap & VTD_ECAP_SMTS && in vtd_update_iq_dw()
211 s->iq_dw = true; in vtd_update_iq_dw()
213 s->iq_dw = false; in vtd_update_iq_dw()
220 return as->notifier_flags & IOMMU_NOTIFIER_MAP; in vtd_as_has_map_notifier()
229 return key1->sid == key2->sid && in vtd_iotlb_equal()
230 key1->pasid == key2->pasid && in vtd_iotlb_equal()
231 key1->level == key2->level && in vtd_iotlb_equal()
232 key1->gfn == key2->gfn; in vtd_iotlb_equal()
238 uint64_t hash64 = key->gfn | ((uint64_t)(key->sid) << VTD_IOTLB_SID_SHIFT) | in vtd_iotlb_hash()
239 (uint64_t)(key->level - 1) << VTD_IOTLB_LVL_SHIFT | in vtd_iotlb_hash()
240 (uint64_t)(key->pasid) << VTD_IOTLB_PASID_SHIFT; in vtd_iotlb_hash()
250 return (key1->bus == key2->bus) && (key1->devfn == key2->devfn) && in vtd_as_equal()
251 (key1->pasid == key2->pasid); in vtd_as_equal()
262 guint value = (guint)(uintptr_t)key->bus; in vtd_as_hash()
264 return (guint)(value << 8 | key->devfn); in vtd_as_hash()
278 return (key1->bus == key2->bus) && (key1->devfn == key2->devfn); in vtd_hiod_equal()
291 return entry->domain_id == domain_id; in vtd_hash_remove_by_domain()
298 return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_LEVEL_BITS; in vtd_pt_level_shift()
303 return ~((1ULL << vtd_pt_level_shift(level)) - 1); in vtd_pt_level_page_mask()
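The two helpers just above are ordinary paging arithmetic from QEMU's VT-d emulation: with 4 KiB base pages (VTD_PAGE_SHIFT_4K, i.e. shift 12) and 9 index bits per page-table level (VTD_LEVEL_BITS), the shift for a level is 12 + (level - 1) * 9. A minimal stand-alone sketch of the same computation, with the constants assumed rather than taken from the listing:

    /* Sketch only: assumes 4 KiB pages (shift 12) and 9 index bits per level. */
    #include <stdint.h>

    static inline uint32_t pt_level_shift(uint32_t level)
    {
        return 12 + (level - 1) * 9;                    /* L1 -> 12, L2 -> 21, L3 -> 30 */
    }

    static inline uint64_t pt_level_page_mask(uint32_t level)
    {
        return ~((1ULL << pt_level_shift(level)) - 1);  /* e.g. L2 -> ~0x1fffff (2 MiB) */
    }

So a level-2 entry masks off the low 21 bits (2 MiB granule) and a level-3 entry the low 30 bits (1 GiB), which lines up with the 39-bit / 48-bit address widths derived further down in the listing.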
311 uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; in vtd_hash_remove_by_page()
312 uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; in vtd_hash_remove_by_page()
314 if (entry->domain_id != info->domain_id) { in vtd_hash_remove_by_page()
319 * According to spec, IOTLB entries caching first-stage (PGTT=001b) or in vtd_hash_remove_by_page()
320 * nested (PGTT=011b) mapping associated with specified domain-id are in vtd_hash_remove_by_page()
323 if (entry->pgtt == VTD_SM_PASID_ENTRY_FLT) { in vtd_hash_remove_by_page()
327 return (entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb; in vtd_hash_remove_by_page()
335 uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; in vtd_hash_remove_by_page_piotlb()
336 uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; in vtd_hash_remove_by_page_piotlb()
339 * According to spec, PASID-based-IOTLB Invalidation in page granularity in vtd_hash_remove_by_page_piotlb()
340 * doesn't invalidate IOTLB entries caching second-stage (PGTT=010b) in vtd_hash_remove_by_page_piotlb()
341 * or pass-through (PGTT=100b) mappings. Nested isn't supported yet, in vtd_hash_remove_by_page_piotlb()
342 * so only need to check first-stage (PGTT=001b) mappings. in vtd_hash_remove_by_page_piotlb()
344 if (entry->pgtt != VTD_SM_PASID_ENTRY_FLT) { in vtd_hash_remove_by_page_piotlb()
348 return entry->domain_id == info->domain_id && entry->pasid == info->pasid && in vtd_hash_remove_by_page_piotlb()
349 ((entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb); in vtd_hash_remove_by_page_piotlb()
362 g_hash_table_iter_init(&as_it, s->vtd_address_spaces); in vtd_reset_context_cache_locked()
365 vtd_as->context_cache_entry.context_cache_gen = 0; in vtd_reset_context_cache_locked()
367 s->context_cache_gen = 1; in vtd_reset_context_cache_locked()
373 assert(s->iotlb); in vtd_reset_iotlb_locked()
374 g_hash_table_remove_all(s->iotlb); in vtd_reset_iotlb_locked()
410 entry = g_hash_table_lookup(s->iotlb, &key); in vtd_lookup_iotlb()
431 if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) { in vtd_update_iotlb()
436 entry->gfn = gfn; in vtd_update_iotlb()
437 entry->domain_id = domain_id; in vtd_update_iotlb()
438 entry->pte = pte; in vtd_update_iotlb()
439 entry->access_flags = access_flags; in vtd_update_iotlb()
440 entry->mask = vtd_pt_level_page_mask(level); in vtd_update_iotlb()
441 entry->pasid = pasid; in vtd_update_iotlb()
442 entry->pgtt = pgtt; in vtd_update_iotlb()
444 key->gfn = gfn; in vtd_update_iotlb()
445 key->sid = source_id; in vtd_update_iotlb()
446 key->level = level; in vtd_update_iotlb()
447 key->pasid = pasid; in vtd_update_iotlb()
449 g_hash_table_replace(s->iotlb, key, entry); in vtd_update_iotlb()
468 apic_get_class(NULL)->send_msi(&msi); in vtd_generate_interrupt()
498 /* Each reg is 128-bit */ in vtd_is_frcd_set()
500 addr += 8; /* Access the high 64-bit half */ in vtd_is_frcd_set()
528 /* Each reg is 128-bit */ in vtd_set_frcd_and_update_ppf()
530 addr += 8; /* Access the high 64-bit half */ in vtd_set_frcd_and_update_ppf()
557 hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */ in vtd_try_collapse_fault()
565 addr += 16; /* 128-bit for each */ in vtd_try_collapse_fault()
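The fault-recording fragments above all lean on the same layout: each FRCD register is 128 bits, consecutive records sit 16 bytes apart, and the fault flags live in the high 64-bit half (hence the +8). A hedged sketch of the offset arithmetic, with frcd_base standing in for whatever DMAR_FRCD_REG_OFFSET resolves to:

    /* Sketch: MMIO offsets of fault recording register n, given its base.
     * Assumes the 16-byte stride and +8 high half described in the comments. */
    #include <stdint.h>

    static inline uint64_t frcd_low_offset(uint64_t frcd_base, unsigned n)
    {
        return frcd_base + 16u * n;         /* low 64 bits: faulting address */
    }

    static inline uint64_t frcd_high_offset(uint64_t frcd_base, unsigned n)
    {
        return frcd_base + 16u * n + 8;     /* high 64 bits: F bit, fault reason, SID */
    }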
588 if (vtd_is_frcd_set(s, s->next_frcd_reg)) { in vtd_report_frcd_fault()
595 vtd_record_frcd(s, s->next_frcd_reg, hi, lo); in vtd_report_frcd_fault()
600 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); in vtd_report_frcd_fault()
601 s->next_frcd_reg++; in vtd_report_frcd_fault()
602 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { in vtd_report_frcd_fault()
603 s->next_frcd_reg = 0; in vtd_report_frcd_fault()
607 VTD_FSTS_FRI(s->next_frcd_reg)); in vtd_report_frcd_fault()
608 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */ in vtd_report_frcd_fault()
609 s->next_frcd_reg++; in vtd_report_frcd_fault()
610 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { in vtd_report_frcd_fault()
611 s->next_frcd_reg = 0; in vtd_report_frcd_fault()
690 if (s->root_scalable && devfn > UINT8_MAX / 2) { in vtd_root_entry_present()
691 return re->hi & VTD_ROOT_ENTRY_P; in vtd_root_entry_present()
694 return re->lo & VTD_ROOT_ENTRY_P; in vtd_root_entry_present()
702 addr = s->root + index * sizeof(*re); in vtd_get_root_entry()
705 re->lo = 0; in vtd_get_root_entry()
706 return -VTD_FR_ROOT_TABLE_INV; in vtd_get_root_entry()
708 re->lo = le64_to_cpu(re->lo); in vtd_get_root_entry()
709 re->hi = le64_to_cpu(re->hi); in vtd_get_root_entry()
715 return context->lo & VTD_CONTEXT_ENTRY_P; in vtd_ce_present()
726 ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE : in vtd_get_context_entry_from_root()
729 if (s->root_scalable && index > UINT8_MAX / 2) { in vtd_get_context_entry_from_root()
731 addr = re->hi & VTD_ROOT_ENTRY_CTP; in vtd_get_context_entry_from_root()
733 addr = re->lo & VTD_ROOT_ENTRY_CTP; in vtd_get_context_entry_from_root()
739 return -VTD_FR_CONTEXT_TABLE_INV; in vtd_get_context_entry_from_root()
742 ce->lo = le64_to_cpu(ce->lo); in vtd_get_context_entry_from_root()
743 ce->hi = le64_to_cpu(ce->hi); in vtd_get_context_entry_from_root()
745 ce->val[2] = le64_to_cpu(ce->val[2]); in vtd_get_context_entry_from_root()
746 ce->val[3] = le64_to_cpu(ce->val[3]); in vtd_get_context_entry_from_root()
753 return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR; in vtd_ce_get_slpt_base()
777 pte = (uint64_t)-1; in vtd_get_pte()
790 ((1ULL << VTD_LEVEL_BITS) - 1); in vtd_iova_level_offset()
793 /* Check Capability Register to see if the @level of page-table is supported */
796 return VTD_CAP_SAGAW_MASK & s->cap & in vtd_is_sl_level_supported()
797 (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); in vtd_is_sl_level_supported()
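The SAGAW check above maps a page-table depth onto one bit of the capability register. Assuming the SAGAW field sits at bits 12:8 of CAP_REG (so VTD_CAP_SAGAW_SHIFT is 8, the spec layout, though the value is not shown in this listing), the arithmetic works out as:

    level 3 (39-bit AGAW): 1 << (3 - 2 + 8) = bit 9   -> the SAGAW "3-level" bit
    level 4 (48-bit AGAW): 1 << (4 - 2 + 8) = bit 10  -> the SAGAW "4-level" bit

Any other level selects a bit the emulation never advertises (a later comment in the listing notes that only the 39-bit and 48-bit SAGAW bits are set in s->cap), so the check fails.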
810 return !!(s->ecap & VTD_ECAP_FLTS); in vtd_pe_type_check()
812 return !!(s->ecap & VTD_ECAP_SLTS); in vtd_pe_type_check()
817 return !!(s->ecap & VTD_ECAP_PT); in vtd_pe_type_check()
826 return pdire->val & 1; in vtd_pdire_present()
845 return -VTD_FR_PASID_DIR_ACCESS_ERR; in vtd_get_pdire_from_pdir_table()
848 pdire->val = le64_to_cpu(pdire->val); in vtd_get_pdire_from_pdir_table()
855 return pe->val[0] & VTD_PASID_ENTRY_P; in vtd_pe_present()
872 return -VTD_FR_PASID_TABLE_ACCESS_ERR; in vtd_get_pe_in_pasid_leaf_table()
874 for (size_t i = 0; i < ARRAY_SIZE(pe->val); i++) { in vtd_get_pe_in_pasid_leaf_table()
875 pe->val[i] = le64_to_cpu(pe->val[i]); in vtd_get_pe_in_pasid_leaf_table()
880 return -VTD_FR_PASID_TABLE_ENTRY_INV; in vtd_get_pe_in_pasid_leaf_table()
886 return -VTD_FR_PASID_TABLE_ENTRY_INV; in vtd_get_pe_in_pasid_leaf_table()
891 return -VTD_FR_PASID_TABLE_ENTRY_INV; in vtd_get_pe_in_pasid_leaf_table()
906 dma_addr_t addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK; in vtd_get_pe_from_pdire()
932 return -VTD_FR_PASID_DIR_ENTRY_P; in vtd_get_pe_from_pasid_table()
941 return -VTD_FR_PASID_ENTRY_P; in vtd_get_pe_from_pasid_table()
994 return -VTD_FR_PASID_DIR_ENTRY_P; in vtd_ce_get_pasid_fpd()
1013 /* Get the page-table level that hardware should use for the second-level
1014 * page-table walk from the Address Width field of context-entry.
1018 return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW); in vtd_ce_get_level()
1027 if (s->root_scalable) { in vtd_get_iova_level()
1029 if (s->flts) { in vtd_get_iova_level()
1041 return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9; in vtd_ce_get_agaw()
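vtd_ce_get_level() and vtd_ce_get_agaw() both decode the context-entry Address Width field: the table depth is 2 + AW and the adjusted guest address width is 30 + AW * 9, so the two encodings stay in step. Worked out:

    AW = 1:  level = 2 + 1 = 3,  AGAW = 30 + 1 * 9 = 39 bits  (3-level table)
    AW = 2:  level = 2 + 2 = 4,  AGAW = 30 + 2 * 9 = 48 bits  (4-level table)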
1050 if (s->root_scalable) { in vtd_get_iova_agaw()
1060 return ce->lo & VTD_CONTEXT_ENTRY_TT; in vtd_ce_get_type()
1072 if (!x86_iommu->dt_supported) { in vtd_ce_type_check()
1078 if (!x86_iommu->pt_supported) { in vtd_ce_type_check()
1106 * Check if @iova is above 2^X-1, where X is the minimum of MGAW in vtd_iova_sl_range_check()
1107 * in CAP_REG and AW in context-entry. in vtd_iova_sl_range_check()
1109 return !(iova & ~(vtd_iova_limit(s, ce, aw, pasid) - 1)); in vtd_iova_sl_range_check()
1118 if (s->root_scalable) { in vtd_get_iova_pgtbl_base()
1120 if (s->flts) { in vtd_get_iova_pgtbl_base()
1135 * We support only 3-level and 4-level page tables (see vtd_init() which
1136 * sets only VTD_CAP_SAGAW_39bit and maybe VTD_CAP_SAGAW_48bit bits in s->cap).
1147 * We should have caught a guest-mis-programmed level earlier, in vtd_slpte_nonzero_rsvd()
1186 return -VTD_FR_ADDR_BEYOND_MGAW; in vtd_iova_to_slpte()
1196 if (slpte == (uint64_t)-1) { in vtd_iova_to_slpte()
1201 /* Invalid programming of context-entry */ in vtd_iova_to_slpte()
1202 return -VTD_FR_CONTEXT_ENTRY_INV; in vtd_iova_to_slpte()
1204 return -VTD_FR_PAGING_ENTRY_INV; in vtd_iova_to_slpte()
1215 return is_write ? -VTD_FR_WRITE : -VTD_FR_READ; in vtd_iova_to_slpte()
1218 error_report_once("%s: detected slpte reserved non-zero " in vtd_iova_to_slpte()
1222 return -VTD_FR_PAGING_ENTRY_RSVD; in vtd_iova_to_slpte()
1231 level--; in vtd_iova_to_slpte()
1245 * @as: VT-d address space of the device
1260 VTDAddressSpace *as = info->as; in vtd_page_walk_one()
1261 vtd_page_walk_hook hook_fn = info->hook_fn; in vtd_page_walk_one()
1262 void *private = info->private; in vtd_page_walk_one()
1263 IOMMUTLBEntry *entry = &event->entry; in vtd_page_walk_one()
1265 .iova = entry->iova, in vtd_page_walk_one()
1266 .size = entry->addr_mask, in vtd_page_walk_one()
1267 .translated_addr = entry->translated_addr, in vtd_page_walk_one()
1268 .perm = entry->perm, in vtd_page_walk_one()
1270 const DMAMap *mapped = iova_tree_find(as->iova_tree, &target); in vtd_page_walk_one()
1272 if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) { in vtd_page_walk_one()
1273 trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); in vtd_page_walk_one()
1280 if (event->type == IOMMU_NOTIFIER_MAP) { in vtd_page_walk_one()
1284 trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask, in vtd_page_walk_one()
1285 entry->translated_addr); in vtd_page_walk_one()
1302 IOMMUAccessFlags cache_perm = entry->perm; in vtd_page_walk_one()
1306 event->type = IOMMU_NOTIFIER_UNMAP; in vtd_page_walk_one()
1307 entry->perm = IOMMU_NONE; in vtd_page_walk_one()
1308 trace_vtd_page_walk_one(info->domain_id, in vtd_page_walk_one()
1309 entry->iova, in vtd_page_walk_one()
1310 entry->translated_addr, in vtd_page_walk_one()
1311 entry->addr_mask, in vtd_page_walk_one()
1312 entry->perm); in vtd_page_walk_one()
1318 iova_tree_remove(as->iova_tree, target); in vtd_page_walk_one()
1320 event->type = IOMMU_NOTIFIER_MAP; in vtd_page_walk_one()
1321 entry->perm = cache_perm; in vtd_page_walk_one()
1324 iova_tree_insert(as->iova_tree, &target); in vtd_page_walk_one()
1328 trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); in vtd_page_walk_one()
1331 iova_tree_remove(as->iova_tree, target); in vtd_page_walk_one()
1334 trace_vtd_page_walk_one(info->domain_id, entry->iova, in vtd_page_walk_one()
1335 entry->translated_addr, entry->addr_mask, in vtd_page_walk_one()
1336 entry->perm); in vtd_page_walk_one()
1341 * vtd_page_walk_level - walk over specific level for IOVA range
1374 if (slpte == (uint64_t)-1) { in vtd_page_walk_level()
1400 ret = vtd_page_walk_level(vtd_get_pte_addr(slpte, info->aw), in vtd_page_walk_level()
1401 iova, MIN(iova_next, end), level - 1, in vtd_page_walk_level()
1417 event.entry.translated_addr = vtd_get_pte_addr(slpte, info->aw); in vtd_page_walk_level()
1435 * vtd_page_walk - walk specific IOVA range, and call the hook
1451 if (!vtd_iova_sl_range_check(s, start, ce, info->aw, pasid)) { in vtd_page_walk()
1452 return -VTD_FR_ADDR_BEYOND_MGAW; in vtd_page_walk()
1455 if (!vtd_iova_sl_range_check(s, end, ce, info->aw, pasid)) { in vtd_page_walk()
1457 end = vtd_iova_limit(s, ce, info->aw, pasid); in vtd_page_walk()
1467 if (!s->root_scalable && in vtd_root_entry_rsvd_bits_check()
1468 (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) in vtd_root_entry_rsvd_bits_check()
1472 if (s->root_scalable && in vtd_root_entry_rsvd_bits_check()
1473 ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) || in vtd_root_entry_rsvd_bits_check()
1474 (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) in vtd_root_entry_rsvd_bits_check()
1482 __func__, re->hi, re->lo); in vtd_root_entry_rsvd_bits_check()
1483 return -VTD_FR_ROOT_ENTRY_RSVD; in vtd_root_entry_rsvd_bits_check()
1489 if (!s->root_scalable && in vtd_context_entry_rsvd_bits_check()
1490 (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI || in vtd_context_entry_rsvd_bits_check()
1491 ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { in vtd_context_entry_rsvd_bits_check()
1494 __func__, ce->hi, ce->lo); in vtd_context_entry_rsvd_bits_check()
1495 return -VTD_FR_CONTEXT_ENTRY_RSVD; in vtd_context_entry_rsvd_bits_check()
1498 if (s->root_scalable && in vtd_context_entry_rsvd_bits_check()
1499 (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) || in vtd_context_entry_rsvd_bits_check()
1500 ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 || in vtd_context_entry_rsvd_bits_check()
1501 ce->val[2] || in vtd_context_entry_rsvd_bits_check()
1502 ce->val[3])) { in vtd_context_entry_rsvd_bits_check()
1507 __func__, ce->val[3], ce->val[2], in vtd_context_entry_rsvd_bits_check()
1508 ce->val[1], ce->val[0]); in vtd_context_entry_rsvd_bits_check()
1509 return -VTD_FR_CONTEXT_ENTRY_RSVD; in vtd_context_entry_rsvd_bits_check()
1528 /* Map a device to its corresponding domain (context-entry) */
1542 /* Not error - it's okay we don't have root entry. */ in vtd_dev_to_context_entry()
1544 return -VTD_FR_ROOT_ENTRY_P; in vtd_dev_to_context_entry()
1558 /* Not error - it's okay we don't have context entry. */ in vtd_dev_to_context_entry()
1560 return -VTD_FR_CONTEXT_ENTRY_P; in vtd_dev_to_context_entry()
1568 /* Check if the programming of context-entry is valid */ in vtd_dev_to_context_entry()
1569 if (!s->root_scalable && in vtd_dev_to_context_entry()
1573 __func__, ce->hi, ce->lo, in vtd_dev_to_context_entry()
1575 return -VTD_FR_CONTEXT_ENTRY_INV; in vtd_dev_to_context_entry()
1578 if (!s->root_scalable) { in vtd_dev_to_context_entry()
1582 return -VTD_FR_CONTEXT_ENTRY_INV; in vtd_dev_to_context_entry()
1586 * Check if the programming of context-entry.rid2pasid in vtd_dev_to_context_entry()
1613 if (s->root_scalable) { in vtd_get_domain_id()
1618 return VTD_CONTEXT_ENTRY_DID(ce->hi); in vtd_get_domain_id()
1625 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_sync_shadow_page_table_range()
1628 .private = (void *)&vtd_as->iommu, in vtd_sync_shadow_page_table_range()
1630 .aw = s->aw_bits, in vtd_sync_shadow_page_table_range()
1632 .domain_id = vtd_get_domain_id(s, ce, vtd_as->pasid), in vtd_sync_shadow_page_table_range()
1635 return vtd_page_walk(s, ce, addr, addr + size, &info, vtd_as->pasid); in vtd_sync_shadow_page_table_range()
1646 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { in vtd_address_space_sync()
1652 ret = vtd_dev_to_context_entry(vtd_as->iommu_state, in vtd_address_space_sync()
1653 pci_bus_num(vtd_as->bus), in vtd_address_space_sync()
1654 vtd_as->devfn, &ce); in vtd_address_space_sync()
1656 if (ret == -VTD_FR_CONTEXT_ENTRY_P) { in vtd_address_space_sync()
1666 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { in vtd_address_space_sync()
1679 translation for DMA requests. In Scalable Mode, whether it bypasses
1680 1st-level or 2nd-level translation depends
1689 if (s->root_scalable) { in vtd_dev_pt_enabled()
1712 s = as->iommu_state; in vtd_as_pt_enabled()
1713 if (vtd_dev_to_context_entry(s, pci_bus_num(as->bus), as->devfn, in vtd_as_pt_enabled()
1724 return vtd_dev_pt_enabled(s, &ce, as->pasid); in vtd_as_pt_enabled()
1734 use_iommu = as->iommu_state->dmar_enabled && !vtd_as_pt_enabled(as); in vtd_switch_address_space()
1735 pt = as->iommu_state->dmar_enabled && vtd_as_pt_enabled(as); in vtd_switch_address_space()
1737 trace_vtd_switch_address_space(pci_bus_num(as->bus), in vtd_switch_address_space()
1738 VTD_PCI_SLOT(as->devfn), in vtd_switch_address_space()
1739 VTD_PCI_FUNC(as->devfn), in vtd_switch_address_space()
1751 memory_region_set_enabled(&as->nodmar, false); in vtd_switch_address_space()
1752 memory_region_set_enabled(MEMORY_REGION(&as->iommu), true); in vtd_switch_address_space()
1754 * vt-d spec v3.4 3.14: in vtd_switch_address_space()
1757 * Requests-with-PASID with input address in range 0xFEEx_xxxx in vtd_switch_address_space()
1758 * are translated normally like any other request-with-PASID in vtd_switch_address_space()
1759 * through DMA-remapping hardware. in vtd_switch_address_space()
1764 if (as->pasid != PCI_NO_PASID) { in vtd_switch_address_space()
1765 memory_region_set_enabled(&as->iommu_ir, false); in vtd_switch_address_space()
1767 memory_region_set_enabled(&as->iommu_ir, true); in vtd_switch_address_space()
1770 memory_region_set_enabled(MEMORY_REGION(&as->iommu), false); in vtd_switch_address_space()
1771 memory_region_set_enabled(&as->nodmar, true); in vtd_switch_address_space()
1775 * vtd-spec v3.4 3.14: in vtd_switch_address_space()
1778 * Requests-with-PASID with input address in range 0xFEEx_xxxx are in vtd_switch_address_space()
1779 * translated normally like any other request-with-PASID through in vtd_switch_address_space()
1780 * DMA-remapping hardware. However, if such a request is processed in vtd_switch_address_space()
1781 * using pass-through translation, it will be blocked as described in vtd_switch_address_space()
1784 * Software must not program paging-structure entries to remap any in vtd_switch_address_space()
1785 * address to the interrupt address range. Untranslated requests in vtd_switch_address_space()
1786 * and translation requests that result in an address in the in vtd_switch_address_space()
1794 if (pt && as->pasid != PCI_NO_PASID) { in vtd_switch_address_space()
1795 memory_region_set_enabled(&as->iommu_ir_fault, true); in vtd_switch_address_space()
1797 memory_region_set_enabled(&as->iommu_ir_fault, false); in vtd_switch_address_space()
1808 g_hash_table_iter_init(&iter, s->vtd_address_spaces); in vtd_switch_address_space_all()
1847 * only if the FPD field in the context-entry used to process the faulting
1865 uint16_t sid = PCI_BUILD_BDF(pci_bus_num(as_key->bus), as_key->devfn); in vtd_find_as_by_sid_and_pasid()
1867 return (as_key->pasid == target->pasid) && (sid == target->sid); in vtd_find_as_by_sid_and_pasid()
1879 return g_hash_table_find(s->vtd_address_spaces, in vtd_get_as_by_sid_and_pasid()
1912 * We support only 4-level page tables.
1923 * We should have caught a guest-mis-programmed level earlier, in vtd_flpte_nonzero_rsvd()
1953 uint64_t iova_limit = vtd_iova_limit(s, ce, s->aw_bits, pasid); in vtd_iova_fl_check_canonical()
1954 uint64_t upper_bits_mask = ~(iova_limit - 1); in vtd_iova_fl_check_canonical()
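The canonical check above is the usual sign-extension rule applied at the first-stage address width: with iova_limit = 1 << AGAW, the bits selected by ~(iova_limit - 1) must all be copies of bit AGAW-1. A small sketch of that test in isolation (this mirrors, rather than reproduces, vtd_iova_fl_check_canonical()):

    /* Sketch only: canonical means bits [63 : agaw-1] are a sign extension
     * of bit agaw-1.  Assumes agaw < 64. */
    #include <stdint.h>

    static inline int iova_is_canonical(uint64_t iova, unsigned agaw)
    {
        uint64_t limit = 1ULL << agaw;           /* e.g. 1ULL << 48 */
        uint64_t upper = iova & ~(limit - 1);    /* bits 63 .. agaw */
        int sign = !!(iova & (limit >> 1));      /* bit agaw-1 */

        return sign ? (upper == ~(limit - 1)) : (upper == 0);
    }

With a 48-bit width, 0xffff800000000000 passes and 0x0000800000000000 does not.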
1997 return -VTD_FR_FS_NON_CANONICAL; in vtd_iova_to_flpte()
2004 if (flpte == (uint64_t)-1) { in vtd_iova_to_flpte()
2006 /* Invalid programming of pasid-entry */ in vtd_iova_to_flpte()
2007 return -VTD_FR_PASID_ENTRY_FSPTPTR_INV; in vtd_iova_to_flpte()
2009 return -VTD_FR_FS_PAGING_ENTRY_INV; in vtd_iova_to_flpte()
2016 return -VTD_FR_FS_PAGING_ENTRY_P; in vtd_iova_to_flpte()
2023 return -VTD_FR_FS_PAGING_ENTRY_US; in vtd_iova_to_flpte()
2029 return -VTD_FR_SM_WRITE; in vtd_iova_to_flpte()
2032 error_report_once("%s: detected flpte reserved non-zero " in vtd_iova_to_flpte()
2036 return -VTD_FR_FS_PAGING_ENTRY_RSVD; in vtd_iova_to_flpte()
2044 return -VTD_FR_FS_BIT_UPDATE_FAILED; in vtd_iova_to_flpte()
2054 level--; in vtd_iova_to_flpte()
2074 /* Map dev to context-entry then do a paging-structures walk to do an iommu
2090 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_do_iommu_translate()
2095 uint32_t level, pasid = vtd_as->pasid; in vtd_do_iommu_translate()
2102 bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable; in vtd_do_iommu_translate()
2108 * should never receive translation requests in this region. in vtd_do_iommu_translate()
2114 cc_entry = &vtd_as->context_cache_entry; in vtd_do_iommu_translate()
2120 trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte, in vtd_do_iommu_translate()
2121 iotlb_entry->domain_id); in vtd_do_iommu_translate()
2122 pte = iotlb_entry->pte; in vtd_do_iommu_translate()
2123 access_flags = iotlb_entry->access_flags; in vtd_do_iommu_translate()
2124 page_mask = iotlb_entry->mask; in vtd_do_iommu_translate()
2129 /* Try to fetch context-entry from cache first */ in vtd_do_iommu_translate()
2130 if (cc_entry->context_cache_gen == s->context_cache_gen) { in vtd_do_iommu_translate()
2131 trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi, in vtd_do_iommu_translate()
2132 cc_entry->context_entry.lo, in vtd_do_iommu_translate()
2133 cc_entry->context_cache_gen); in vtd_do_iommu_translate()
2134 ce = cc_entry->context_entry; in vtd_do_iommu_translate()
2136 if (!is_fpd_set && s->root_scalable) { in vtd_do_iommu_translate()
2139 vtd_report_fault(s, -ret_fr, is_fpd_set, in vtd_do_iommu_translate()
2148 if (!ret_fr && !is_fpd_set && s->root_scalable) { in vtd_do_iommu_translate()
2152 vtd_report_fault(s, -ret_fr, is_fpd_set, in vtd_do_iommu_translate()
2157 /* Update context-cache */ in vtd_do_iommu_translate()
2159 cc_entry->context_cache_gen, in vtd_do_iommu_translate()
2160 s->context_cache_gen); in vtd_do_iommu_translate()
2161 cc_entry->context_entry = ce; in vtd_do_iommu_translate()
2162 cc_entry->context_cache_gen = s->context_cache_gen; in vtd_do_iommu_translate()
2170 * We don't need to translate for pass-through context entries. in vtd_do_iommu_translate()
2174 entry->iova = addr & VTD_PAGE_MASK_4K; in vtd_do_iommu_translate()
2175 entry->translated_addr = entry->iova; in vtd_do_iommu_translate()
2176 entry->addr_mask = ~VTD_PAGE_MASK_4K; in vtd_do_iommu_translate()
2177 entry->perm = IOMMU_RW; in vtd_do_iommu_translate()
2178 trace_vtd_translate_pt(source_id, entry->iova); in vtd_do_iommu_translate()
2181 * When this happens, it means firstly caching-mode is not in vtd_do_iommu_translate()
2198 trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte, in vtd_do_iommu_translate()
2199 iotlb_entry->domain_id); in vtd_do_iommu_translate()
2200 pte = iotlb_entry->pte; in vtd_do_iommu_translate()
2201 access_flags = iotlb_entry->access_flags; in vtd_do_iommu_translate()
2202 page_mask = iotlb_entry->mask; in vtd_do_iommu_translate()
2207 if (s->flts && s->root_scalable) { in vtd_do_iommu_translate()
2209 &reads, &writes, s->aw_bits, pasid); in vtd_do_iommu_translate()
2213 &reads, &writes, s->aw_bits, pasid); in vtd_do_iommu_translate()
2217 xlat = vtd_get_pte_addr(pte, s->aw_bits); in vtd_do_iommu_translate()
2221 * Per VT-d spec 4.1 section 3.15: Untranslated requests and translation in vtd_do_iommu_translate()
2222 * requests that result in an address in the interrupt range will be in vtd_do_iommu_translate()
2226 xlat + size - 1 >= VTD_INTERRUPT_ADDR_FIRST)) { in vtd_do_iommu_translate()
2234 ret_fr = s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR : in vtd_do_iommu_translate()
2235 -VTD_FR_INTERRUPT_ADDR; in vtd_do_iommu_translate()
2240 vtd_report_fault(s, -ret_fr, is_fpd_set, source_id, in vtd_do_iommu_translate()
2251 entry->iova = addr & page_mask; in vtd_do_iommu_translate()
2252 entry->translated_addr = vtd_get_pte_addr(pte, s->aw_bits) & page_mask; in vtd_do_iommu_translate()
2253 entry->addr_mask = ~page_mask; in vtd_do_iommu_translate()
2254 entry->perm = access_flags; in vtd_do_iommu_translate()
2259 entry->iova = 0; in vtd_do_iommu_translate()
2260 entry->translated_addr = 0; in vtd_do_iommu_translate()
2261 entry->addr_mask = 0; in vtd_do_iommu_translate()
2262 entry->perm = IOMMU_NONE; in vtd_do_iommu_translate()
2268 s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG); in vtd_root_table_setup()
2269 s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits); in vtd_root_table_setup()
2273 trace_vtd_reg_dmar_root(s->root, s->root_scalable); in vtd_root_table_setup()
2286 s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1); in vtd_interrupt_remap_table_setup()
2287 s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits); in vtd_interrupt_remap_table_setup()
2288 s->intr_eime = value & VTD_IRTA_EIME; in vtd_interrupt_remap_table_setup()
2293 trace_vtd_reg_ir_root(s->intr_root, s->intr_size); in vtd_interrupt_remap_table_setup()
2300 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_iommu_replay_all()
2310 s->context_cache_gen++; in vtd_context_global_invalidate()
2311 if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) { in vtd_context_global_invalidate()
2317 * From VT-d spec 6.5.2.1, a global context entry invalidation in vtd_context_global_invalidate()
2321 * VT-d emulation codes. in vtd_context_global_invalidate()
2326 /* Do a context-cache device-selective invalidation.
2361 g_hash_table_iter_init(&as_it, s->vtd_address_spaces); in vtd_context_device_invalidate()
2363 if ((pci_bus_num(vtd_as->bus) == bus_n) && in vtd_context_device_invalidate()
2364 (vtd_as->devfn & mask) == (devfn & mask)) { in vtd_context_device_invalidate()
2365 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(vtd_as->devfn), in vtd_context_device_invalidate()
2366 VTD_PCI_FUNC(vtd_as->devfn)); in vtd_context_device_invalidate()
2368 vtd_as->context_cache_entry.context_cache_gen = 0; in vtd_context_device_invalidate()
2379 * notifier registered - the IOMMU notification in vtd_context_device_invalidate()
2388 /* Context-cache invalidation
2433 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain, in vtd_iotlb_domain_invalidate()
2437 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_iotlb_domain_invalidate()
2438 if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), in vtd_iotlb_domain_invalidate()
2439 vtd_as->devfn, &ce) && in vtd_iotlb_domain_invalidate()
2440 domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { in vtd_iotlb_domain_invalidate()
2460 QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { in vtd_iotlb_page_invalidate_notify()
2461 ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), in vtd_iotlb_page_invalidate_notify()
2462 vtd_as->devfn, &ce); in vtd_iotlb_page_invalidate_notify()
2463 if (!ret && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { in vtd_iotlb_page_invalidate_notify()
2466 if (s->root_scalable) { in vtd_iotlb_page_invalidate_notify()
2471 * In legacy mode, vtd_as->pasid == pasid is always true. in vtd_iotlb_page_invalidate_notify()
2476 if (!(vtd_as->pasid == pasid || in vtd_iotlb_page_invalidate_notify()
2477 (vtd_as->pasid == PCI_NO_PASID && pasid == rid2pasid))) { in vtd_iotlb_page_invalidate_notify()
2483 * When stage-1 translation is off, as long as we have MAP in vtd_iotlb_page_invalidate_notify()
2489 if (!s->flts || !s->root_scalable) { in vtd_iotlb_page_invalidate_notify()
2494 * For UNMAP-only notifiers, we don't need to walk the in vtd_iotlb_page_invalidate_notify()
2504 .addr_mask = size - 1, in vtd_iotlb_page_invalidate_notify()
2508 memory_region_notify_iommu(&vtd_as->iommu, 0, event); in vtd_iotlb_page_invalidate_notify()
2524 info.mask = ~((1 << am) - 1); in vtd_iotlb_page_invalidate()
2526 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); in vtd_iotlb_page_invalidate()
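The page-selective invalidation above turns the descriptor's address-mask field into a GFN mask: info.mask = ~((1 << am) - 1) clears the low am bits, so the request covers 2^am contiguous 4 KiB pages aligned on that same boundary. For example:

    am = 0:  mask = ~0x0   -> exactly 1 page   (4 KiB)
    am = 3:  mask = ~0x7   -> 8 pages          (32 KiB, 32 KiB aligned)
    am = 9:  mask = ~0x1ff -> 512 pages        (2 MiB, 2 MiB aligned)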
2582 return s->qi_enabled && (s->iq_tail == s->iq_head) && in vtd_queued_inv_disable_check()
2583 (s->iq_last_desc_type == VTD_INV_DESC_WAIT); in vtd_queued_inv_disable_check()
2593 s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); in vtd_handle_gcmd_qie()
2595 s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0)); in vtd_handle_gcmd_qie()
2596 s->qi_enabled = true; in vtd_handle_gcmd_qie()
2597 trace_vtd_inv_qi_setup(s->iq, s->iq_size); in vtd_handle_gcmd_qie()
2598 /* Ok - report back to driver */ in vtd_handle_gcmd_qie()
2601 if (s->iq_tail != 0) { in vtd_handle_gcmd_qie()
2607 trace_vtd_warn_invalid_qi_tail(s->iq_tail); in vtd_handle_gcmd_qie()
2616 s->iq_head = 0; in vtd_handle_gcmd_qie()
2617 s->qi_enabled = false; in vtd_handle_gcmd_qie()
2618 /* Ok - report back to driver */ in vtd_handle_gcmd_qie()
2624 s->iq_head, s->iq_tail, s->iq_last_desc_type); in vtd_handle_gcmd_qie()
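In vtd_handle_gcmd_qie() the queue size comes out of the IQA register's QS field as 1UL << (QS + 8 - (iq_dw ? 1 : 0)) entries, i.e. one 4 KiB page of descriptors per QS step: 4096 / 16 = 256 entries with 128-bit descriptors, 4096 / 32 = 128 with 256-bit ones. For instance:

    QS = 0, 128-bit descriptors:  1 << 8  = 256 entries   (one 4 KiB page)
    QS = 0, 256-bit descriptors:  1 << 7  = 128 entries   (one 4 KiB page)
    QS = 3, 128-bit descriptors:  1 << 11 = 2048 entries  (eight 4 KiB pages)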
2633 /* Ok - report back to driver */ in vtd_handle_gcmd_srtp()
2643 /* Ok - report back to driver */ in vtd_handle_gcmd_sirtp()
2650 if (s->dmar_enabled == en) { in vtd_handle_gcmd_te()
2657 s->dmar_enabled = true; in vtd_handle_gcmd_te()
2658 /* Ok - report back to driver */ in vtd_handle_gcmd_te()
2661 s->dmar_enabled = false; in vtd_handle_gcmd_te()
2664 s->next_frcd_reg = 0; in vtd_handle_gcmd_te()
2665 /* Ok - report back to driver */ in vtd_handle_gcmd_te()
2679 s->intr_enabled = true; in vtd_handle_gcmd_ire()
2680 /* Ok - report back to driver */ in vtd_handle_gcmd_ire()
2683 s->intr_enabled = false; in vtd_handle_gcmd_ire()
2684 /* Ok - report back to driver */ in vtd_handle_gcmd_ire()
2698 if ((changed & VTD_GCMD_TE) && s->dma_translation) { in vtd_handle_gcmd_write()
2703 /* Set/update the root-table pointer */ in vtd_handle_gcmd_write()
2711 /* Set/update the interrupt remapping root-table pointer */ in vtd_handle_gcmd_write()
2727 /* Context-cache invalidation request */ in vtd_handle_ccmd_write()
2729 if (s->qi_enabled) { in vtd_handle_ccmd_write()
2731 "should not use register-based invalidation"); in vtd_handle_ccmd_write()
2750 if (s->qi_enabled) { in vtd_handle_iotlb_write()
2752 "should not use register-based invalidation"); in vtd_handle_iotlb_write()
2767 dma_addr_t base_addr = s->iq; in vtd_get_inv_desc()
2768 uint32_t offset = s->iq_head; in vtd_get_inv_desc()
2769 uint32_t dw = s->iq_dw ? 32 : 16; in vtd_get_inv_desc()
2777 inv_desc->lo = le64_to_cpu(inv_desc->lo); in vtd_get_inv_desc()
2778 inv_desc->hi = le64_to_cpu(inv_desc->hi); in vtd_get_inv_desc()
2780 inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]); in vtd_get_inv_desc()
2781 inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]); in vtd_get_inv_desc()
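vtd_get_inv_desc() picks a descriptor width of 16 or 32 bytes depending on whether the 256-bit descriptor format (iq_dw) is active, and the two extra qwords are only converted in the wide case. The fetch address is presumably base + head * width; a hedged sketch of just that addressing (the helper name is illustrative, not QEMU's):

    /* Illustrative only: guest-physical address of invalidation descriptor `head`. */
    #include <stdint.h>

    static inline uint64_t inv_desc_addr(uint64_t iq_base, uint32_t head, int iq_dw)
    {
        uint32_t dw = iq_dw ? 32 : 16;      /* 256-bit vs. 128-bit descriptors */
        return iq_base + (uint64_t)head * dw;
    }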
2792 if (s->iq_dw) { in vtd_inv_desc_reserved_check()
2793 if (inv_desc->val[0] & mask[0] || inv_desc->val[1] & mask[1] || in vtd_inv_desc_reserved_check()
2794 inv_desc->val[2] & mask[2] || inv_desc->val[3] & mask[3]) { in vtd_inv_desc_reserved_check()
2798 func_name, desc_type, inv_desc->val[3], in vtd_inv_desc_reserved_check()
2799 inv_desc->val[2], inv_desc->val[1], in vtd_inv_desc_reserved_check()
2800 inv_desc->val[0]); in vtd_inv_desc_reserved_check()
2805 error_report("%s: 256-bit %s desc in 128-bit invalidation queue", in vtd_inv_desc_reserved_check()
2810 if (inv_desc->lo & mask[0] || inv_desc->hi & mask[1]) { in vtd_inv_desc_reserved_check()
2813 inv_desc->hi, inv_desc->lo); in vtd_inv_desc_reserved_check()
2831 if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { in vtd_process_wait_desc()
2833 uint32_t status_data = (uint32_t)(inv_desc->lo >> in vtd_process_wait_desc()
2836 assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); in vtd_process_wait_desc()
2839 dma_addr_t status_addr = inv_desc->hi; in vtd_process_wait_desc()
2845 trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); in vtd_process_wait_desc()
2848 } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { in vtd_process_wait_desc()
2853 " (unknown type)", __func__, inv_desc->hi, in vtd_process_wait_desc()
2854 inv_desc->lo); in vtd_process_wait_desc()
2872 switch (inv_desc->lo & VTD_INV_DESC_CC_G) { in vtd_process_context_cache_desc()
2875 (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo)); in vtd_process_context_cache_desc()
2882 sid = VTD_INV_DESC_CC_SID(inv_desc->lo); in vtd_process_context_cache_desc()
2883 fmask = VTD_INV_DESC_CC_FM(inv_desc->lo); in vtd_process_context_cache_desc()
2889 " (invalid type)", __func__, inv_desc->hi, in vtd_process_context_cache_desc()
2890 inv_desc->lo); in vtd_process_context_cache_desc()
2909 switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) { in vtd_process_iotlb_desc()
2915 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); in vtd_process_iotlb_desc()
2920 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); in vtd_process_iotlb_desc()
2921 addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi); in vtd_process_iotlb_desc()
2922 am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi); in vtd_process_iotlb_desc()
2926 __func__, inv_desc->hi, inv_desc->lo, in vtd_process_iotlb_desc()
2936 __func__, inv_desc->hi, inv_desc->lo, in vtd_process_iotlb_desc()
2937 inv_desc->lo & VTD_INV_DESC_IOTLB_G); in vtd_process_iotlb_desc()
2949 return ((entry->domain_id == info->domain_id) && in vtd_hash_remove_by_pasid()
2950 (entry->pasid == info->pasid)); in vtd_hash_remove_by_pasid()
2964 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid, in vtd_piotlb_pasid_invalidate()
2968 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_piotlb_pasid_invalidate()
2969 if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), in vtd_piotlb_pasid_invalidate()
2970 vtd_as->devfn, &ce) && in vtd_piotlb_pasid_invalidate()
2971 domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { in vtd_piotlb_pasid_invalidate()
2974 if ((vtd_as->pasid != PCI_NO_PASID || pasid != rid2pasid) && in vtd_piotlb_pasid_invalidate()
2975 vtd_as->pasid != pasid) { in vtd_piotlb_pasid_invalidate()
2979 if (!s->flts || !vtd_as_has_map_notifier(vtd_as)) { in vtd_piotlb_pasid_invalidate()
2994 info.mask = ~((1 << am) - 1); in vtd_piotlb_page_invalidate()
2997 g_hash_table_foreach_remove(s->iotlb, in vtd_piotlb_page_invalidate()
3020 domain_id = VTD_INV_DESC_PIOTLB_DID(inv_desc->val[0]); in vtd_process_piotlb_desc()
3021 pasid = VTD_INV_DESC_PIOTLB_PASID(inv_desc->val[0]); in vtd_process_piotlb_desc()
3022 switch (inv_desc->val[0] & VTD_INV_DESC_PIOTLB_G) { in vtd_process_piotlb_desc()
3028 am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]); in vtd_process_piotlb_desc()
3029 addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]); in vtd_process_piotlb_desc()
3036 __func__, inv_desc->val[1], inv_desc->val[0], in vtd_process_piotlb_desc()
3037 inv_desc->val[0] & VTD_INV_DESC_IOTLB_G); in vtd_process_piotlb_desc()
3054 trace_vtd_inv_desc_iec(inv_desc->iec.granularity, in vtd_process_inv_iec_desc()
3055 inv_desc->iec.index, in vtd_process_inv_iec_desc()
3056 inv_desc->iec.index_mask); in vtd_process_inv_iec_desc()
3058 vtd_iec_notify_all(s, !inv_desc->iec.granularity, in vtd_process_inv_iec_desc()
3059 inv_desc->iec.index, in vtd_process_inv_iec_desc()
3060 inv_desc->iec.index_mask); in vtd_process_inv_iec_desc()
3082 addr &= ~(sz - 1); in do_invalidate_device_tlb()
3088 event.entry.target_as = &vtd_dev_as->as; in do_invalidate_device_tlb()
3089 event.entry.addr_mask = sz - 1; in do_invalidate_device_tlb()
3093 memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event); in do_invalidate_device_tlb()
3114 global = VTD_INV_DESC_PASID_DEVICE_IOTLB_GLOBAL(inv_desc->hi); in vtd_process_device_piotlb_desc()
3115 size = VTD_INV_DESC_PASID_DEVICE_IOTLB_SIZE(inv_desc->hi); in vtd_process_device_piotlb_desc()
3116 addr = VTD_INV_DESC_PASID_DEVICE_IOTLB_ADDR(inv_desc->hi); in vtd_process_device_piotlb_desc()
3117 sid = VTD_INV_DESC_PASID_DEVICE_IOTLB_SID(inv_desc->lo); in vtd_process_device_piotlb_desc()
3119 QLIST_FOREACH(vtd_dev_as, &s->vtd_as_with_notifiers, next) { in vtd_process_device_piotlb_desc()
3120 if ((vtd_dev_as->pasid != PCI_NO_PASID) && in vtd_process_device_piotlb_desc()
3121 (PCI_BUILD_BDF(pci_bus_num(vtd_dev_as->bus), in vtd_process_device_piotlb_desc()
3122 vtd_dev_as->devfn) == sid)) { in vtd_process_device_piotlb_desc()
3127 pasid = VTD_INV_DESC_PASID_DEVICE_IOTLB_PASID(inv_desc->lo); in vtd_process_device_piotlb_desc()
3151 __func__, "dev-iotlb inv")) { in vtd_process_device_iotlb_desc()
3155 addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); in vtd_process_device_iotlb_desc()
3156 sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); in vtd_process_device_iotlb_desc()
3157 size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); in vtd_process_device_iotlb_desc()
3179 trace_vtd_inv_qi_head(s->iq_head); in vtd_process_inv_desc()
3181 s->iq_last_desc_type = VTD_INV_DESC_NONE; in vtd_process_inv_desc()
3187 s->iq_last_desc_type = desc_type; in vtd_process_inv_desc()
3191 trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo); in vtd_process_inv_desc()
3205 trace_vtd_inv_desc("p-iotlb", inv_desc.val[1], inv_desc.val[0]); in vtd_process_inv_desc()
3226 trace_vtd_inv_desc("device-piotlb", inv_desc.hi, inv_desc.lo); in vtd_process_inv_desc()
3245 if (s->scalable_mode) { in vtd_process_inv_desc()
3255 s->iq_head++; in vtd_process_inv_desc()
3256 if (s->iq_head == s->iq_size) { in vtd_process_inv_desc()
3257 s->iq_head = 0; in vtd_process_inv_desc()
3267 /* Refer to 10.4.23 of VT-d spec 3.0 */ in vtd_fetch_inv_desc()
3268 qi_shift = s->iq_dw ? VTD_IQH_QH_SHIFT_5 : VTD_IQH_QH_SHIFT_4; in vtd_fetch_inv_desc()
3272 if (s->iq_tail >= s->iq_size) { in vtd_fetch_inv_desc()
3276 __func__, s->iq_tail, s->iq_size); in vtd_fetch_inv_desc()
3280 while (s->iq_head != s->iq_tail) { in vtd_fetch_inv_desc()
3288 (((uint64_t)(s->iq_head)) << qi_shift) & in vtd_fetch_inv_desc()
3298 if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { in vtd_handle_iqt_write()
3304 s->iq_tail = VTD_IQT_QT(s->iq_dw, val); in vtd_handle_iqt_write()
3305 trace_vtd_inv_qi_tail(s->iq_tail); in vtd_handle_iqt_write()
3307 if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { in vtd_handle_iqt_write()
3383 return (uint64_t)-1; in vtd_mem_read()
3387 /* Root Table Address Register, 64-bit */ in vtd_mem_read()
3391 val = val & ((1ULL << 32) - 1); in vtd_mem_read()
3400 /* Invalidation Queue Address Register, 64-bit */ in vtd_mem_read()
3402 val = s->iq | in vtd_mem_read()
3406 val = val & ((1ULL << 32) - 1); in vtd_mem_read()
3412 val = s->iq >> 32; in vtd_mem_read()
3440 /* Global Command Register, 32-bit */ in vtd_mem_write()
3446 /* Context Command Register, 64-bit */ in vtd_mem_write()
3462 /* IOTLB Invalidation Register, 64-bit */ in vtd_mem_write()
3478 /* Invalidate Address Register, 64-bit */ in vtd_mem_write()
3492 /* Fault Status Register, 32-bit */ in vtd_mem_write()
3499 /* Fault Event Control Register, 32-bit */ in vtd_mem_write()
3506 /* Fault Event Data Register, 32-bit */ in vtd_mem_write()
3512 /* Fault Event Address Register, 32-bit */ in vtd_mem_write()
3518 * While the register is 32-bit only, some guests (Xen...) write to in vtd_mem_write()
3519 * it with 64-bit. in vtd_mem_write()
3525 /* Fault Event Upper Address Register, 32-bit */ in vtd_mem_write()
3531 /* Protected Memory Enable Register, 32-bit */ in vtd_mem_write()
3537 /* Root Table Address Register, 64-bit */ in vtd_mem_write()
3551 /* Invalidation Queue Tail Register, 64-bit */ in vtd_mem_write()
3567 /* Invalidation Queue Address Register, 64-bit */ in vtd_mem_write()
3582 /* Invalidation Completion Status Register, 32-bit */ in vtd_mem_write()
3589 /* Invalidation Event Control Register, 32-bit */ in vtd_mem_write()
3596 /* Invalidation Event Data Register, 32-bit */ in vtd_mem_write()
3602 /* Invalidation Event Address Register, 32-bit */ in vtd_mem_write()
3608 /* Invalidation Event Upper Address Register, 32-bit */ in vtd_mem_write()
3614 /* Fault Recording Registers, 128-bit */ in vtd_mem_write()
3671 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_iommu_translate()
3678 if (likely(s->dmar_enabled)) { in vtd_iommu_translate()
3679 success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, in vtd_iommu_translate()
3682 /* DMAR disabled, passthrough, use 4k-page */ in vtd_iommu_translate()
3691 trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus), in vtd_iommu_translate()
3692 VTD_PCI_SLOT(vtd_as->devfn), in vtd_iommu_translate()
3693 VTD_PCI_FUNC(vtd_as->devfn), in vtd_iommu_translate()
3699 __func__, pci_bus_num(vtd_as->bus), in vtd_iommu_translate()
3700 VTD_PCI_SLOT(vtd_as->devfn), in vtd_iommu_translate()
3701 VTD_PCI_FUNC(vtd_as->devfn), in vtd_iommu_translate()
3714 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_iommu_notify_flag_changed()
3718 if (s->snoop_control) { in vtd_iommu_notify_flag_changed()
3721 return -ENOTSUP; in vtd_iommu_notify_flag_changed()
3723 if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) { in vtd_iommu_notify_flag_changed()
3726 pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), in vtd_iommu_notify_flag_changed()
3727 PCI_FUNC(vtd_as->devfn)); in vtd_iommu_notify_flag_changed()
3728 return -ENOTSUP; in vtd_iommu_notify_flag_changed()
3730 if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) { in vtd_iommu_notify_flag_changed()
3733 pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), in vtd_iommu_notify_flag_changed()
3734 PCI_FUNC(vtd_as->devfn)); in vtd_iommu_notify_flag_changed()
3735 return -ENOTSUP; in vtd_iommu_notify_flag_changed()
3738 /* Update per-address-space notifier flags */ in vtd_iommu_notify_flag_changed()
3739 vtd_as->notifier_flags = new; in vtd_iommu_notify_flag_changed()
3742 QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next); in vtd_iommu_notify_flag_changed()
3775 .name = "iommu-intel",
3791 VMSTATE_UNUSED(1), /* bool root_extended is obsolete by VT-d */
3818 DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
3819 DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits,
3821 DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
3822 DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE),
3823 DEFINE_PROP_BOOL("x-flts", IntelIOMMUState, flts, FALSE),
3824 DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false),
3825 DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false),
3826 DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
3827 DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true),
3828 DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false),
3843 if (index >= iommu->intr_size) { in vtd_irte_get()
3852 addr = iommu->intr_root + index * sizeof(*entry); in vtd_irte_get()
3863 entry->data[0] = le64_to_cpu(entry->data[0]); in vtd_irte_get()
3864 entry->data[1] = le64_to_cpu(entry->data[1]); in vtd_irte_get()
3866 trace_vtd_ir_irte_get(index, entry->data[1], entry->data[0]); in vtd_irte_get()
3874 if (entry->irte.fault_disable) { in vtd_irte_get()
3878 if (!entry->irte.present) { in vtd_irte_get()
3879 error_report_once("%s: detected non-present IRTE " in vtd_irte_get()
3881 __func__, index, entry->data[1], entry->data[0]); in vtd_irte_get()
3888 if (entry->irte.__reserved_0 || entry->irte.__reserved_1 || in vtd_irte_get()
3889 entry->irte.__reserved_2) { in vtd_irte_get()
3890 error_report_once("%s: detected non-zero reserved IRTE " in vtd_irte_get()
3892 __func__, index, entry->data[1], entry->data[0]); in vtd_irte_get()
3901 source_id = entry->irte.source_id; in vtd_irte_get()
3902 switch (entry->irte.sid_vtype) { in vtd_irte_get()
3907 mask = vtd_svt_mask[entry->irte.sid_q]; in vtd_irte_get()
3937 index, entry->irte.sid_vtype); in vtd_irte_get()
3959 irq->trigger_mode = irte.irte.trigger_mode; in vtd_remap_irq_get()
3960 irq->vector = irte.irte.vector; in vtd_remap_irq_get()
3961 irq->delivery_mode = irte.irte.delivery_mode; in vtd_remap_irq_get()
3962 irq->dest = irte.irte.dest_id; in vtd_remap_irq_get()
3963 if (!iommu->intr_eime) { in vtd_remap_irq_get()
3966 irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >> in vtd_remap_irq_get()
3969 irq->dest_mode = irte.irte.dest_mode; in vtd_remap_irq_get()
3970 irq->redir_hint = irte.irte.redir_hint; in vtd_remap_irq_get()
3972 trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector, in vtd_remap_irq_get()
3973 irq->delivery_mode, irq->dest, irq->dest_mode); in vtd_remap_irq_get()
3978 /* Interrupt remapping for MSI/MSI-X entry */
3990 trace_vtd_ir_remap_msi_req(origin->address, origin->data); in vtd_interrupt_remap_msi()
3992 if (!iommu || !iommu->intr_enabled) { in vtd_interrupt_remap_msi()
3997 if (origin->address & VTD_MSI_ADDR_HI_MASK) { in vtd_interrupt_remap_msi()
3998 error_report_once("%s: MSI address high 32 bits non-zero detected: " in vtd_interrupt_remap_msi()
3999 "address=0x%" PRIx64, __func__, origin->address); in vtd_interrupt_remap_msi()
4003 return -EINVAL; in vtd_interrupt_remap_msi()
4006 addr.data = origin->address & VTD_MSI_ADDR_LO_MASK; in vtd_interrupt_remap_msi()
4013 return -EINVAL; in vtd_interrupt_remap_msi()
4028 /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */ in vtd_interrupt_remap_msi()
4029 index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE; in vtd_interrupt_remap_msi()
4033 return -EINVAL; in vtd_interrupt_remap_msi()
4038 if (origin->data & VTD_IR_MSI_DATA_RESERVED) { in vtd_interrupt_remap_msi()
4042 __func__, sid, origin->address, origin->data); in vtd_interrupt_remap_msi()
4046 return -EINVAL; in vtd_interrupt_remap_msi()
4049 uint8_t vector = origin->data & 0xff; in vtd_interrupt_remap_msi()
4050 uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; in vtd_interrupt_remap_msi()
4054 * (see vt-d spec 5.1.5.1). */ in vtd_interrupt_remap_msi()
4060 * (see vt-d spec 5.1.5.1). */ in vtd_interrupt_remap_msi()
4077 trace_vtd_ir_remap_msi(origin->address, origin->data, in vtd_interrupt_remap_msi()
4078 translated->address, translated->data); in vtd_interrupt_remap_msi()
4118 apic_get_class(NULL)->send_msi(&to); in vtd_mem_ir_write()
4140 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_report_ir_illegal_access()
4141 uint8_t bus_n = pci_bus_num(vtd_as->bus); in vtd_report_ir_illegal_access()
4142 uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn); in vtd_report_ir_illegal_access()
4146 assert(vtd_as->pasid != PCI_NO_PASID); in vtd_report_ir_illegal_access()
4149 if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { in vtd_report_ir_illegal_access()
4151 if (!is_fpd_set && s->root_scalable) { in vtd_report_ir_illegal_access()
4152 vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, vtd_as->pasid); in vtd_report_ir_illegal_access()
4158 true, vtd_as->pasid); in vtd_report_ir_illegal_access()
4209 vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key); in vtd_find_add_as()
4224 vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key); in vtd_find_add_as()
4233 new_key->bus = bus; in vtd_find_add_as()
4234 new_key->devfn = devfn; in vtd_find_add_as()
4235 new_key->pasid = pasid; in vtd_find_add_as()
4238 snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn), in vtd_find_add_as()
4241 snprintf(name, sizeof(name), "vtd-%02x.%x-pasid-%x", PCI_SLOT(devfn), in vtd_find_add_as()
4247 vtd_dev_as->bus = bus; in vtd_find_add_as()
4248 vtd_dev_as->devfn = (uint8_t)devfn; in vtd_find_add_as()
4249 vtd_dev_as->pasid = pasid; in vtd_find_add_as()
4250 vtd_dev_as->iommu_state = s; in vtd_find_add_as()
4251 vtd_dev_as->context_cache_entry.context_cache_gen = 0; in vtd_find_add_as()
4252 vtd_dev_as->iova_tree = iova_tree_new(); in vtd_find_add_as()
4254 memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX); in vtd_find_add_as()
4255 address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root"); in vtd_find_add_as()
4258 * Build the DMAR-disabled container with aliases to the in vtd_find_add_as()
4266 memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s), in vtd_find_add_as()
4267 "vtd-nodmar", &s->mr_nodmar, 0, in vtd_find_add_as()
4268 memory_region_size(&s->mr_nodmar)); in vtd_find_add_as()
4271 * Build the per-device DMAR-enabled container. in vtd_find_add_as()
4273 * TODO: currently we have per-device IOMMU memory region only in vtd_find_add_as()
4274 * because we have per-device IOMMU notifiers for devices. If in vtd_find_add_as()
4280 strcat(name, "-dmar"); in vtd_find_add_as()
4281 memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu), in vtd_find_add_as()
4284 memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir", in vtd_find_add_as()
4285 &s->mr_ir, 0, memory_region_size(&s->mr_ir)); in vtd_find_add_as()
4286 memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu), in vtd_find_add_as()
4288 &vtd_dev_as->iommu_ir, 1); in vtd_find_add_as()
4297 memory_region_init_io(&vtd_dev_as->iommu_ir_fault, OBJECT(s), in vtd_find_add_as()
4298 &vtd_mem_ir_fault_ops, vtd_dev_as, "vtd-no-ir", in vtd_find_add_as()
4301 * Hook to root since when PT is enabled vtd_dev_as->iommu in vtd_find_add_as()
4304 memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->root), in vtd_find_add_as()
4306 &vtd_dev_as->iommu_ir_fault, 2); in vtd_find_add_as()
4311 * corresponding sub-containers in vtd_find_add_as()
4313 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, in vtd_find_add_as()
4314 MEMORY_REGION(&vtd_dev_as->iommu), in vtd_find_add_as()
4316 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, in vtd_find_add_as()
4317 &vtd_dev_as->nodmar, 0); in vtd_find_add_as()
4321 g_hash_table_insert(s->vtd_address_spaces, new_key, vtd_dev_as); in vtd_find_add_as()
4334 if (!hiodc->get_cap) { in vtd_check_hiod()
4340 ret = hiodc->get_cap(hiod, HOST_IOMMU_DEVICE_CAP_AW_BITS, errp); in vtd_check_hiod()
4344 if (s->aw_bits > ret) { in vtd_check_hiod()
4345 error_setg(errp, "aw-bits %d > host aw-bits %d", s->aw_bits, ret); in vtd_check_hiod()
4349 if (!s->flts) { in vtd_check_hiod()
4350 /* All checks requested by VTD stage-2 translation pass */ in vtd_check_hiod()
4354 error_setg(errp, "host device is incompatible with stage-1 translation"); in vtd_check_hiod()
4372 if (g_hash_table_lookup(s->vtd_host_iommu_dev, &key)) { in vtd_dev_set_iommu_device()
4384 new_key->bus = bus; in vtd_dev_set_iommu_device()
4385 new_key->devfn = devfn; in vtd_dev_set_iommu_device()
4388 g_hash_table_insert(s->vtd_host_iommu_dev, new_key, hiod); in vtd_dev_set_iommu_device()
4405 if (!g_hash_table_lookup(s->vtd_host_iommu_dev, &key)) { in vtd_dev_unset_iommu_device()
4410 g_hash_table_remove(s->vtd_host_iommu_dev, &key); in vtd_dev_unset_iommu_device()
4419 hwaddr start = n->start; in vtd_address_space_unmap()
4420 hwaddr end = n->end; in vtd_address_space_unmap()
4421 IntelIOMMUState *s = as->iommu_state; in vtd_address_space_unmap()
4427 * VT-d spec), otherwise we need to consider overflow of 64 bits. in vtd_address_space_unmap()
4430 if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) { in vtd_address_space_unmap()
4433 * VT-d supported address space size in vtd_address_space_unmap()
4435 end = VTD_ADDRESS_SIZE(s->aw_bits) - 1; in vtd_address_space_unmap()
4439 total = remain = end - start + 1; in vtd_address_space_unmap()
4443 uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits); in vtd_address_space_unmap()
4459 remain -= size; in vtd_address_space_unmap()
4464 trace_vtd_as_unmap_whole(pci_bus_num(as->bus), in vtd_address_space_unmap()
4465 VTD_PCI_SLOT(as->devfn), in vtd_address_space_unmap()
4466 VTD_PCI_FUNC(as->devfn), in vtd_address_space_unmap()
4467 n->start, total); in vtd_address_space_unmap()
4469 map.iova = n->start; in vtd_address_space_unmap()
4470 map.size = total - 1; /* Inclusive */ in vtd_address_space_unmap()
4471 iova_tree_remove(as->iova_tree, map); in vtd_address_space_unmap()
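vtd_address_space_unmap() cannot always report the whole [start, end] range in one notification, because IOMMU notifiers take a power-of-two address mask; the loop therefore carves the range into naturally aligned power-of-two chunks (dma_aligned_pow2_mask() appears to return the mask of the largest such chunk starting at the current position). A worked example, on the assumption of that behaviour:

    start = 0x1000, end = 0x4fff (16 KiB total):
      chunk 1: 0x1000 .. 0x1fff   (4 KiB - 0x1000 is only 4 KiB aligned)
      chunk 2: 0x2000 .. 0x3fff   (8 KiB - 8 KiB aligned and still fits)
      chunk 3: 0x4000 .. 0x4fff   (4 KiB - remaining tail)

Afterwards the whole range is dropped from the shadow iova_tree in one call, since the tree itself has no power-of-two restriction.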
4479 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { in vtd_address_space_unmap_all()
4480 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { in vtd_address_space_unmap_all()
4501 IntelIOMMUState *s = vtd_as->iommu_state; in vtd_iommu_replay()
4502 uint8_t bus_n = pci_bus_num(vtd_as->bus); in vtd_iommu_replay()
4506 /* replay is protected by BQL, page walk will re-setup it safely */ in vtd_iommu_replay()
4507 iova_tree_remove(vtd_as->iova_tree, map); in vtd_iommu_replay()
4509 if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { in vtd_iommu_replay()
4510 trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" : in vtd_iommu_replay()
4512 bus_n, PCI_SLOT(vtd_as->devfn), in vtd_iommu_replay()
4513 PCI_FUNC(vtd_as->devfn), in vtd_iommu_replay()
4514 vtd_get_domain_id(s, &ce, vtd_as->pasid), in vtd_iommu_replay()
4516 if (n->notifier_flags & IOMMU_NOTIFIER_MAP) { in vtd_iommu_replay()
4522 .aw = s->aw_bits, in vtd_iommu_replay()
4524 .domain_id = vtd_get_domain_id(s, &ce, vtd_as->pasid), in vtd_iommu_replay()
4527 vtd_page_walk(s, &ce, 0, ~0ULL, &info, vtd_as->pasid); in vtd_iommu_replay()
4530 trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), in vtd_iommu_replay()
4531 PCI_FUNC(vtd_as->devfn)); in vtd_iommu_replay()
4539 s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | in vtd_cap_init()
4541 VTD_CAP_MGAW(s->aw_bits); in vtd_cap_init()
4542 if (s->dma_drain) { in vtd_cap_init()
4543 s->cap |= VTD_CAP_DRAIN; in vtd_cap_init()
4545 if (s->dma_translation) { in vtd_cap_init()
4546 if (s->aw_bits >= VTD_HOST_AW_39BIT) { in vtd_cap_init()
4547 s->cap |= VTD_CAP_SAGAW_39bit; in vtd_cap_init()
4549 if (s->aw_bits >= VTD_HOST_AW_48BIT) { in vtd_cap_init()
4550 s->cap |= VTD_CAP_SAGAW_48bit; in vtd_cap_init()
4553 s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; in vtd_cap_init()
4556 s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV; in vtd_cap_init()
4557 if (s->intr_eim == ON_OFF_AUTO_ON) { in vtd_cap_init()
4558 s->ecap |= VTD_ECAP_EIM; in vtd_cap_init()
4560 assert(s->intr_eim != ON_OFF_AUTO_AUTO); in vtd_cap_init()
4563 if (x86_iommu->dt_supported) { in vtd_cap_init()
4564 s->ecap |= VTD_ECAP_DT; in vtd_cap_init()
4567 if (x86_iommu->pt_supported) { in vtd_cap_init()
4568 s->ecap |= VTD_ECAP_PT; in vtd_cap_init()
4571 if (s->caching_mode) { in vtd_cap_init()
4572 s->cap |= VTD_CAP_CM; in vtd_cap_init()
4576 if (s->flts) { in vtd_cap_init()
4577 s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_FLTS; in vtd_cap_init()
4578 if (s->fs1gp) { in vtd_cap_init()
4579 s->cap |= VTD_CAP_FS1GP; in vtd_cap_init()
4581 } else if (s->scalable_mode) { in vtd_cap_init()
4582 s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; in vtd_cap_init()
4585 if (s->snoop_control) { in vtd_cap_init()
4586 s->ecap |= VTD_ECAP_SC; in vtd_cap_init()
4589 if (s->pasid) { in vtd_cap_init()
4590 s->ecap |= VTD_ECAP_PASID; in vtd_cap_init()
4602 memset(s->csr, 0, DMAR_REG_SIZE); in vtd_init()
4603 memset(s->wmask, 0, DMAR_REG_SIZE); in vtd_init()
4604 memset(s->w1cmask, 0, DMAR_REG_SIZE); in vtd_init()
4605 memset(s->womask, 0, DMAR_REG_SIZE); in vtd_init()
4607 s->root = 0; in vtd_init()
4608 s->root_scalable = false; in vtd_init()
4609 s->dmar_enabled = false; in vtd_init()
4610 s->intr_enabled = false; in vtd_init()
4611 s->iq_head = 0; in vtd_init()
4612 s->iq_tail = 0; in vtd_init()
4613 s->iq = 0; in vtd_init()
4614 s->iq_size = 0; in vtd_init()
4615 s->qi_enabled = false; in vtd_init()
4616 s->iq_last_desc_type = VTD_INV_DESC_NONE; in vtd_init()
4617 s->iq_dw = false; in vtd_init()
4618 s->next_frcd_reg = 0; in vtd_init()
4626 vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits, in vtd_init()
4627 x86_iommu->dt_supported && s->stale_tm); in vtd_init()
4628 vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); in vtd_init()
4629 vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); in vtd_init()
4630 vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); in vtd_init()
4632 vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits, in vtd_init()
4633 x86_iommu->dt_supported && s->stale_tm); in vtd_init()
4634 vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits, in vtd_init()
4635 x86_iommu->dt_supported && s->stale_tm); in vtd_init()
4641 vtd_fpte_rsvd[1] = VTD_FPTE_PAGE_L1_RSVD_MASK(s->aw_bits); in vtd_init()
4642 vtd_fpte_rsvd[2] = VTD_FPTE_PAGE_L2_RSVD_MASK(s->aw_bits); in vtd_init()
4643 vtd_fpte_rsvd[3] = VTD_FPTE_PAGE_L3_RSVD_MASK(s->aw_bits); in vtd_init()
4644 vtd_fpte_rsvd[4] = VTD_FPTE_PAGE_L4_RSVD_MASK(s->aw_bits); in vtd_init()
4646 vtd_fpte_rsvd_large[2] = VTD_FPTE_LPAGE_L2_RSVD_MASK(s->aw_bits); in vtd_init()
4647 vtd_fpte_rsvd_large[3] = VTD_FPTE_LPAGE_L3_RSVD_MASK(s->aw_bits); in vtd_init()
4649 if (s->scalable_mode || s->snoop_control) { in vtd_init()
4659 vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); in vtd_init()
4660 vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); in vtd_init()
4700 /* Fault Recording Registers, 128-bit */ in vtd_init()
4730 return &vtd_as->as; in vtd_host_dma_iommu()
4743 if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) { in vtd_decide_config()
4748 if (s->intr_eim == ON_OFF_AUTO_AUTO) { in vtd_decide_config()
4749 s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim) in vtd_decide_config()
4753 if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { in vtd_decide_config()
4761 if (!s->scalable_mode && s->flts) { in vtd_decide_config()
4762 error_setg(errp, "x-flts is only available in scalable mode"); in vtd_decide_config()
4766 if (!s->flts && s->aw_bits != VTD_HOST_AW_39BIT && in vtd_decide_config()
4767 s->aw_bits != VTD_HOST_AW_48BIT) { in vtd_decide_config()
4768 error_setg(errp, "%s: supported values for aw-bits are: %d, %d", in vtd_decide_config()
4769 s->scalable_mode ? "Scalable mode(flts=off)" : "Legacy mode", in vtd_decide_config()
4774 if (s->flts && s->aw_bits != VTD_HOST_AW_48BIT) { in vtd_decide_config()
4776 "Scalable mode(flts=on): supported value for aw-bits is: %d", in vtd_decide_config()
4781 if (s->scalable_mode && !s->dma_drain) { in vtd_decide_config()
4786 if (s->pasid && !s->scalable_mode) { in vtd_decide_config()
4799 * We hard-code this here because vfio-pci is the only special case in vtd_machine_done_notify_one()
4803 if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) { in vtd_machine_done_notify_one()
4825 PCIBus *bus = pcms->pcibus; in vtd_realize()
4829 if (s->pasid && x86_iommu->dt_supported) { in vtd_realize()
4831 * PASID-based-Device-TLB Invalidate Descriptor is not in vtd_realize()
4843 QLIST_INIT(&s->vtd_as_with_notifiers); in vtd_realize()
4844 qemu_mutex_init(&s->iommu_lock); in vtd_realize()
4845 memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, in vtd_realize()
4848 Q35_HOST_BRIDGE_IOMMU_ADDR, &s->csrmem); in vtd_realize()
4851 memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar", in vtd_realize()
4853 memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops, in vtd_realize()
4854 s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE); in vtd_realize()
4855 memory_region_init_alias(&s->mr_sys_alias, OBJECT(s), in vtd_realize()
4856 "vtd-sys-alias", get_system_memory(), 0, in vtd_realize()
4858 memory_region_add_subregion_overlap(&s->mr_nodmar, 0, in vtd_realize()
4859 &s->mr_sys_alias, 0); in vtd_realize()
4860 memory_region_add_subregion_overlap(&s->mr_nodmar, in vtd_realize()
4862 &s->mr_ir, 1); in vtd_realize()
4864 s->iotlb = g_hash_table_new_full(vtd_iotlb_hash, vtd_iotlb_equal, in vtd_realize()
4866 s->vtd_address_spaces = g_hash_table_new_full(vtd_as_hash, vtd_as_equal, in vtd_realize()
4868 s->vtd_host_iommu_dev = g_hash_table_new_full(vtd_hiod_hash, vtd_hiod_equal, in vtd_realize()
4873 x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); in vtd_realize()
4884 * Use 'exit' reset phase to make sure all DMA requests in vtd_class_init()
4887 rc->phases.exit = vtd_reset_exit; in vtd_class_init()
4888 dc->vmsd = &vtd_vmstate; in vtd_class_init()
4890 dc->hotpluggable = false; in vtd_class_init()
4891 x86_class->realize = vtd_realize; in vtd_class_init()
4892 x86_class->int_remap = vtd_int_remap; in vtd_class_init()
4893 set_bit(DEVICE_CATEGORY_MISC, dc->categories); in vtd_class_init()
4894 dc->desc = "Intel IOMMU (VT-d) DMA Remapping device"; in vtd_class_init()
4909 imrc->translate = vtd_iommu_translate; in vtd_iommu_memory_region_class_init()
4910 imrc->notify_flag_changed = vtd_iommu_notify_flag_changed; in vtd_iommu_memory_region_class_init()
4911 imrc->replay = vtd_iommu_replay; in vtd_iommu_memory_region_class_init()