Lines Matching +full:page +full:- +full:level
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt

#include <linux/io-pgtable.h>

#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
/* ... */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions */
static inline u64 set_pgtable_attr(u64 *page)
{
	/* ... */
	return (iommu_virt_to_phys(page) | prot);
}
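The body elided between the signature and the return only assembles the protection bits for a non-leaf entry. A minimal sketch of the full helper, assuming the elided defines include IOMMU_PAGE_PRESENT, IOMMU_PAGE_RW, IOMMU_PAGE_USER, IOMMU_PAGE_ACCESS and IOMMU_PAGE_DIRTY alongside the PWT/PCD/PSE bits shown above:

/* Sketch (reconstruction): attributes for a next-level table pointer. */
static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	/* Intermediate tables are present, writable and user-accessible ... */
	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	/* ... and pre-marked accessed/dirty so the IOMMU need not set them. */
	prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	/* The entry stores the physical address of the table plus the flags. */
	return (iommu_virt_to_phys(page) | prot);
}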
/* From set_pte_attr(): */
	/* Large page */
	...
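Only the comment matched here. A hedged sketch of the leaf-attribute helper it sits in; IOMMU_PAGE_SIZE_2M, IOMMU_PAGE_SIZE_1G, PM_ADDR_MASK and IOMMU_PROT_IW are assumed to come from this file and the AMD driver headers:

/* Sketch (reconstruction): attributes for a leaf PTE. */
static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page: PSE marks 2M and 1G leaves. */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}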
static void free_pgtable(u64 *pt, int level)
{
	...
		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
	...
		if (level > 2)
			free_pgtable(p, level - 1);
	...
}
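Read together, the matched lines describe a post-order teardown: recurse while intermediate levels remain, free l1 tables directly, then free the table itself. A condensed sketch of the whole function, with IOMMU_PTE_PRESENT(), is_large_pte() and get_pgtable_pte() as assumed helpers from this file:

/* Sketch (reconstruction): recursive teardown of one table and its children. */
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; i++) {	/* 512 entries per 4K table */
		/* Skip empty slots and large leaves: nothing to recurse into. */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;
		if (is_large_pte(pt[i]))
			continue;

		/* Recurse above level 2; l1 children hold only leaf PTEs. */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			free_pgtable_page(p);
	}

	/* Finally free this table itself. */
	free_pgtable_page(pt);
}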
/* Allocate page table */
/* From v2_alloc_pte(): */
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	...
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	...
	while (level >= end_level) {
		...
			page = alloc_pgtable_page(nid, gfp);
			if (!page)
				...
			__npte = set_pgtable_attr(page);
			...
				free_pgtable_page(page);
		...
		level -= 1;
		...
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}
	...
	free_pgtable(__pte, end_level - 1);
	...
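The elided parts of the loop handle the slot states: a missing table is allocated and published with cmpxchg64(), losing the publish race just frees the fresh page, and only then does the walk descend. A condensed sketch of the loop (the end_level derivation and large-PTE handling are simplified):

	/* Sketch (reconstruction) of the walk body. */
	while (level >= end_level) {
		u64 __pte = *pte, __npte;

		if (!IOMMU_PTE_PRESENT(__pte)) {
			/* Allocate the missing next-level table ... */
			page = alloc_pgtable_page(nid, gfp);
			if (!page)
				return NULL;

			/* ... and publish it atomically; if a concurrent
			 * walker won the race, its table stays, ours goes. */
			__npte = set_pgtable_attr(page);
			if (cmpxchg64(pte, __pte, __npte) != __pte)
				free_pgtable_page(page);
			continue;	/* re-read the (now present) slot */
		}

		/* Descend and index with the next chunk of IOVA bits. */
		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

The trailing free_pgtable(__pte, end_level - 1) covers the case where a sub-tree of smaller mappings occupies the slot a large leaf is about to take: the sub-tree is torn down before the leaf is installed.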
/* From fetch_pte(): */
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	...
	while (level) {
		...
		/* Walk to the next level */
		...
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
		...
		/* Large page */
		...
			if (level == PAGE_MODE_3_LEVEL)
				...
			else if (level == PAGE_MODE_2_LEVEL)
				...
		level -= 1;
	}
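The two level checks report the size of a large leaf: a PSE entry reached at the 3-level step maps 1G, one reached at the 2-level step maps 2M. A sketch of the full lookup, reusing the assumed helpers from above (PAGE_MODE_*_LEVEL come from the AMD driver headers):

/* Sketch (reconstruction): find the PTE for an IOVA and report its page size. */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{
	u64 *pte;
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	*page_size = PAGE_SIZE;

	while (level) {
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Walk to the next level */
		pte = get_pgtable_pte(*pte);
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];

		/* Large page */
		if (is_large_pte(*pte)) {
			if (level == PAGE_MODE_3_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_1G;
			else if (level == PAGE_MODE_2_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_2M;
			else
				return NULL;	/* PSE set at an invalid level */
			break;
		}

		level -= 1;
	}

	return pte;
}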
/* From iommu_v2_map_pages(): */
	struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
	...
	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return -EINVAL;
	...
		return -EINVAL;
	...
		pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
				   ...);
		...
			ret = -EINVAL;
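The loop around these lines installs one leaf per iteration: allocate (or find) the PTE slot, write the leaf attributes, advance. A condensed sketch; get_alloc_page_size(), the size/mapped_size accounting and the out label are assumptions about the elided code:

	/* Sketch (reconstruction) of the map loop. */
	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
				   iova, map_size, gfp, &updated);
		if (!pte) {
			ret = -EINVAL;
			goto out;
		}

		/* Install the leaf entry, then advance by one page. */
		*pte = set_pte_attr(paddr, map_size, prot);

		iova        += map_size;
		paddr       += map_size;
		mapped_size += map_size;
	}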
/* From iommu_v2_unmap_pages(): */
	struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
	...
	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		...
		iova = (iova & ~(unmap_size - 1)) + unmap_size;
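The iova update is the interesting line: since unmap_size is a power of two, iova & ~(unmap_size - 1) rounds the address down to the start of the page fetch_pte() reported, and adding unmap_size steps just past it, so an IOVA that lands in the middle of a 2M or 1G mapping resumes after that whole page. A condensed sketch of the loop:

	/* Sketch (reconstruction) of the unmap loop. */
	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;	/* clear the leaf entry */

		/* Round down to the page fetch_pte found, then step past it. */
		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}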
/* From iommu_v2_iova_to_phys(): */
	offset_mask = pte_pgsize - 1;
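With pte_pgsize a power of two, offset_mask keeps the low IOVA bits that address bytes inside the page. The rest of the function, sketched below as a reconstruction, takes the page frame from the PTE and the offset from the IOVA (__sme_clr strips the memory-encryption bit, as elsewhere in the driver):

	/* Sketch (reconstruction) of the translation. */
	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	/* Page frame from the PTE, page offset from the IOVA. */
	return (__pte & ~offset_mask) | (iova & offset_mask);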
/*
 * ----------------------------------------------------
 */
/* From v2_free_pgtable(): */
	if (!(pdom->flags & PD_IOMMUV2_MASK))
		...
	amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
	...
	/* Free page table */
	free_pgtable(pgtable->pgd, get_pgtable_level());
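The ordering here is the point: the GCR3 root for PASID 0 is cleared and that change is propagated to the IOMMUs before any table page is freed, so the hardware can never walk freed memory. A sketch of the sequence, assuming amd_iommu_domain_update() performs the propagation as elsewhere in the driver:

	/* Sketch (reconstruction) of the teardown order. */
	/* 1. Detach the table: clear the GCR3 root pointer for PASID 0. */
	amd_iommu_domain_clear_gcr3(&pdom->domain, 0);

	/* 2. Make the change visible to all IOMMUs. */
	amd_iommu_domain_update(pdom);

	/* 3. Only now is it safe to free the page-table pages. */
	free_pgtable(pgtable->pgd, get_pgtable_level());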
/* From v2_alloc_pgtable(): */
	pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
	if (!pgtable->pgd)
		...
	ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
	...
	pgtable->iop.ops.map_pages    = iommu_v2_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v2_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	cfg->ias = ias;
	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb = &v2_flush_ops;

	return &pgtable->iop;
	...
	/* error path */
	free_pgtable_page(pgtable->pgd);
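For context, this constructor is not called directly: the AMD driver reaches it through the generic io-pgtable framework, which dispatches on the AMD_IOMMU_V2 format and hands the protection domain through as the cookie. A hedged caller-side sketch; the pdom->iop.pgtbl_cfg field name is an assumption about struct amd_io_pgtable:

	/* Sketch (caller side): select the v2 allocator via io-pgtable. */
	struct io_pgtable_ops *pgtbl_ops;

	pgtbl_ops = alloc_io_pgtable_ops(AMD_IOMMU_V2, &pdom->iop.pgtbl_cfg, pdom);
	if (!pgtbl_ops)
		return -ENOMEM;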