Lines Matching +full:page +full:- +full:level
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
31 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
37 unsigned int level; member
41 * amdgpu_vm_pt_level_shift - return the addr shift for each level
44 * @level: VMPT level
47 * The number of bits the pfn needs to be right shifted for a level.
50 unsigned int level) in amdgpu_vm_pt_level_shift() argument
52 switch (level) { in amdgpu_vm_pt_level_shift()
56 return 9 * (AMDGPU_VM_PDB0 - level) + in amdgpu_vm_pt_level_shift()
57 adev->vm_manager.block_size; in amdgpu_vm_pt_level_shift()
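The shift rule above is easy to check outside the driver. A minimal standalone sketch, assuming the usual PDB2/PDB1/PDB0/PTB level ordering and a 9-bit block_size (the enum values and constants below are stand-ins, not the driver's definitions):

    #include <stdio.h>

    /* Stand-in level numbering; the driver defines the real enum elsewhere. */
    enum vm_level { VM_PDB2 = 0, VM_PDB1, VM_PDB0, VM_PTB };

    /* Mirrors the rule above: the shift grows by 9 bits per directory level,
     * starting from block_size at PDB0; leaf PTBs shift by 0. */
    static unsigned int pt_level_shift(unsigned int block_size, enum vm_level level)
    {
        switch (level) {
        case VM_PDB2:
        case VM_PDB1:
        case VM_PDB0:
            return 9 * (VM_PDB0 - level) + block_size;
        case VM_PTB:
            return 0;
        default:
            return ~0u;
        }
    }

    int main(void)
    {
        /* With block_size = 9 this prints shifts of 27, 18, 9 and 0. */
        for (int l = VM_PDB2; l <= VM_PTB; l++)
            printf("level %d -> shift %u\n", l, pt_level_shift(9, (enum vm_level)l));
        return 0;
    }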
66 * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
69 * @level: VMPT level
72 * The number of entries in a page directory or page table.
75 unsigned int level) in amdgpu_vm_pt_num_entries() argument
79 shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level); in amdgpu_vm_pt_num_entries()
80 if (level == adev->vm_manager.root_level) in amdgpu_vm_pt_num_entries()
82 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) in amdgpu_vm_pt_num_entries()
84 else if (level != AMDGPU_VM_PTB) in amdgpu_vm_pt_num_entries()
88 /* For the page tables on the leaves */ in amdgpu_vm_pt_num_entries()
93 * amdgpu_vm_pt_entries_mask - the mask used to extract the entry index within a PD/PT
96 * @level: VMPT level
102 unsigned int level) in amdgpu_vm_pt_entries_mask() argument
104 if (level <= adev->vm_manager.root_level) in amdgpu_vm_pt_entries_mask()
106 else if (level != AMDGPU_VM_PTB) in amdgpu_vm_pt_entries_mask()
109 return AMDGPU_VM_PTE_COUNT(adev) - 1; in amdgpu_vm_pt_entries_mask()
113 * amdgpu_vm_pt_size - returns the size of the page table in bytes
116 * @level: VMPT level
119 * The size of the BO for a page directory or page table in bytes.
122 unsigned int level) in amdgpu_vm_pt_size() argument
124 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8); in amdgpu_vm_pt_size()
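Together these three helpers pin down the geometry: the root directory is sized so it covers max_pfn at the root level's shift, the interior directory levels use a fixed 512 entries (their return statements fall among the lines elided from this listing, but the 9-bit math above implies it), and the leaf tables hold one entry per block_size bit, each entry being an 8-byte PDE/PTE, with the BO size rounded up to a GPU page. A hedged standalone sketch with assumed values for max_pfn and block_size:

    #include <stdio.h>
    #include <stdint.h>

    #define GPU_PAGE_SIZE 4096ULL   /* assumed GPU page granularity */

    /* Power-of-two round-up, as round_up()/AMDGPU_GPU_PAGE_ALIGN do in the kernel. */
    static uint64_t round_up_pow2(uint64_t x, uint64_t align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        /* Assumed configuration: 48-bit VA space of 4 KiB pages, block_size 9,
         * root level PDB2 (shift 27).  Illustrative numbers, not queried from HW. */
        uint64_t max_pfn = 1ULL << 36;
        unsigned int block_size = 9, root_shift = 27;

        uint64_t root_entries = round_up_pow2(max_pfn, 1ULL << root_shift) >> root_shift;
        uint64_t dir_entries  = 512;                /* interior PDB1/PDB0 levels */
        uint64_t ptb_entries  = 1ULL << block_size; /* leaf page table */

        printf("root PD:     %6llu entries, %llu bytes\n", (unsigned long long)root_entries,
               (unsigned long long)round_up_pow2(root_entries * 8, GPU_PAGE_SIZE));
        printf("interior PD: %6llu entries, %llu bytes\n", (unsigned long long)dir_entries,
               (unsigned long long)round_up_pow2(dir_entries * 8, GPU_PAGE_SIZE));
        printf("leaf PT:     %6llu entries, %llu bytes\n", (unsigned long long)ptb_entries,
               (unsigned long long)round_up_pow2(ptb_entries * 8, GPU_PAGE_SIZE));
        return 0;
    }

For the fixed-size levels the mask is simply the entry count minus one (as the PTB branch above shows); the root is special-cased because its size depends on max_pfn.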
128 * amdgpu_vm_pt_parent - get the parent page directory
130 * @pt: child page table
132 * Helper to get the parent entry for the child page table. NULL if we are at
133 * the root page directory.
138 struct amdgpu_bo *parent = pt->bo->parent; in amdgpu_vm_pt_parent()
143 return parent->vm_bo; in amdgpu_vm_pt_parent()
147 * amdgpu_vm_pt_start - start PD/PT walk
160 cursor->pfn = start; in amdgpu_vm_pt_start()
161 cursor->parent = NULL; in amdgpu_vm_pt_start()
162 cursor->entry = &vm->root; in amdgpu_vm_pt_start()
163 cursor->level = adev->vm_manager.root_level; in amdgpu_vm_pt_start()
167 * amdgpu_vm_pt_descendant - go to child node
181 if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry || in amdgpu_vm_pt_descendant()
182 !cursor->entry->bo) in amdgpu_vm_pt_descendant()
185 mask = amdgpu_vm_pt_entries_mask(adev, cursor->level); in amdgpu_vm_pt_descendant()
186 shift = amdgpu_vm_pt_level_shift(adev, cursor->level); in amdgpu_vm_pt_descendant()
188 ++cursor->level; in amdgpu_vm_pt_descendant()
189 idx = (cursor->pfn >> shift) & mask; in amdgpu_vm_pt_descendant()
190 cursor->parent = cursor->entry; in amdgpu_vm_pt_descendant()
191 cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx]; in amdgpu_vm_pt_descendant()
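The descendant step is a plain radix lookup: take the current level's shift and mask, pull the index out of the pfn, and step into entries[idx] of the current BO. A small self-contained sketch of how one pfn decomposes into per-level indices, assuming the 9/9/9/9-bit split over 4 KiB pages used in the earlier examples:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed shifts for PDB2/PDB1/PDB0/PTB with block_size 9. */
        const unsigned int shifts[] = { 27, 18, 9, 0 };
        const char *names[] = { "PDB2", "PDB1", "PDB0", "PTB" };

        uint64_t va  = 0x123456789000ULL;  /* arbitrary GPU virtual address */
        uint64_t pfn = va >> 12;           /* 4 KiB page frame number */

        /* idx = (pfn >> shift) & mask, exactly as the descendant step computes it. */
        for (int i = 0; i < 4; i++)
            printf("%-4s entry index: %llu\n", names[i],
                   (unsigned long long)((pfn >> shifts[i]) & 0x1ff));
        return 0;
    }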
196 * amdgpu_vm_pt_sibling - go to sibling node
213 if (!cursor->parent) in amdgpu_vm_pt_sibling()
217 shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1); in amdgpu_vm_pt_sibling()
218 num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1); in amdgpu_vm_pt_sibling()
219 parent = to_amdgpu_bo_vm(cursor->parent->bo); in amdgpu_vm_pt_sibling()
221 if (cursor->entry == &parent->entries[num_entries - 1]) in amdgpu_vm_pt_sibling()
224 cursor->pfn += 1ULL << shift; in amdgpu_vm_pt_sibling()
225 cursor->pfn &= ~((1ULL << shift) - 1); in amdgpu_vm_pt_sibling()
226 ++cursor->entry; in amdgpu_vm_pt_sibling()
231 * amdgpu_vm_pt_ancestor - go to parent node
241 if (!cursor->parent) in amdgpu_vm_pt_ancestor()
244 --cursor->level; in amdgpu_vm_pt_ancestor()
245 cursor->entry = cursor->parent; in amdgpu_vm_pt_ancestor()
246 cursor->parent = amdgpu_vm_pt_parent(cursor->parent); in amdgpu_vm_pt_ancestor()
251 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
269 cursor->pfn = ~0ll; in amdgpu_vm_pt_next()
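amdgpu_vm_pt_next() chains the three moves above into a pre-order walk: descend to a child if one exists, otherwise step to a sibling, otherwise climb until some ancestor has a sibling, and mark the cursor exhausted (pfn = ~0) when even the root is done. A toy, self-contained illustration of that walk order; the node layout is made up, only the traversal shape mirrors the cursor logic:

    #include <stdio.h>
    #include <stddef.h>

    struct node {
        const char *name;
        struct node *parent, *child, *sibling;  /* first child / next sibling */
    };

    /* Child first, then sibling, then an ancestor's sibling -- pre-order. */
    static struct node *walk_next(struct node *n)
    {
        if (n->child)
            return n->child;
        while (n && !n->sibling)
            n = n->parent;
        return n ? n->sibling : NULL;
    }

    int main(void)
    {
        struct node root = { "root" }, a = { "a", &root }, b = { "b", &root };
        struct node a1 = { "a1", &a };

        root.child = &a; a.sibling = &b; a.child = &a1;

        for (struct node *n = &root; n; n = walk_next(n))
            printf("%s\n", n->name);  /* prints: root a a1 b */
        return 0;
    }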
276 * amdgpu_vm_pt_first_dfs - start a depth-first search
300 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
311 return entry && (!start || entry != start->entry); in amdgpu_vm_pt_continue_dfs()
315 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
325 if (!cursor->entry) in amdgpu_vm_pt_next_dfs()
328 if (!cursor->parent) in amdgpu_vm_pt_next_dfs()
329 cursor->entry = NULL; in amdgpu_vm_pt_next_dfs()
338 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
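These three helpers exist to back the for_each_amdgpu_vm_pt_dfs_safe() iterator rather than to be called directly. Because the iterator samples the current entry and then advances the cursor before the loop body runs, the body may safely free that entry; the teardown path (amdgpu_vm_pt_free_root, further down) relies on exactly that. A hedged sketch of the usage shape, not a verbatim copy of the file:

    struct amdgpu_vm_pt_cursor cursor;
    struct amdgpu_vm_bo_base *entry;

    /* Visit children before parents and release every PD/PT; the cursor is
     * already past `entry` when the body runs, so freeing it here is safe. */
    for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
        if (entry)
            amdgpu_vm_pt_free(entry);
    }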
347 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
362 unsigned int level = adev->vm_manager.root_level; in amdgpu_vm_pt_clear() local
365 struct amdgpu_bo *ancestor = &vmbo->bo; in amdgpu_vm_pt_clear()
367 struct amdgpu_bo *bo = &vmbo->bo; in amdgpu_vm_pt_clear()
372 if (ancestor->parent) { in amdgpu_vm_pt_clear()
373 ++level; in amdgpu_vm_pt_clear()
374 while (ancestor->parent->parent) { in amdgpu_vm_pt_clear()
375 ++level; in amdgpu_vm_pt_clear()
376 ancestor = ancestor->parent; in amdgpu_vm_pt_clear()
382 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_vm_pt_clear()
387 return -ENODEV; in amdgpu_vm_pt_clear()
389 r = vm->update_funcs->map_table(vmbo); in amdgpu_vm_pt_clear()
398 r = vm->update_funcs->prepare(&params, NULL); in amdgpu_vm_pt_clear()
405 if (adev->asic_type >= CHIP_VEGA10) { in amdgpu_vm_pt_clear()
406 if (level != AMDGPU_VM_PTB) { in amdgpu_vm_pt_clear()
409 amdgpu_gmc_get_vm_pde(adev, level, in amdgpu_vm_pt_clear()
417 r = vm->update_funcs->update(&params, vmbo, addr, 0, entries, in amdgpu_vm_pt_clear()
422 r = vm->update_funcs->commit(&params, NULL); in amdgpu_vm_pt_clear()
429 * amdgpu_vm_pt_create - create bo for PD/PT
433 * @level: the page table level
439 int level, bool immediate, struct amdgpu_bo_vm **vmbo, in amdgpu_vm_pt_create() argument
447 bp.size = amdgpu_vm_pt_size(adev, level); in amdgpu_vm_pt_create()
450 if (!adev->gmc.is_app_apu) in amdgpu_vm_pt_create()
459 if (level < AMDGPU_VM_PTB) in amdgpu_vm_pt_create()
460 num_entries = amdgpu_vm_pt_num_entries(adev, level); in amdgpu_vm_pt_create()
466 if (vm->use_cpu_for_update) in amdgpu_vm_pt_create()
473 if (vm->root.bo) in amdgpu_vm_pt_create()
474 bp.resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_pt_create()
480 * amdgpu_vm_pt_alloc - Allocate a specific page table
483 * @vm: VM to allocate page tables for
484 * @cursor: Which page table to allocate
487 * Make sure a specific page table or directory is allocated.
490 * 1 if page table needed to be allocated, 0 if page table was already
498 struct amdgpu_vm_bo_base *entry = cursor->entry; in amdgpu_vm_pt_alloc()
503 if (entry->bo) in amdgpu_vm_pt_alloc()
507 r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt, in amdgpu_vm_pt_alloc()
508 vm->root.bo->xcp_id); in amdgpu_vm_pt_alloc()
516 pt_bo = &pt->bo; in amdgpu_vm_pt_alloc()
517 pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo); in amdgpu_vm_pt_alloc()
531 * amdgpu_vm_pt_free - free one PD/PT
537 if (!entry->bo) in amdgpu_vm_pt_free()
540 amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1); in amdgpu_vm_pt_free()
541 entry->bo->vm_bo = NULL; in amdgpu_vm_pt_free()
542 ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); in amdgpu_vm_pt_free()
544 spin_lock(&entry->vm->status_lock); in amdgpu_vm_pt_free()
545 list_del(&entry->vm_status); in amdgpu_vm_pt_free()
546 spin_unlock(&entry->vm->status_lock); in amdgpu_vm_pt_free()
547 amdgpu_bo_unref(&entry->bo); in amdgpu_vm_pt_free()
551 * amdgpu_vm_pt_free_list - free PD/PT levels
556 * Free the page directory objects saved in the flush list
562 bool unlocked = params->unlocked; in amdgpu_vm_pt_free_list()
564 if (list_empty(&params->tlb_flush_waitlist)) in amdgpu_vm_pt_free_list()
568 * An unlocked unmap only clears page table leaves; it is a warning condition to be freeing page table entries here. in amdgpu_vm_pt_free_list()
572 list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status) in amdgpu_vm_pt_free_list()
577 * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
590 spin_lock(&params->vm->status_lock); in amdgpu_vm_pt_add_list()
591 for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) { in amdgpu_vm_pt_add_list()
592 if (entry && entry->bo) in amdgpu_vm_pt_add_list()
593 list_move(&entry->vm_status, &params->tlb_flush_waitlist); in amdgpu_vm_pt_add_list()
597 list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist); in amdgpu_vm_pt_add_list()
598 spin_unlock(&params->vm->status_lock); in amdgpu_vm_pt_add_list()
602 * amdgpu_vm_pt_free_root - free root PD
606 * Free the root page directory and everything below it.
620 * amdgpu_vm_pde_update - update a single level in the hierarchy
632 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_pde_update()
634 unsigned int level; in amdgpu_vm_pde_update() local
637 return -EINVAL; in amdgpu_vm_pde_update()
639 bo = parent->bo; in amdgpu_vm_pde_update()
640 for (level = 0, pbo = bo->parent; pbo; ++level) in amdgpu_vm_pde_update()
641 pbo = pbo->parent; in amdgpu_vm_pde_update()
643 level += params->adev->vm_manager.root_level; in amdgpu_vm_pde_update()
644 amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags); in amdgpu_vm_pde_update()
645 pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8; in amdgpu_vm_pde_update()
646 return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt, in amdgpu_vm_pde_update()
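The update above derives the child's level by counting parents, asks the GMC layer for the PDE value, and writes it at byte offset (entry - entries) * 8 inside the parent BO, because every PDE is one 64-bit word in the directory. A self-contained sketch of just that offset arithmetic, with a toy entries array standing in for the driver's structures:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct toy_entry { uint64_t dummy; };  /* stand-in for struct amdgpu_vm_bo_base */

    int main(void)
    {
        struct toy_entry entries[512];
        struct toy_entry *entry = &entries[37];

        /* Same idea as "pde = (entry - entries) * 8": the slot for a child is
         * its index in the parent's entries[] array times 8 bytes. */
        ptrdiff_t index = entry - entries;
        uint64_t pde_byte_offset = (uint64_t)index * 8;

        printf("child index %td -> PDE byte offset %llu\n",
               index, (unsigned long long)pde_byte_offset);
        return 0;
    }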
651 * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
656 * Update PTE no-retry flags when TF is enabled.
662 * Update no-retry flags with the corresponding TF in amdgpu_vm_pte_update_noretry_flags()
663 * no-retry combination. in amdgpu_vm_pte_update_noretry_flags()
667 *flags |= adev->gmc.noretry_flags; in amdgpu_vm_pte_update_noretry_flags()
672 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
674 * Make sure to set the right flags for the PTEs at the desired level.
678 unsigned int level, in amdgpu_vm_pte_update_flags() argument
683 struct amdgpu_device *adev = params->adev; in amdgpu_vm_pte_update_flags()
685 if (level != AMDGPU_VM_PTB) { in amdgpu_vm_pte_update_flags()
686 flags |= AMDGPU_PDE_PTE_FLAG(params->adev); in amdgpu_vm_pte_update_flags()
687 amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags); in amdgpu_vm_pte_update_flags()
689 } else if (adev->asic_type >= CHIP_VEGA10 && in amdgpu_vm_pte_update_flags()
691 !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) { in amdgpu_vm_pte_update_flags()
698 * Update no-retry flags to use the no-retry flag combination in amdgpu_vm_pte_update_flags()
704 if (level == AMDGPU_VM_PTB) in amdgpu_vm_pte_update_flags()
711 if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) && in amdgpu_vm_pte_update_flags()
712 adev->gmc.gmc_funcs->override_vm_pte_flags && in amdgpu_vm_pte_update_flags()
713 num_possible_nodes() > 1 && !params->pages_addr && params->allow_override) in amdgpu_vm_pte_update_flags()
714 amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags); in amdgpu_vm_pte_update_flags()
716 params->vm->update_funcs->update(params, pt, pe, addr, count, incr, in amdgpu_vm_pte_update_flags()
721 * amdgpu_vm_pte_fragment - get fragment for PTEs
738 * field in the PTE. When this field is set to a non-zero value, page in amdgpu_vm_pte_fragment()
759 if (params->adev->asic_type < CHIP_VEGA10) in amdgpu_vm_pte_fragment()
760 max_frag = params->adev->vm_manager.fragment_size; in amdgpu_vm_pte_fragment()
765 if (params->pages_addr) { in amdgpu_vm_pte_fragment()
772 *frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1); in amdgpu_vm_pte_fragment()
775 *frag_end = end & ~((1ULL << max_frag) - 1); in amdgpu_vm_pte_fragment()
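The fragment selection above picks the largest power-of-two block that both the alignment of start and the length of the range allow, capped at the per-ASIC maximum; with system pages (pages_addr) fragments are not used at all. A standalone sketch of the same arithmetic, with ffs()/fls64() replaced by compiler builtins and assumed values for the range and max_frag:

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-ins for the kernel's ffs() and fls64(). */
    static unsigned int ffs64(uint64_t x)  { return x ? __builtin_ctzll(x) + 1 : 0; }
    static unsigned int fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

    int main(void)
    {
        /* Assumed: a 2 MiB-aligned, 8 MiB mapping in 4 KiB pages, max_frag 9. */
        uint64_t start = 0x200000 >> 12, end = start + (0x800000 >> 12);
        unsigned int max_frag = 9;

        /* frag = min(ffs(start) - 1, fls64(end - start) - 1), capped at max_frag. */
        unsigned int frag = ffs64(start) - 1;
        unsigned int len_frag = fls64_(end - start) - 1;
        if (len_frag < frag)
            frag = len_frag;

        uint64_t frag_end;
        if (frag >= max_frag) {
            frag = max_frag;
            frag_end = end & ~((1ULL << max_frag) - 1);  /* as in the line above */
        } else {
            frag_end = start + (1ULL << frag);
        }

        printf("frag %u (%llu KiB blocks), frag_end pfn 0x%llx\n",
               frag, (unsigned long long)(4ULL << frag), (unsigned long long)frag_end);
        return 0;
    }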
782 * amdgpu_vm_ptes_update - make sure that page tables are valid
790 * Update the page tables in the range @start - @end.
793 * 0 for success, -EINVAL for failure.
799 struct amdgpu_device *adev = params->adev; in amdgpu_vm_ptes_update()
810 amdgpu_vm_pt_start(adev, params->vm, start, &cursor); in amdgpu_vm_ptes_update()
816 if (!params->unlocked) { in amdgpu_vm_ptes_update()
817 /* make sure that the page tables covering the in amdgpu_vm_ptes_update()
820 r = amdgpu_vm_pt_alloc(params->adev, params->vm, in amdgpu_vm_ptes_update()
821 &cursor, params->immediate); in amdgpu_vm_ptes_update()
826 shift = amdgpu_vm_pt_level_shift(adev, cursor.level); in amdgpu_vm_ptes_update()
827 parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1); in amdgpu_vm_ptes_update()
828 if (params->unlocked) { in amdgpu_vm_ptes_update()
832 } else if (adev->asic_type < CHIP_VEGA10 && in amdgpu_vm_ptes_update()
834 /* No huge page support before GMC v9 */ in amdgpu_vm_ptes_update()
835 if (cursor.level != AMDGPU_VM_PTB) { in amdgpu_vm_ptes_update()
837 return -ENOENT; in amdgpu_vm_ptes_update()
841 /* We can't use this level when the fragment size is in amdgpu_vm_ptes_update()
849 * shift we should go up one level and check it again. in amdgpu_vm_ptes_update()
852 return -EINVAL; in amdgpu_vm_ptes_update()
856 pt = cursor.entry->bo; in amdgpu_vm_ptes_update()
860 return -ENOENT; in amdgpu_vm_ptes_update()
863 * level. in amdgpu_vm_ptes_update()
866 return -EINVAL; in amdgpu_vm_ptes_update()
868 pt = cursor.entry->bo; in amdgpu_vm_ptes_update()
876 mask = amdgpu_vm_pt_entries_mask(adev, cursor.level); in amdgpu_vm_ptes_update()
879 if (cursor.level < AMDGPU_VM_PTB && params->unlocked) in amdgpu_vm_ptes_update()
881 * An unlocked unmap from the MMU notifier callback hit a huge page, so the leaf here is a PDE entry, in amdgpu_vm_ptes_update()
887 entry_end += cursor.pfn & ~(entry_end - 1); in amdgpu_vm_ptes_update()
891 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_ptes_update()
893 unsigned int nptes = (upd_end - frag_start) >> shift; in amdgpu_vm_ptes_update()
896 /* This can happen when we set higher level PDs to in amdgpu_vm_ptes_update()
904 vm->task_info ? vm->task_info->tgid : 0, in amdgpu_vm_ptes_update()
905 vm->immediate.fence_context); in amdgpu_vm_ptes_update()
907 cursor.level, pe_start, dst, in amdgpu_vm_ptes_update()
927 * This is the maximum you can free, because all other page tables are not in amdgpu_vm_ptes_update()
932 if (cursor.entry->bo) { in amdgpu_vm_ptes_update()
933 params->needs_flush = true; in amdgpu_vm_ptes_update()
940 /* or just move on to the next on the same level. */ in amdgpu_vm_ptes_update()
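Inside the walker, each update is first clamped to the reach of the current page table: pe_start is the byte offset of the first touched entry, and entry_end is the first pfn past this table (or past the requested range, whichever comes first). A self-contained sketch of that clamping for an assumed 512-entry leaf table:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed PTB geometry: shift 0 and a 0x1ff mask, i.e. 512 pfns per table. */
        const unsigned int shift = 0;
        const uint64_t mask = 0x1ff;
        uint64_t pfn = 1000, end = 2600;  /* arbitrary example range */

        while (pfn < end) {
            uint64_t pe_start = ((pfn >> shift) & mask) * 8;   /* first PE byte offset */
            uint64_t entry_end = (mask + 1) << shift;          /* span of one table */
            entry_end += pfn & ~(entry_end - 1);               /* end of this table */
            if (entry_end > end)
                entry_end = end;                               /* min(entry_end, end) */

            printf("chunk: pfn %llu..%llu, pe_start %llu\n",
                   (unsigned long long)pfn, (unsigned long long)entry_end,
                   (unsigned long long)pe_start);
            pfn = entry_end;
        }
        return 0;
    }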
949 * amdgpu_vm_pt_map_tables - make the root PD BO CPU accessible
953 * Make the root page directory and everything below it CPU accessible.
965 if (entry->bo) { in amdgpu_vm_pt_map_tables()
966 bo = to_amdgpu_bo_vm(entry->bo); in amdgpu_vm_pt_map_tables()
967 r = vm->update_funcs->map_table(bo); in amdgpu_vm_pt_map_tables()