Lines matching full:last

77 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
95 prange, prange->start, prange->last); in svm_range_unlink()
104 if (prange->it_node.start != 0 && prange->it_node.last != 0) in svm_range_unlink()
112 prange, prange->start, prange->last); in svm_range_add_notifier_locked()
131 prange, prange->start, prange->last); in svm_range_add_to_svms()
135 prange->it_node.last = prange->last; in svm_range_add_to_svms()
144 prange->notifier.interval_tree.last >> PAGE_SHIFT); in svm_range_remove_notifier()
147 prange->notifier.interval_tree.last != 0) in svm_range_remove_notifier()
281 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT; in svm_range_free()
286 prange->start, prange->last); in svm_range_free()
325 uint64_t last, bool update_mem_usage) in svm_range_new() argument
327 uint64_t size = last - start + 1; in svm_range_new()
346 prange->last = last; in svm_range_new()
366 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last); in svm_range_new()
398 prange->start, prange->last); in svm_range_bo_release()
516 prange->svms, prange->start, prange->last); in svm_range_validate_svm_bo()
568 prange->start, prange->last); in svm_range_vram_node_new()
986 uint64_t start, uint64_t last) in svm_range_split_pages() argument
988 uint64_t npages = last - start + 1; in svm_range_split_pages()
1007 uint64_t start, uint64_t last) in svm_range_split_nodes() argument
1009 uint64_t npages = last - start + 1; in svm_range_split_nodes()
1012 new->svms, new, new->start, start, last); in svm_range_split_nodes()
1037 * @last: the last address, in pages, to which the old range is adjusted
1041 * start to last
1048 uint64_t start, uint64_t last) in svm_range_split_adjust() argument
1053 new->svms, new->start, old->start, old->last, start, last); in svm_range_split_adjust()
1056 new->last > old->last) { in svm_range_split_adjust()
1057 WARN_ONCE(1, "invalid new range start or last\n"); in svm_range_split_adjust()
1061 r = svm_range_split_pages(new, old, start, last); in svm_range_split_adjust()
1066 r = svm_range_split_nodes(new, old, start, last); in svm_range_split_adjust()
1071 old->npages = last - start + 1; in svm_range_split_adjust()
1073 old->last = last; in svm_range_split_adjust()
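The split helpers above all lean on the same inclusive-bounds arithmetic: a range [start, last] holds last - start + 1 pages, which is why that expression opens both svm_range_split_pages() and svm_range_split_nodes(). Below is a minimal userspace sketch of how the per-page metadata array might be carved up when a range is split; the names are illustrative, one metadata word per page is assumed, and the kernel's svm_range_split_array() additionally handles per-GPU dma_addr arrays and allocation-failure unwinding that are omitted here.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Toy split: old keeps pages [start, last]; the pages that belong to the
 * new range (starting at new_start) move into a freshly allocated array.
 * Illustrative only; not the kernel's svm_range_split_array(). */
static uint64_t *toy_split_array(uint64_t *old_arr, uint64_t old_start,
				 uint64_t start, uint64_t npages,
				 uint64_t new_start, uint64_t new_npages)
{
	uint64_t *new_arr = malloc(new_npages * sizeof(*new_arr));

	if (!new_arr)
		return NULL;
	/* Entries that move to the new range. */
	memcpy(new_arr, old_arr + (new_start - old_start),
	       new_npages * sizeof(*new_arr));
	/* Entries the old range keeps, compacted to index 0. */
	memmove(old_arr, old_arr + (start - old_start),
		npages * sizeof(*old_arr));
	return new_arr;
}

int main(void)
{
	uint64_t pages[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; /* pages 100..107 */
	/* Keep [100, 103]; move [104, 107] into the new range. */
	uint64_t *tail = toy_split_array(pages, 100, 100, 4, 104, 4);

	if (!tail)
		return 1;
	printf("old keeps %llu..%llu, new starts at %llu\n",
	       (unsigned long long)pages[0], (unsigned long long)pages[3],
	       (unsigned long long)tail[0]);
	free(tail);
	return 0;
}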
1092 * @last: the last address, in pages, of the remaining range
1097 * prange ==> prange[start, last]
1098 * new range [last + 1, prange->last]
1100 * case 2: if last == prange->last
1101 * prange ==> prange[start, last]
1105 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1108 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last, in svm_range_split() argument
1112 uint64_t old_last = prange->last; in svm_range_split()
1117 old_start, old_last, start, last); in svm_range_split()
1119 if (old_start != start && old_last != last) in svm_range_split()
1121 if (start < old_start || last > old_last) in svm_range_split()
1126 *new = svm_range_new(svms, last + 1, old_last, false); in svm_range_split()
1132 r = svm_range_split_adjust(*new, prange, start, last); in svm_range_split()
1135 r, old_start, old_last, start, last); in svm_range_split()
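The checks at source lines 1119-1121 enforce the kerneldoc's "two cases only": the remaining [start, last] must be contained in the old range and keep exactly one of its original boundaries, otherwise the split is rejected with -EINVAL. A compact sketch of that validation and of how the new piece's bounds fall out of it, mirroring the doc comment rather than the kernel's struct plumbing:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Given old [old_start, old_last] and the remaining [start, last], compute
 * the split-off piece [*new_start, *new_last]. Sketch only. */
static int toy_split(uint64_t old_start, uint64_t old_last,
		     uint64_t start, uint64_t last,
		     uint64_t *new_start, uint64_t *new_last)
{
	if (old_start != start && old_last != last)
		return -EINVAL;	/* must keep one original boundary */
	if (start < old_start || last > old_last)
		return -EINVAL;	/* must stay inside the old range */

	if (old_start == start) {	/* case 1: new piece is the tail */
		*new_start = last + 1;
		*new_last = old_last;
	} else {			/* case 2: new piece is the head */
		*new_start = old_start;
		*new_last = start - 1;
	}
	return 0;
}

int main(void)
{
	uint64_t s, l;

	if (!toy_split(0x100, 0x1ff, 0x100, 0x17f, &s, &l))
		printf("case 1: new [0x%llx 0x%llx]\n",	/* [0x180 0x1ff] */
		       (unsigned long long)s, (unsigned long long)l);
	return 0;
}

svm_range_split_head() and svm_range_split_tail(), whose call sites follow, are thin wrappers that select case 2 and case 1 respectively.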
1163 int r = svm_range_split(prange, new_start, prange->last, &head); in svm_range_split_head()
1178 pchild, pchild->start, pchild->last, prange, op); in svm_range_add_child()
1316 uint64_t start, uint64_t last, in svm_range_unmap_from_gpu() argument
1321 pr_debug("[0x%llx 0x%llx]\n", start, last); in svm_range_unmap_from_gpu()
1324 last, init_pte_value, 0, 0, NULL, NULL, in svm_range_unmap_from_gpu()
1330 unsigned long last, uint32_t trigger) in svm_range_unmap_from_gpus() argument
1341 prange, prange->start, prange->last); in svm_range_unmap_from_gpus()
1345 if (prange->start == start && prange->last == last) { in svm_range_unmap_from_gpus()
1363 start, last, trigger); in svm_range_unmap_from_gpus()
1367 start, last, &fence); in svm_range_unmap_from_gpus()
1848 prange->svms, prange, prange->start, prange->last, in svm_range_restore_work()
1856 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, in svm_range_restore_work()
1907 * @last: last address of the range to evict, in pages
1920 unsigned long start, unsigned long last, in svm_range_evict() argument
1931 svms, prange->start, prange->last, start, last); in svm_range_evict()
1943 if (pchild->start <= last && pchild->last >= start) { in svm_range_evict()
1945 pchild->start, pchild->last); in svm_range_evict()
1954 if (prange->start <= last && prange->last >= start) in svm_range_evict()
1962 prange->svms, prange->start, prange->last); in svm_range_evict()
1982 prange->svms, start, last); in svm_range_evict()
1986 l = min(last, pchild->last); in svm_range_evict()
1992 l = min(last, prange->last); in svm_range_evict()
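Both the child and parent paths in svm_range_evict() clamp the invalidated [start, last] against each range with the same pattern: an overlap test on inclusive bounds, then max() for the start and min() for the end. A small standalone sketch of that intersection (names are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Inclusive intervals overlap iff each one starts no later than the other
 * ends; this is the same test as "pchild->start <= last &&
 * pchild->last >= start" above. */
static bool toy_overlaps(uint64_t a_start, uint64_t a_last,
			 uint64_t b_start, uint64_t b_last)
{
	return a_start <= b_last && a_last >= b_start;
}

int main(void)
{
	uint64_t start = 0x120, last = 0x300;	  /* invalidated pages */
	uint64_t r_start = 0x200, r_last = 0x3ff; /* one svm range */

	if (toy_overlaps(start, last, r_start, r_last)) {
		uint64_t s = start > r_start ? start : r_start;
		uint64_t l = last < r_last ? last : r_last;

		/* prints: affected [0x200 0x300] */
		printf("affected [0x%llx 0x%llx]\n",
		       (unsigned long long)s, (unsigned long long)l);
	}
	return 0;
}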
2004 new = svm_range_new(old->svms, old->start, old->last, false); in svm_range_clone()
2058 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, in svm_range_split_new() argument
2066 max_pages, start, last); in svm_range_split_new()
2068 while (last >= start) { in svm_range_split_new()
2069 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1); in svm_range_split_new()
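Source line 2069 is the whole chunking policy of svm_range_split_new(): each new range ends either at last or just below the next max_pages-aligned boundary, so no range ever straddles an aligned block. A worked standalone sketch of the loop (ALIGN_DOWN is re-derived here for a userspace build; the kernel version masks with a power-of-two alignment):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_DOWN(x, a) ((x) - ((x) % (a)))	/* stand-in for the kernel macro */

int main(void)
{
	uint64_t start = 300, last = 1500, max_pages = 512;

	while (last >= start) {
		uint64_t l = ALIGN_DOWN(start + max_pages, max_pages) - 1;

		if (l > last)
			l = last;
		/* prints [300 511], [512 1023], [1024 1500] */
		printf("new range [%llu %llu]\n",
		       (unsigned long long)start, (unsigned long long)l);
		start = l + 1;
	}
	return 0;
}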
2118 unsigned long last = start + size - 1UL; in svm_range_add() local
2126 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); in svm_range_add()
2134 node = interval_tree_iter_first(&svms->objects, start, last); in svm_range_add()
2140 node->last); in svm_range_add()
2143 next = interval_tree_iter_next(node, start, last); in svm_range_add()
2144 next_start = min(node->last, last) + 1; in svm_range_add()
2149 } else if (node->start < start || node->last > last) { in svm_range_add()
2173 if (node->last > last) { in svm_range_add()
2174 pr_debug("change old range last\n"); in svm_range_add()
2175 r = svm_range_split_tail(prange, last, in svm_range_add()
2181 /* The node is contained within start..last, in svm_range_add()
2201 if (start <= last) in svm_range_add()
2202 r = svm_range_split_new(svms, start, last, in svm_range_add()
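Taken together, the svm_range_add() fragments walk every existing range overlapping the requested [start, last]: overlapping nodes are split at the boundaries (the tail case is at source lines 2173-2175), the gap before each node becomes a brand-new range, and line 2144's next_start = min(node->last, last) + 1 advances the cursor. A simplified sketch of that walk over a sorted array in place of the kernel's interval tree:

#include <stdio.h>
#include <stdint.h>

struct toy_node { uint64_t start, last; };

int main(void)
{
	/* Existing ranges, sorted and non-overlapping. */
	struct toy_node nodes[] = { { 0x100, 0x17f }, { 0x200, 0x2ff } };
	uint64_t start = 0x140, last = 0x27f;	/* requested range */
	unsigned int i;

	for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		struct toy_node *n = &nodes[i];

		if (start > last || n->start > last)
			break;
		if (n->start > start)	/* gap before this node */
			printf("new range   [0x%llx 0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)(n->start - 1));
		/* reuse (and, in the kernel, split) the overlapped part */
		printf("reuse/split [0x%llx 0x%llx]\n",
		       (unsigned long long)(n->start > start ? n->start : start),
		       (unsigned long long)(n->last < last ? n->last : last));
		start = (n->last < last ? n->last : last) + 1;
	}
	if (start <= last)	/* trailing gap, as at source line 2201 */
		printf("new range   [0x%llx 0x%llx]\n",
		       (unsigned long long)start, (unsigned long long)last);
	return 0;
}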
2224 unsigned long last; in svm_range_update_notifier_and_interval_tree() local
2227 last = prange->notifier.interval_tree.last >> PAGE_SHIFT; in svm_range_update_notifier_and_interval_tree()
2229 if (prange->start == start && prange->last == last) in svm_range_update_notifier_and_interval_tree()
2233 prange->svms, prange, start, last, prange->start, in svm_range_update_notifier_and_interval_tree()
2234 prange->last); in svm_range_update_notifier_and_interval_tree()
2236 if (start != 0 && last != 0) { in svm_range_update_notifier_and_interval_tree()
2241 prange->it_node.last = prange->last; in svm_range_update_notifier_and_interval_tree()
2254 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2258 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2265 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2270 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2276 prange->start, prange->last); in svm_range_handle_list_op()
2282 prange, prange->start, prange->last); in svm_range_handle_list_op()
2338 prange->start, prange->last, prange->work_item.op); in svm_range_deferred_list_work()
2375 * last mm refcount, schedule release work to avoid circular locking in svm_range_deferred_list_work()
2406 prange, prange->start, prange->last, op); in svm_range_add_list_work()
2422 unsigned long last) in svm_range_unmap_split() argument
2429 prange->start, prange->last); in svm_range_unmap_split()
2432 if (start > prange->last || last < prange->start) in svm_range_unmap_split()
2438 if (last < tail->last) in svm_range_unmap_split()
2439 svm_range_split(tail, last + 1, tail->last, &head); in svm_range_unmap_split()
2455 unsigned long start, unsigned long last) in svm_range_unmap_from_cpu() argument
2481 prange, prange->start, prange->last, start, last); in svm_range_unmap_from_cpu()
2516 unmap_parent = start <= prange->start && last >= prange->last; in svm_range_unmap_from_cpu()
2521 l = min(last, pchild->last); in svm_range_unmap_from_cpu()
2524 svm_range_unmap_split(mm, prange, pchild, start, last); in svm_range_unmap_from_cpu()
2528 l = min(last, prange->last); in svm_range_unmap_from_cpu()
2531 svm_range_unmap_split(mm, prange, prange, start, last); in svm_range_unmap_from_cpu()
2570 unsigned long last; in svm_range_cpu_invalidate_pagetables() local
2578 last = mni->interval_tree.last; in svm_range_cpu_invalidate_pagetables()
2580 last = min(last, range->end - 1) >> PAGE_SHIFT; in svm_range_cpu_invalidate_pagetables()
2582 start, last, range->start >> PAGE_SHIFT, in svm_range_cpu_invalidate_pagetables()
2585 mni->interval_tree.last >> PAGE_SHIFT, range->event); in svm_range_cpu_invalidate_pagetables()
2594 svm_range_unmap_from_cpu(mni->mm, prange, start, last); in svm_range_cpu_invalidate_pagetables()
2597 svm_range_evict(prange, mni->mm, start, last, range->event); in svm_range_cpu_invalidate_pagetables()
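Source lines 2578-2580 convert the notifier's byte addresses into inclusive page numbers: range->end is exclusive, so the last covered byte is range->end - 1, and shifting an inclusive byte bound right by PAGE_SHIFT yields an inclusive page bound. A quick standalone check of that conversion, with PAGE_SHIFT assumed to be 12:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed for this sketch */

int main(void)
{
	uint64_t tree_start = 0x1000, tree_last = 0x4fff; /* inclusive bytes */
	uint64_t inv_start = 0x2000, inv_end = 0x4000;	  /* [start, end) */

	/* Clamp to the invalidated window, then convert to pages. */
	uint64_t start = (tree_start > inv_start ? tree_start : inv_start)
			 >> PAGE_SHIFT;
	uint64_t last = (tree_last < inv_end - 1 ? tree_last : inv_end - 1)
			>> PAGE_SHIFT;

	/* prints: pages [2 3]; page 4 is out because end 0x4000 is exclusive */
	printf("pages [%llu %llu]\n",
	       (unsigned long long)start, (unsigned long long)last);
	return 0;
}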
2631 addr, prange->start, prange->last, node->start, node->last); in svm_range_from_addr()
2633 if (addr >= prange->start && addr <= prange->last) { in svm_range_from_addr()
2639 if (addr >= pchild->start && addr <= pchild->last) { in svm_range_from_addr()
2641 addr, pchild->start, pchild->last); in svm_range_from_addr()
2721 unsigned long *start, unsigned long *last, in svm_range_get_range_boundaries() argument
2746 /* Last range that ends before the fault address */ in svm_range_get_range_boundaries()
2749 /* Last range must end before addr because in svm_range_get_range_boundaries()
2756 if (node->last >= addr) { in svm_range_get_range_boundaries()
2760 start_limit = max(start_limit, node->last + 1); in svm_range_get_range_boundaries()
2764 *last = end_limit - 1; in svm_range_get_range_boundaries()
2768 *start, *last, *is_heap_stack); in svm_range_get_range_boundaries()
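svm_range_get_range_boundaries() grows a faulting address into the largest usable [start, last]: the preceding registered range pushes the start limit past its own last page (the max() at source line 2760) while the following range, or the VMA end, caps the end limit, and *last = end_limit - 1 because the limit is exclusive. A compact sketch of that clamping with hypothetical neighbor and VMA bounds:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x250;				/* faulting page */
	uint64_t vma_start = 0x100, vma_end = 0x500;	/* VMA, [start, end) */
	uint64_t prev_last = 0x17f;	/* last page of the preceding range */
	uint64_t next_start = 0x400;	/* first page of the following range */

	uint64_t start_limit = vma_start;
	uint64_t end_limit = vma_end;

	if (prev_last + 1 > start_limit)	/* neighbor below the fault */
		start_limit = prev_last + 1;
	if (next_start < end_limit)		/* neighbor above the fault */
		end_limit = next_start;

	/* prints: unregistered range [0x180 0x3ff] around 0x250 */
	printf("unregistered range [0x%llx 0x%llx] around 0x%llx\n",
	       (unsigned long long)start_limit,
	       (unsigned long long)(end_limit - 1),
	       (unsigned long long)addr);
	return 0;
}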
2774 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, in svm_range_check_vm_userptr() argument
2804 last << PAGE_SHIFT, in svm_range_check_vm_userptr()
2811 start, last); in svm_range_check_vm_userptr()
2831 unsigned long start, last; in svm_range_create_unregistered_range() local
2838 if (svm_range_get_range_boundaries(p, addr, &start, &last, in svm_range_create_unregistered_range()
2842 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); in svm_range_create_unregistered_range()
2844 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l); in svm_range_create_unregistered_range()
2852 last = addr; in svm_range_create_unregistered_range()
2855 prange = svm_range_new(&p->svms, start, last, true); in svm_range_create_unregistered_range()
2901 svms, prange, prange->start, prange->last); in svm_range_skip_recover()
2907 svms, prange, prange->start, prange->last); in svm_range_skip_recover()
2958 unsigned long start, last, size; in svm_range_restore_pages() local
3079 svms, prange->start, prange->last); in svm_range_restore_pages()
3104 svms, prange->start, prange->last); in svm_range_restore_pages()
3110 svms, prange->start, prange->last, best_loc, in svm_range_restore_pages()
3119 last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); in svm_range_restore_pages()
3122 r = svm_migrate_to_vram(prange, best_loc, start, last, in svm_range_restore_pages()
3131 r = svm_migrate_vram_to_ram(prange, mm, start, last, in svm_range_restore_pages()
3137 r = svm_migrate_vram_to_ram(prange, mm, start, last, in svm_range_restore_pages()
3142 r, svms, start, last); in svm_range_restore_pages()
3149 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false, in svm_range_restore_pages()
3153 r, svms, start, last); in svm_range_restore_pages()
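Source line 3119 sizes the fault-recovery window in svm_range_restore_pages(): migration works in size-page granules, so the window extends up to the granule boundary above the faulting address (ALIGN(addr + 1, size) - 1) but never past prange->last; the matching downward-aligned start computation does not contain "last" and so does not appear in this listing. A worked sketch, with the ALIGN macros re-derived for a userspace build:

#include <stdio.h>
#include <stdint.h>

/* Generic stand-ins for the kernel's power-of-two ALIGN macros. */
#define ALIGN(x, a)		(((x) + (a) - 1) / (a) * (a))
#define ALIGN_DOWN(x, a)	((x) - ((x) % (a)))

int main(void)
{
	uint64_t addr = 0x2a7;	/* faulting page */
	uint64_t size = 512;	/* migration granularity, in pages */
	uint64_t p_start = 0x100, p_last = 0x3ff;	/* prange bounds */

	uint64_t start = ALIGN_DOWN(addr, size);
	uint64_t last = ALIGN(addr + 1, size) - 1;

	if (start < p_start)	/* clamp the window to the range */
		start = p_start;
	if (last > p_last)
		last = p_last;

	/* prints: migrate window [0x200 0x3ff] for fault at 0x2a7 */
	printf("migrate window [0x%llx 0x%llx] for fault at 0x%llx\n",
	       (unsigned long long)start, (unsigned long long)last,
	       (unsigned long long)addr);
	return 0;
}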
3195 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT; in svm_range_switch_xnack_reserve_mem()
3208 size = (prange->last - prange->start + 1) << PAGE_SHIFT; in svm_range_switch_xnack_reserve_mem()
3304 * @last: range last address, in pages
3306 * @bo_l: mapping last address in pages if address range already mapped
3321 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, in svm_range_check_vm() argument
3340 node = interval_tree_iter_first(&vm->va, start, last); in svm_range_check_vm()
3343 start, last); in svm_range_check_vm()
3348 *bo_l = mapping->last; in svm_range_check_vm()
3470 p->xnack_enabled, &p->svms, prange->start, prange->last, in svm_range_best_prefetch_location()
3520 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, in svm_range_trigger_migration()
3526 r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last, in svm_range_trigger_migration()
3577 prange->start, prange->last); in svm_range_evict_svm_bo_worker()
3585 prange->start, prange->last, in svm_range_evict_svm_bo_worker()
3607 /* This is the last reference to svm_bo, after svm_range_vram_node_free in svm_range_evict_svm_bo_worker()
3610 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n"); in svm_range_evict_svm_bo_worker()
3673 prange->last); in svm_range_set_attr()
3709 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, in svm_range_set_attr()
3722 prange, prange->start, prange->last); in svm_range_set_attr()
3724 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, in svm_range_set_attr()
3758 uint64_t last = start + size - 1UL; in svm_range_get_attr() local
3821 node = interval_tree_iter_first(&svms->objects, start, last); in svm_range_get_attr()
3842 next = interval_tree_iter_next(node, start, last); in svm_range_get_attr()