Lines Matching +full:row +full:-hold (drivers/gpu/drm/i915/i915_vma.c; each entry is the source line number, the matched line, and the enclosing function)

26 #include <linux/dma-fence-array.h>
54 if (kref_read(&vma->vm->ref)) in assert_vma_held_evict()
55 assert_object_held_shared(vma->obj); in assert_vma_held_evict()
78 if (!vma->node.stack) { in vma_print_allocator()
79 drm_dbg(vma->obj->base.dev, in vma_print_allocator()
81 vma->node.start, vma->node.size, reason); in vma_print_allocator()
85 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); in vma_print_allocator()
86 drm_dbg(vma->obj->base.dev, in vma_print_allocator()
88 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
106 return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT; in __i915_vma_active()
119 struct i915_vma *pos = ERR_PTR(-E2BIG); in vma_create()
125 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm); in vma_create()
129 return ERR_PTR(-ENOMEM); in vma_create()
131 vma->ops = &vm->vma_ops; in vma_create()
132 vma->obj = obj; in vma_create()
133 vma->size = obj->base.size; in vma_create()
134 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; in vma_create()
136 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0); in vma_create()
141 might_lock(&vma->active.mutex); in vma_create()
145 INIT_LIST_HEAD(&vma->closed_link); in vma_create()
146 INIT_LIST_HEAD(&vma->obj_link); in vma_create()
147 RB_CLEAR_NODE(&vma->obj_node); in vma_create()
149 if (view && view->type != I915_GTT_VIEW_NORMAL) { in vma_create()
150 vma->gtt_view = *view; in vma_create()
151 if (view->type == I915_GTT_VIEW_PARTIAL) { in vma_create()
153 view->partial.offset, in vma_create()
154 view->partial.size, in vma_create()
155 obj->base.size >> PAGE_SHIFT)); in vma_create()
156 vma->size = view->partial.size; in vma_create()
157 vma->size <<= PAGE_SHIFT; in vma_create()
158 GEM_BUG_ON(vma->size > obj->base.size); in vma_create()
159 } else if (view->type == I915_GTT_VIEW_ROTATED) { in vma_create()
160 vma->size = intel_rotation_info_size(&view->rotated); in vma_create()
161 vma->size <<= PAGE_SHIFT; in vma_create()
162 } else if (view->type == I915_GTT_VIEW_REMAPPED) { in vma_create()
163 vma->size = intel_remapped_info_size(&view->remapped); in vma_create()
164 vma->size <<= PAGE_SHIFT; in vma_create()
168 if (unlikely(vma->size > vm->total)) in vma_create()
171 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); in vma_create()
173 err = mutex_lock_interruptible(&vm->mutex); in vma_create()
179 vma->vm = vm; in vma_create()
180 list_add_tail(&vma->vm_link, &vm->unbound_list); in vma_create()
182 spin_lock(&obj->vma.lock); in vma_create()
184 if (unlikely(overflows_type(vma->size, u32))) in vma_create()
187 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, in vma_create()
190 if (unlikely(vma->fence_size < vma->size || /* overflow */ in vma_create()
191 vma->fence_size > vm->total)) in vma_create()
194 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); in vma_create()
196 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, in vma_create()
199 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); in vma_create()
205 p = &obj->vma.tree.rb_node; in vma_create()
219 p = &rb->rb_right; in vma_create()
221 p = &rb->rb_left; in vma_create()
225 rb_link_node(&vma->obj_node, rb, p); in vma_create()
226 rb_insert_color(&vma->obj_node, &obj->vma.tree); in vma_create()
230 * We put the GGTT vma at the start of the vma-list, followed in vma_create()
235 list_add(&vma->obj_link, &obj->vma.list); in vma_create()
237 list_add_tail(&vma->obj_link, &obj->vma.list); in vma_create()
239 spin_unlock(&obj->vma.lock); in vma_create()
240 mutex_unlock(&vm->mutex); in vma_create()
245 spin_unlock(&obj->vma.lock); in vma_create()
246 list_del_init(&vma->vm_link); in vma_create()
247 mutex_unlock(&vm->mutex); in vma_create()
260 rb = obj->vma.tree.rb_node; in i915_vma_lookup()
270 rb = rb->rb_right; in i915_vma_lookup()
272 rb = rb->rb_left; in i915_vma_lookup()
279 * i915_vma_instance - return the singleton instance of the VMA
299 GEM_BUG_ON(!kref_read(&vm->ref)); in i915_vma_instance()
301 spin_lock(&obj->vma.lock); in i915_vma_instance()
303 spin_unlock(&obj->vma.lock); in i915_vma_instance()
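
The kernel-doc at line 279 describes i915_vma_instance() as returning the singleton VMA for an (object, address space, view) triple, creating it on first lookup. Below is a minimal, hedged sketch of the usual caller pattern; the helper name example_get_and_pin_vma() and the PIN_USER flag choice are illustrative assumptions, while i915_vma_instance(), i915_vma_pin() and i915_vma_unpin() are the real entry points referenced by this listing.

#include "i915_drv.h"
#include "i915_vma.h"

/* Sketch only: look up (or create) the default-view VMA and pin it for
 * GPU access through this address space.
 */
static struct i915_vma *
example_get_and_pin_vma(struct drm_i915_gem_object *obj,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);	/* singleton per (obj, vm, view) */
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);	/* 0,0: default size/alignment */
	if (err)
		return ERR_PTR(err);

	return vma;	/* balanced later with i915_vma_unpin(vma) */
}
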
327 struct i915_vma_resource *vma_res = vw->vma_res; in __vma_bind()
335 if (i915_gem_object_has_unknown_state(vw->obj)) in __vma_bind()
338 vma_res->ops->bind_vma(vma_res->vm, &vw->stash, in __vma_bind()
339 vma_res, vw->pat_index, vw->flags); in __vma_bind()
346 if (vw->obj) in __vma_release()
347 i915_gem_object_put(vw->obj); in __vma_release()
349 i915_vm_free_pt_stash(vw->vm, &vw->stash); in __vma_release()
350 if (vw->vma_res) in __vma_release()
351 i915_vma_resource_put(vw->vma_res); in __vma_release()
368 dma_fence_work_init(&vw->base, &bind_ops); in i915_vma_work()
369 vw->base.dma.error = -EAGAIN; /* disable the worker by default */ in i915_vma_work()
378 if (rcu_access_pointer(vma->active.excl.fence)) { in i915_vma_wait_for_bind()
382 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); in i915_vma_wait_for_bind()
396 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl); in i915_vma_verify_bind_complete()
403 err = fence->error; in i915_vma_verify_bind_complete()
405 err = -EBUSY; in i915_vma_verify_bind_complete()
419 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_resource_init_from_vma()
421 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes, in i915_vma_resource_init_from_vma()
422 obj->mm.rsgt, i915_gem_object_is_readonly(obj), in i915_vma_resource_init_from_vma()
423 i915_gem_object_is_lmem(obj), obj->mm.region, in i915_vma_resource_init_from_vma()
424 vma->ops, vma->private, __i915_vma_offset(vma), in i915_vma_resource_init_from_vma()
425 __i915_vma_size(vma), vma->size, vma->guard); in i915_vma_resource_init_from_vma()
429 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space. in i915_vma_bind()
437 * DMA addresses are taken from the scatter-gather table of this object (or of
438 * this VMA in case of non-default GGTT views) and PTE entries set up.
451 lockdep_assert_held(&vma->vm->mutex); in i915_vma_bind()
452 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_bind()
453 GEM_BUG_ON(vma->size > i915_vma_size(vma)); in i915_vma_bind()
455 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, in i915_vma_bind()
456 vma->node.size, in i915_vma_bind()
457 vma->vm->total))) { in i915_vma_bind()
459 return -ENODEV; in i915_vma_bind()
464 return -EINVAL; in i915_vma_bind()
470 vma_flags = atomic_read(&vma->flags); in i915_vma_bind()
479 GEM_BUG_ON(!atomic_read(&vma->pages_count)); in i915_vma_bind()
482 if (work && bind_flags & vma->vm->bind_async_flags) in i915_vma_bind()
483 ret = i915_vma_resource_bind_dep_await(vma->vm, in i915_vma_bind()
484 &work->base.chain, in i915_vma_bind()
485 vma->node.start, in i915_vma_bind()
486 vma->node.size, in i915_vma_bind()
492 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start, in i915_vma_bind()
493 vma->node.size, true); in i915_vma_bind()
499 if (vma->resource || !vma_res) { in i915_vma_bind()
505 vma->resource = vma_res; in i915_vma_bind()
508 if (work && bind_flags & vma->vm->bind_async_flags) { in i915_vma_bind()
511 work->vma_res = i915_vma_resource_get(vma->resource); in i915_vma_bind()
512 work->pat_index = pat_index; in i915_vma_bind()
513 work->flags = bind_flags; in i915_vma_bind()
521 * part of the obj->resv->excl_fence as it only affects in i915_vma_bind()
524 prev = i915_active_set_exclusive(&vma->active, &work->base.dma); in i915_vma_bind()
526 __i915_sw_fence_await_dma_fence(&work->base.chain, in i915_vma_bind()
528 &work->cb); in i915_vma_bind()
532 work->base.dma.error = 0; /* enable the queue_work() */ in i915_vma_bind()
533 work->obj = i915_gem_object_get(vma->obj); in i915_vma_bind()
535 ret = i915_gem_object_wait_moving_fence(vma->obj, true); in i915_vma_bind()
537 i915_vma_resource_free(vma->resource); in i915_vma_bind()
538 vma->resource = NULL; in i915_vma_bind()
542 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index, in i915_vma_bind()
546 atomic_or(bind_flags, &vma->flags); in i915_vma_bind()
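
As the lines above show, i915_vma_bind() either programs the PTEs synchronously through vma->ops->bind_vma() or, when the bind flags intersect vm->bind_async_flags, hands the work to a dma-fence worker whose completion becomes the vma's exclusive active fence (which i915_vma_wait_for_bind() at line 378 waits on); in both cases the bind flags are OR'd into vma->flags. Callers normally reach this through the pin path; the hedged sketch below only shows how the resulting bind state can be queried, using the real i915_vma_is_bound() helper, with an illustrative function name.

#include <linux/printk.h>
#include "i915_vma.h"

/* Sketch only: report how a vma ended up bound after a successful pin.
 * I915_VMA_LOCAL_BIND corresponds to PIN_USER, I915_VMA_GLOBAL_BIND to
 * PIN_GLOBAL.
 */
static void example_check_bind(struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		pr_info("vma is bound into the GGTT\n");

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		pr_info("vma is bound into a ppGTT\n");
}
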
555 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY)) in i915_vma_pin_iomap()
556 return IOMEM_ERR_PTR(-EINVAL); in i915_vma_pin_iomap()
562 ptr = READ_ONCE(vma->iomap); in i915_vma_pin_iomap()
566 * instead, which already supports mapping non-contiguous chunks in i915_vma_pin_iomap()
570 if (i915_gem_object_is_lmem(vma->obj)) { in i915_vma_pin_iomap()
571 ptr = i915_gem_object_lmem_io_map(vma->obj, 0, in i915_vma_pin_iomap()
572 vma->obj->base.size); in i915_vma_pin_iomap()
574 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, in i915_vma_pin_iomap()
579 i915_gem_object_pin_map(vma->obj, I915_MAP_WC); in i915_vma_pin_iomap()
588 err = -ENOMEM; in i915_vma_pin_iomap()
592 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { in i915_vma_pin_iomap()
594 __i915_gem_object_release_map(vma->obj); in i915_vma_pin_iomap()
597 ptr = vma->iomap; in i915_vma_pin_iomap()
621 intel_gt_flush_ggtt_writes(vma->vm->gt); in i915_vma_flush_writes()
626 GEM_BUG_ON(vma->iomap == NULL); in i915_vma_unpin_iomap()
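
The lines from i915_vma_pin_iomap() (555-597) show it returning a CPU-visible write-combining mapping of a GGTT-pinned vma, preferring an lmem io map, then the GGTT aperture io_mapping, then a WC vmap of the backing pages; i915_vma_unpin_iomap() releases it, flushing GGTT writes first. A hedged usage sketch follows; example_poke_through_aperture() is an illustrative name, the pin_iomap/unpin_iomap pairing is the real API.

#include <linux/io.h>
#include "i915_vma.h"

/* Sketch only: write one dword through the CPU mapping of a vma that is
 * already pinned in the GGTT.
 */
static int example_poke_through_aperture(struct i915_vma *vma, u32 value)
{
	void __iomem *map;

	map = i915_vma_pin_iomap(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);

	writel(value, map);	/* first dword of the mapping */

	i915_vma_unpin_iomap(vma);
	return 0;
}
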
645 obj = vma->obj; in i915_vma_unpin_and_release()
659 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_misplaced()
684 vma->guard < (flags & PIN_OFFSET_MASK)) in i915_vma_misplaced()
695 GEM_BUG_ON(!vma->fence_size); in __i915_vma_set_map_and_fenceable()
697 fenceable = (i915_vma_size(vma) >= vma->fence_size && in __i915_vma_set_map_and_fenceable()
698 IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment)); in __i915_vma_set_map_and_fenceable()
700 mappable = i915_ggtt_offset(vma) + vma->fence_size <= in __i915_vma_set_map_and_fenceable()
701 i915_vm_to_ggtt(vma->vm)->mappable_end; in __i915_vma_set_map_and_fenceable()
711 struct drm_mm_node *node = &vma->node; in i915_gem_valid_gtt_space()
721 if (!i915_vm_has_cache_coloring(vma->vm)) in i915_gem_valid_gtt_space()
726 GEM_BUG_ON(list_empty(&node->node_list)); in i915_gem_valid_gtt_space()
742 * i915_vma_insert - finds a slot for the vma in its address space
765 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
768 size = max(size, vma->size); in i915_vma_insert()
769 alignment = max_t(typeof(alignment), alignment, vma->display_alignment); in i915_vma_insert()
771 size = max_t(typeof(size), size, vma->fence_size); in i915_vma_insert()
773 alignment, vma->fence_alignment); in i915_vma_insert()
780 guard = vma->guard; /* retain guard across rebinds */ in i915_vma_insert()
795 end = vma->vm->total; in i915_vma_insert()
797 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); in i915_vma_insert()
799 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); in i915_vma_insert()
802 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj)); in i915_vma_insert()
809 if (size > end - 2 * guard) { in i915_vma_insert()
810 drm_dbg(vma->obj->base.dev, in i915_vma_insert()
813 return -ENOSPC; in i915_vma_insert()
818 if (i915_vm_has_cache_coloring(vma->vm)) in i915_vma_insert()
819 color = vma->obj->pat_index; in i915_vma_insert()
825 return -EINVAL; in i915_vma_insert()
830 * of the vma->node due to the guard pages. in i915_vma_insert()
832 if (offset < guard || offset + size > end - guard) in i915_vma_insert()
833 return -ENOSPC; in i915_vma_insert()
835 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node, in i915_vma_insert()
837 offset - guard, in i915_vma_insert()
851 if (upper_32_bits(end - 1) && in i915_vma_insert()
852 vma->page_sizes.sg > I915_GTT_PAGE_SIZE && in i915_vma_insert()
853 !HAS_64K_PAGES(vma->vm->i915)) { in i915_vma_insert()
855 * We can't mix 64K and 4K PTEs in the same page-table in i915_vma_insert()
861 rounddown_pow_of_two(vma->page_sizes.sg | in i915_vma_insert()
867 * also checks that we exclude the aliasing-ppgtt. in i915_vma_insert()
873 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) in i915_vma_insert()
877 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node, in i915_vma_insert()
883 GEM_BUG_ON(vma->node.start < start); in i915_vma_insert()
884 GEM_BUG_ON(vma->node.start + vma->node.size > end); in i915_vma_insert()
886 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
889 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_insert()
890 vma->guard = guard; in i915_vma_insert()
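
Per the kernel-doc at line 742 and the body above, i915_vma_insert() first widens the requested size and alignment to satisfy display and fence constraints, clamps the usable range to the mappable aperture when needed, and requires room for a guard band on each side before reserving or inserting a drm_mm node. The standalone model below captures only that arithmetic; all names are illustrative, and the real code additionally handles cache colouring, PIN_ZONE_4G, 64K-page alignment and PIN_OFFSET_FIXED placement.

#include <linux/minmax.h>
#include <linux/types.h>

/* Model of the request widening in i915_vma_insert() (lines 768-809). */
static bool model_insert_limits(u64 *size, u64 *alignment, u64 *end,
				u64 vma_size, u64 display_alignment,
				u64 fence_size, u64 fence_alignment,
				u64 guard, bool mappable,
				u64 vm_total, u64 mappable_end)
{
	*size = max(*size, vma_size);				/* line 768 */
	*alignment = max(*alignment, display_alignment);	/* line 769 */
	*end = vm_total;					/* line 795 */

	if (mappable) {						/* PIN_MAPPABLE */
		*size = max(*size, fence_size);			/* line 771 */
		*alignment = max(*alignment, fence_alignment);	/* line 773 */
		*end = min(*end, mappable_end);			/* line 797 */
	}

	/* a guard band is reserved on both sides of the node (line 809) */
	return *size <= *end - 2 * guard;
}
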
898 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_detach()
903 * vma, we can drop its hold on the backing storage and allow in i915_vma_detach()
906 list_move_tail(&vma->vm_link, &vma->vm->unbound_list); in i915_vma_detach()
913 bound = atomic_read(&vma->flags); in try_qad_pin()
931 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); in try_qad_pin()
942 unsigned int column, row; in rotate_pages() local
948 src_idx = src_stride * (height - 1) + column + offset; in rotate_pages()
949 for (row = 0; row < height; row++) { in rotate_pages()
950 st->nents++; in rotate_pages()
961 src_idx -= src_stride; in rotate_pages()
964 left = (dst_stride - height) * I915_GTT_PAGE_SIZE; in rotate_pages()
969 st->nents++; in rotate_pages()
990 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_rotate_pages()
993 int ret = -ENOMEM; in intel_rotate_pages()
1005 st->nents = 0; in intel_rotate_pages()
1006 sg = st->sgl; in intel_rotate_pages()
1008 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) in intel_rotate_pages()
1009 sg = rotate_pages(obj, rot_info->plane[i].offset, in intel_rotate_pages()
1010 rot_info->plane[i].width, rot_info->plane[i].height, in intel_rotate_pages()
1011 rot_info->plane[i].src_stride, in intel_rotate_pages()
1012 rot_info->plane[i].dst_stride, in intel_rotate_pages()
1021 …drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)… in intel_rotate_pages()
1022 obj->base.size, rot_info->plane[0].width, in intel_rotate_pages()
1023 rot_info->plane[0].height, size); in intel_rotate_pages()
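
rotate_pages() (lines 942-969) walks the source pages column by column: each destination column is filled from the bottom row of the source upwards, stepping back by src_stride pages per row, then padded out to dst_stride entries, which yields the 90-degree-rotated GTT layout. A standalone model of that index walk is sketched below; the names are illustrative and -1 stands in for the scratch/padding page used by the real code.

#include <linux/types.h>

/* Model of the rotated page order produced by rotate_pages(). */
static unsigned int model_rotated_order(s64 *out, unsigned int offset,
					unsigned int width, unsigned int height,
					unsigned int src_stride,
					unsigned int dst_stride)
{
	unsigned int column, row, n = 0;

	for (column = 0; column < width; column++) {
		/* start at the bottom row of this column (line 948) */
		s64 src_idx = (s64)src_stride * (height - 1) + column + offset;

		for (row = 0; row < height; row++) {
			out[n++] = src_idx;
			src_idx -= src_stride;	/* move one row up (line 961) */
		}

		/* pad the column out to dst_stride entries (lines 964-969) */
		for (row = height; row < dst_stride; row++)
			out[n++] = -1;
	}

	return n;	/* width * max(height, dst_stride) entries written */
}
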
1032 st->nents++; in add_padding_pages()
1055 unsigned int row; in remap_tiled_color_plane_pages() local
1063 for (row = 0; row < height; row++) { in remap_tiled_color_plane_pages()
1080 st->nents++; in remap_tiled_color_plane_pages()
1088 left -= length; in remap_tiled_color_plane_pages()
1091 offset += src_stride - width; in remap_tiled_color_plane_pages()
1093 left = (dst_stride - width) * I915_GTT_PAGE_SIZE; in remap_tiled_color_plane_pages()
1121 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), in remap_contiguous_pages()
1128 st->nents++; in remap_contiguous_pages()
1129 count -= len >> PAGE_SHIFT; in remap_contiguous_pages()
1169 if (rem_info->plane_alignment) in remap_color_plane_pages()
1170 alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; in remap_color_plane_pages()
1172 if (rem_info->plane[color_plane].linear) in remap_color_plane_pages()
1174 rem_info->plane[color_plane].offset, in remap_color_plane_pages()
1176 rem_info->plane[color_plane].size, in remap_color_plane_pages()
1182 rem_info->plane[color_plane].offset, in remap_color_plane_pages()
1184 rem_info->plane[color_plane].width, in remap_color_plane_pages()
1185 rem_info->plane[color_plane].height, in remap_color_plane_pages()
1186 rem_info->plane[color_plane].src_stride, in remap_color_plane_pages()
1187 rem_info->plane[color_plane].dst_stride, in remap_color_plane_pages()
1199 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_remap_pages()
1203 int ret = -ENOMEM; in intel_remap_pages()
1215 st->nents = 0; in intel_remap_pages()
1216 sg = st->sgl; in intel_remap_pages()
1218 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) in intel_remap_pages()
1229 …drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages… in intel_remap_pages()
1230 obj->base.size, rem_info->plane[0].width, in intel_remap_pages()
1231 rem_info->plane[0].height, size); in intel_remap_pages()
1242 unsigned int count = view->partial.size; in intel_partial_pages()
1243 int ret = -ENOMEM; in intel_partial_pages()
1253 st->nents = 0; in intel_partial_pages()
1255 sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); in intel_partial_pages()
1274 * The vma->pages are only valid within the lifespan of the borrowed in __i915_vma_get_pages()
1275 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so in __i915_vma_get_pages()
1276 * must be the vma->pages. A simple rule is that vma->pages must only in __i915_vma_get_pages()
1277 * be accessed when the obj->mm.pages are pinned. in __i915_vma_get_pages()
1279 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); in __i915_vma_get_pages()
1281 switch (vma->gtt_view.type) { in __i915_vma_get_pages()
1283 GEM_BUG_ON(vma->gtt_view.type); in __i915_vma_get_pages()
1286 pages = vma->obj->mm.pages; in __i915_vma_get_pages()
1291 intel_rotate_pages(&vma->gtt_view.rotated, vma->obj); in __i915_vma_get_pages()
1296 intel_remap_pages(&vma->gtt_view.remapped, vma->obj); in __i915_vma_get_pages()
1300 pages = intel_partial_pages(&vma->gtt_view, vma->obj); in __i915_vma_get_pages()
1305 drm_err(&vma->vm->i915->drm, in __i915_vma_get_pages()
1307 vma->gtt_view.type, PTR_ERR(pages)); in __i915_vma_get_pages()
1311 vma->pages = pages; in __i915_vma_get_pages()
1320 if (atomic_add_unless(&vma->pages_count, 1, 0)) in i915_vma_get_pages()
1323 err = i915_gem_object_pin_pages(vma->obj); in i915_vma_get_pages()
1331 vma->page_sizes = vma->obj->mm.page_sizes; in i915_vma_get_pages()
1332 atomic_inc(&vma->pages_count); in i915_vma_get_pages()
1337 __i915_gem_object_unpin_pages(vma->obj); in i915_vma_get_pages()
1358 for_each_gt(gt, vm->i915, id) in vma_invalidate_tlb()
1366 GEM_BUG_ON(atomic_read(&vma->pages_count) < count); in __vma_put_pages()
1368 if (atomic_sub_return(count, &vma->pages_count) == 0) { in __vma_put_pages()
1369 if (vma->pages != vma->obj->mm.pages) { in __vma_put_pages()
1370 sg_free_table(vma->pages); in __vma_put_pages()
1371 kfree(vma->pages); in __vma_put_pages()
1373 vma->pages = NULL; in __vma_put_pages()
1375 i915_gem_object_unpin_pages(vma->obj); in __vma_put_pages()
1381 if (atomic_add_unless(&vma->pages_count, -1, 1)) in i915_vma_put_pages()
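
The comment at lines 1274-1277 states the ownership rule: vma->pages borrows the object's sg_table (or a rotated/remapped/partial copy of it), so it may only be touched while obj->mm.pages is pinned, and i915_vma_get_pages()/__vma_put_pages() (lines 1320-1381) implement that as a refcount around the borrow. The simplified model below illustrates the pairing; the struct and function names are invented for illustration only.

#include <linux/types.h>

/* Model of the vma->pages borrow: first get pins the object pages and may
 * build a private remapped table, last put frees it and drops the pin.
 */
struct pages_model {
	unsigned int pages_count;	/* stands in for vma->pages_count */
	bool private_table;		/* vma->pages != vma->obj->mm.pages */
	unsigned int obj_pin_count;	/* obj->mm.pages pin held while borrowed */
};

static void model_get_pages(struct pages_model *m, bool needs_remap)
{
	if (m->pages_count++ == 0) {
		m->obj_pin_count++;		/* i915_gem_object_pin_pages() */
		m->private_table = needs_remap;	/* __i915_vma_get_pages() */
	}
}

static void model_put_pages(struct pages_model *m)
{
	if (--m->pages_count == 0) {
		m->private_table = false;	/* sg_free_table() + kfree() */
		m->obj_pin_count--;		/* i915_gem_object_unpin_pages() */
	}
}
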
1391 lockdep_assert_held(&vma->vm->mutex); in vma_unbind_pages()
1394 count = atomic_read(&vma->pages_count); in vma_unbind_pages()
1428 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); in i915_vma_pin_ww()
1430 if (flags & vma->vm->bind_async_flags) { in i915_vma_pin_ww()
1432 err = i915_vm_lock_objects(vma->vm, ww); in i915_vma_pin_ww()
1438 err = -ENOMEM; in i915_vma_pin_ww()
1442 work->vm = vma->vm; in i915_vma_pin_ww()
1444 err = i915_gem_object_get_moving_fence(vma->obj, &moving); in i915_vma_pin_ww()
1448 dma_fence_work_chain(&work->base, moving); in i915_vma_pin_ww()
1451 if (vma->vm->allocate_va_range) { in i915_vma_pin_ww()
1452 err = i915_vm_alloc_pt_stash(vma->vm, in i915_vma_pin_ww()
1453 &work->stash, in i915_vma_pin_ww()
1454 vma->size); in i915_vma_pin_ww()
1458 err = i915_vm_map_pt_stash(vma->vm, &work->stash); in i915_vma_pin_ww()
1471 * Differentiate between user/kernel vma inside the aliasing-ppgtt. in i915_vma_pin_ww()
1474 * aliasing-ppgtt, but it is still vitally important to try and in i915_vma_pin_ww()
1477 * inversions when we have to evict them the mmu_notifier callbacks - in i915_vma_pin_ww()
1483 * NB this may cause us to mask real lock inversions -- while the in i915_vma_pin_ww()
1487 err = mutex_lock_interruptible_nested(&vma->vm->mutex, in i915_vma_pin_ww()
1492 /* No more allocations allowed now we hold vm->mutex */ in i915_vma_pin_ww()
1495 err = -ENOENT; in i915_vma_pin_ww()
1499 bound = atomic_read(&vma->flags); in i915_vma_pin_ww()
1501 err = -ENOMEM; in i915_vma_pin_ww()
1506 err = -EAGAIN; /* pins are meant to be fairly temporary */ in i915_vma_pin_ww()
1516 err = i915_active_acquire(&vma->active); in i915_vma_pin_ww()
1525 if (i915_is_ggtt(vma->vm)) in i915_vma_pin_ww()
1529 GEM_BUG_ON(!vma->pages); in i915_vma_pin_ww()
1531 vma->obj->pat_index, in i915_vma_pin_ww()
1539 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); in i915_vma_pin_ww()
1540 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_pin_ww()
1552 drm_mm_remove_node(&vma->node); in i915_vma_pin_ww()
1555 i915_active_release(&vma->active); in i915_vma_pin_ww()
1557 mutex_unlock(&vma->vm->mutex); in i915_vma_pin_ww()
1562 dma_fence_work_commit_imm(&work->base); in i915_vma_pin_ww()
1565 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); in i915_vma_pin_ww()
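
i915_vma_pin_ww() (lines 1428-1565) expects to run under an i915_gem_ww_ctx so that backing-store allocation and eviction can back off on -EDEADLK and retry before vm->mutex is taken. A hedged sketch of the usual caller pattern follows; the function name and the PIN_USER flag are illustrative, while for_i915_gem_ww(), i915_gem_object_lock() and i915_vma_pin_ww() are the existing i915 interfaces.

#include "gem/i915_gem_object.h"
#include "i915_gem_ww.h"
#include "i915_vma.h"

/* Sketch only: pin a vma inside a ww acquire loop. */
static int example_pin_under_ww(struct i915_vma *vma)
{
	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {	/* interruptible ww acquire */
		err = i915_gem_object_lock(vma->obj, &ww);
		if (err)
			continue;		/* backs off and retries on -EDEADLK */

		/* size/alignment of 0 accept the defaults derived from the vma */
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	}

	return err;
}
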
1588 struct i915_address_space *vm = vma->vm; in __i915_ggtt_pin()
1596 if (err != -ENOSPC) { in __i915_ggtt_pin()
1606 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in __i915_ggtt_pin()
1608 if (mutex_lock_interruptible(&vm->mutex) == 0) { in __i915_ggtt_pin()
1615 mutex_unlock(&vm->mutex); in __i915_ggtt_pin()
1631 lockdep_assert_not_held(&vma->obj->base.resv->lock.base); in i915_ggtt_pin()
1634 err = i915_gem_object_lock(vma->obj, &_ww); in i915_ggtt_pin()
1643 * i915_ggtt_clear_scanout - Clear the scanout flag for all of the object's GGTT vmas in i915_ggtt_clear_scanout()
1654 spin_lock(&obj->vma.lock); in i915_ggtt_clear_scanout()
1657 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; in i915_ggtt_clear_scanout()
1659 spin_unlock(&obj->vma.lock); in i915_ggtt_clear_scanout()
1677 list_add(&vma->closed_link, &gt->closed_vma); in __vma_close()
1682 struct intel_gt *gt = vma->vm->gt; in i915_vma_close()
1688 GEM_BUG_ON(!atomic_read(&vma->open_count)); in i915_vma_close()
1689 if (atomic_dec_and_lock_irqsave(&vma->open_count, in i915_vma_close()
1690 &gt->closed_lock, in i915_vma_close()
1693 spin_unlock_irqrestore(&gt->closed_lock, flags); in i915_vma_close()
1699 list_del_init(&vma->closed_link); in __i915_vma_remove_closed()
1704 struct intel_gt *gt = vma->vm->gt; in i915_vma_reopen()
1706 spin_lock_irq(&gt->closed_lock); in i915_vma_reopen()
1709 spin_unlock_irq(&gt->closed_lock); in i915_vma_reopen()
1714 if (!drm_mm_node_allocated(&vma->node)) in force_unbind()
1717 atomic_and(~I915_VMA_PIN_MASK, &vma->flags); in force_unbind()
1719 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in force_unbind()
1725 struct drm_i915_gem_object *obj = vma->obj; in release_references()
1729 spin_lock(&obj->vma.lock); in release_references()
1730 list_del(&vma->obj_link); in release_references()
1731 if (!RB_EMPTY_NODE(&vma->obj_node)) in release_references()
1732 rb_erase(&vma->obj_node, &obj->vma.tree); in release_references()
1734 spin_unlock(&obj->vma.lock); in release_references()
1736 spin_lock_irq(&gt->closed_lock); in release_references()
1738 spin_unlock_irq(&gt->closed_lock); in release_references()
1741 i915_vm_resv_put(vma->vm); in release_references()
1744 i915_active_wait(&vma->active); in release_references()
1745 i915_active_fini(&vma->active); in release_references()
1746 GEM_WARN_ON(vma->resource); in release_references()
1751 * i915_vma_destroy_locked - Remove all weak references to the vma and put
1759 * - __i915_gem_object_pages_fini()
1760 * - __i915_vm_close() - Blocks the above function by taking a reference on
1762 * - __i915_vma_parked() - Blocks the above functions by taking a reference
1772 * - vm->mutex
1773 * - obj->vma.lock
1774 * - gt->closed_lock
1778 lockdep_assert_held(&vma->vm->mutex); in i915_vma_destroy_locked()
1781 list_del_init(&vma->vm_link); in i915_vma_destroy_locked()
1782 release_references(vma, vma->vm->gt, false); in i915_vma_destroy_locked()
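
Per the kernel-doc from line 1751 and the bodies above, i915_vma_destroy_locked() is for callers that already hold vm->mutex (it asserts this at line 1778), while i915_vma_destroy() takes and drops that mutex itself and records vm_ddestroy so the vm reference can be put after the mutex is released. The hedged sketch below only contrasts the two call shapes; the wrapper name is illustrative.

#include "i915_vma.h"

/* Sketch only: the two tear-down entry points differ in who owns vm->mutex. */
static void example_destroy(struct i915_vma *vma, bool holds_vm_mutex)
{
	if (holds_vm_mutex)
		i915_vma_destroy_locked(vma);	/* lockdep-asserts vm->mutex */
	else
		i915_vma_destroy(vma);		/* takes vm->mutex internally */
}
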
1790 mutex_lock(&vma->vm->mutex); in i915_vma_destroy()
1792 list_del_init(&vma->vm_link); in i915_vma_destroy()
1793 vm_ddestroy = vma->vm_ddestroy; in i915_vma_destroy()
1794 vma->vm_ddestroy = false; in i915_vma_destroy()
1796 /* vma->vm may be freed when releasing vma->vm->mutex. */ in i915_vma_destroy()
1797 gt = vma->vm->gt; in i915_vma_destroy()
1798 mutex_unlock(&vma->vm->mutex); in i915_vma_destroy()
1807 spin_lock_irq(&gt->closed_lock); in i915_vma_parked()
1808 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) { in i915_vma_parked()
1809 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1810 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1814 if (!kref_get_unless_zero(&obj->base.refcount)) in i915_vma_parked()
1822 list_move(&vma->closed_link, &closed); in i915_vma_parked()
1824 spin_unlock_irq(&gt->closed_lock); in i915_vma_parked()
1828 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1829 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1832 INIT_LIST_HEAD(&vma->closed_link); in i915_vma_parked()
1837 spin_lock_irq(&gt->closed_lock); in i915_vma_parked()
1838 list_add(&vma->closed_link, &gt->closed_vma); in i915_vma_parked()
1839 spin_unlock_irq(&gt->closed_lock); in i915_vma_parked()
1851 if (vma->iomap == NULL) in __i915_vma_iounmap()
1854 if (page_unmask_bits(vma->iomap)) in __i915_vma_iounmap()
1855 __i915_gem_object_release_map(vma->obj); in __i915_vma_iounmap()
1857 io_mapping_unmap(vma->iomap); in __i915_vma_iounmap()
1858 vma->iomap = NULL; in __i915_vma_iounmap()
1870 GEM_BUG_ON(!vma->obj->userfault_count); in i915_vma_revoke_mmap()
1872 node = &vma->mmo->vma_node; in i915_vma_revoke_mmap()
1873 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT; in i915_vma_revoke_mmap()
1874 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, in i915_vma_revoke_mmap()
1876 vma->size, in i915_vma_revoke_mmap()
1880 if (!--vma->obj->userfault_count) in i915_vma_revoke_mmap()
1881 list_del(&vma->obj->userfault_link); in i915_vma_revoke_mmap()
1887 return __i915_request_await_exclusive(rq, &vma->active); in __i915_request_await_bind()
1899 return i915_active_add_request(&vma->active, rq); in __i915_vma_move_to_active()
1907 struct drm_i915_gem_object *obj = vma->obj; in _i915_vma_move_to_active()
1912 GEM_BUG_ON(!vma->pages); in _i915_vma_move_to_active()
1915 err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE); in _i915_vma_move_to_active()
1933 err = dma_resv_reserve_fences(vma->obj->base.resv, idx); in _i915_vma_move_to_active()
1944 i915_active_add_request(&front->write, rq); in _i915_vma_move_to_active()
1956 obj->write_domain = I915_GEM_DOMAIN_RENDER; in _i915_vma_move_to_active()
1957 obj->read_domains = 0; in _i915_vma_move_to_active()
1960 obj->write_domain = 0; in _i915_vma_move_to_active()
1964 dma_resv_add_fence(vma->obj->base.resv, curr, usage); in _i915_vma_move_to_active()
1967 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) in _i915_vma_move_to_active()
1968 i915_active_add_request(&vma->fence->active, rq); in _i915_vma_move_to_active()
1970 obj->read_domains |= I915_GEM_GPU_DOMAINS; in _i915_vma_move_to_active()
1971 obj->mm.dirty = true; in _i915_vma_move_to_active()
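
_i915_vma_move_to_active() (lines 1907-1971) ties the VMA and its object to a request: it reserves dma-resv fence slots, adds the request fence with read or write usage, and updates the object's domains and dirty flag. Submission code normally goes through the i915_vma_move_to_active() wrapper; the sketch below shows that call shape under the assumption that the caller holds the object lock, with the function name and EXEC_OBJECT_WRITE flag as illustrative choices.

#include "i915_drv.h"
#include "i915_request.h"
#include "i915_vma.h"

/* Sketch only: record that @rq will write through @vma so the object's
 * dma-resv tracks the request and later eviction waits for it.
 */
static int example_track_gpu_write(struct i915_vma *vma,
				   struct i915_request *rq)
{
	return i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
}
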
1979 struct i915_vma_resource *vma_res = vma->resource; in __i915_vma_evict()
1991 * before the unbind, other due to non-strict nature of those in __i915_vma_evict()
1996 * bit from set-domain, as we mark all GGTT vma associated in __i915_vma_evict()
1998 * are currently unbinding this one -- so if this vma will be in __i915_vma_evict()
2012 GEM_BUG_ON(vma->fence); in __i915_vma_evict()
2016 GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt); in __i915_vma_evict()
2019 vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) && in __i915_vma_evict()
2020 kref_read(&vma->vm->ref); in __i915_vma_evict()
2021 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) || in __i915_vma_evict()
2022 vma->vm->skip_pte_rewrite; in __i915_vma_evict()
2027 vma->obj->mm.tlb); in __i915_vma_evict()
2031 vma->resource = NULL; in __i915_vma_evict()
2034 &vma->flags); in __i915_vma_evict()
2044 vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb); in __i915_vma_evict()
2061 lockdep_assert_held(&vma->vm->mutex); in __i915_vma_unbind()
2064 if (!drm_mm_node_allocated(&vma->node)) in __i915_vma_unbind()
2069 return -EAGAIN; in __i915_vma_unbind()
2075 * a residual pin skipping the vm->mutex) to complete. in __i915_vma_unbind()
2084 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ in __i915_vma_unbind()
2092 lockdep_assert_held(&vma->vm->mutex); in __i915_vma_unbind_async()
2094 if (!drm_mm_node_allocated(&vma->node)) in __i915_vma_unbind_async()
2098 &vma->obj->mm.rsgt->table != vma->resource->bi.pages) in __i915_vma_unbind_async()
2099 return ERR_PTR(-EAGAIN); in __i915_vma_unbind_async()
2110 if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active, in __i915_vma_unbind_async()
2113 return ERR_PTR(-EBUSY); in __i915_vma_unbind_async()
2118 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ in __i915_vma_unbind_async()
2125 struct i915_address_space *vm = vma->vm; in i915_vma_unbind()
2129 assert_object_held_shared(vma->obj); in i915_vma_unbind()
2136 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_unbind()
2141 return -EAGAIN; in i915_vma_unbind()
2146 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); in i915_vma_unbind()
2148 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); in i915_vma_unbind()
2153 mutex_unlock(&vm->mutex); in i915_vma_unbind()
2157 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); in i915_vma_unbind()
2163 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_unbind_async()
2164 struct i915_address_space *vm = vma->vm; in i915_vma_unbind_async()
2170 * We need the dma-resv lock since we add the in i915_vma_unbind_async()
2171 * unbind fence to the dma-resv object. in i915_vma_unbind_async()
2175 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_unbind_async()
2180 return -EAGAIN; in i915_vma_unbind_async()
2183 if (!obj->mm.rsgt) in i915_vma_unbind_async()
2184 return -EBUSY; in i915_vma_unbind_async()
2186 err = dma_resv_reserve_fences(obj->base.resv, 2); in i915_vma_unbind_async()
2188 return -EBUSY; in i915_vma_unbind_async()
2193 * kmalloc and it's in the dma-fence signalling critical path. in i915_vma_unbind_async()
2196 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); in i915_vma_unbind_async()
2198 if (trylock_vm && !mutex_trylock(&vm->mutex)) { in i915_vma_unbind_async()
2199 err = -EBUSY; in i915_vma_unbind_async()
2202 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref); in i915_vma_unbind_async()
2208 mutex_unlock(&vm->mutex); in i915_vma_unbind_async()
2214 dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ); in i915_vma_unbind_async()
2219 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); in i915_vma_unbind_async()
2227 i915_gem_object_lock(vma->obj, NULL); in i915_vma_unbind_unlocked()
2229 i915_gem_object_unlock(vma->obj); in i915_vma_unbind_unlocked()
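
The three unbind entry points in this range differ in locking and blocking behaviour: i915_vma_unbind() (line 2125) asserts the shared object lock and may wait, i915_vma_unbind_unlocked() (lines 2227-2229) takes the object lock around it, and i915_vma_unbind_async() (line 2163) adds an unbind fence to the object's dma-resv instead of waiting, optionally only trylocking vm->mutex. The decision logic below is purely illustrative; the three functions themselves are the ones listed above.

#include "i915_vma.h"

/* Sketch only: pick an unbind flavour based on what the caller holds and
 * whether it may block.
 */
static int example_unbind(struct i915_vma *vma, bool holds_object_lock,
			  bool may_block)
{
	if (!holds_object_lock)
		return i915_vma_unbind_unlocked(vma);	/* takes the object lock */

	if (!may_block)
		/* queue an unbind fence; trylock vm->mutex rather than wait */
		return i915_vma_unbind_async(vma, true);

	return i915_vma_unbind(vma);
}
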
2236 i915_gem_object_make_unshrinkable(vma->obj); in i915_vma_make_unshrinkable()
2242 i915_gem_object_make_shrinkable(vma->obj); in i915_vma_make_shrinkable()
2247 i915_gem_object_make_purgeable(vma->obj); in i915_vma_make_purgeable()
2263 return -ENOMEM; in i915_vma_module_init()