
Searched refs:xe_vm (results 1–25 of 30, sorted by relevance)


/linux/drivers/gpu/drm/xe/
xe_vm.h
29 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);
31 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
34 static inline struct xe_vm *xe_vm_get(struct xe_vm *vm) in xe_vm_get()
40 static inline void xe_vm_put(struct xe_vm *vm) in xe_vm_put()
45 int xe_vm_lock(struct xe_vm *vm, bool intr);
47 void xe_vm_unlock(struct xe_vm *vm);
49 static inline bool xe_vm_is_closed(struct xe_vm *vm) in xe_vm_is_closed()
55 static inline bool xe_vm_is_banned(struct xe_vm *vm) in xe_vm_is_banned()
60 static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm) in xe_vm_is_closed_or_banned()
67 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
[all …]
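
The hits above cover the VM's lifetime and locking primitives. A minimal sketch of how a caller might combine them, inferred only from these signatures (the reference-counting contract of xe_vm_lookup() and the error choices are assumptions, not taken from the driver):

    /* Hypothetical caller, sketched from the xe_vm.h hits above. */
    static int example_with_vm(struct xe_file *xef, u32 id)
    {
            struct xe_vm *vm;
            int err;

            vm = xe_vm_lookup(xef, id);     /* assumed: returns a new reference */
            if (!vm)
                    return -ENOENT;

            err = xe_vm_lock(vm, true);     /* intr == true: interruptible */
            if (err)
                    goto put;

            err = xe_vm_is_closed_or_banned(vm) ? -ENOENT : 0;
            if (!err) {
                    /* ... operate on the VM under its lock ... */
            }

            xe_vm_unlock(vm);
    put:
            xe_vm_put(vm);                  /* drop the lookup reference */
            return err;
    }
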
xe_svm.h
26 struct xe_vm;
82 int xe_svm_init(struct xe_vm *vm);
84 void xe_svm_fini(struct xe_vm *vm);
86 void xe_svm_close(struct xe_vm *vm);
88 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
92 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
101 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
104 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
110 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
112 bool xe_svm_range_validate(struct xe_vm *vm,
[all …]
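
Read together, the SVM entry points suggest an init/close/fini lifetime bracketing the VM, with xe_svm_handle_pagefault() as the runtime path. A hedged sketch of that ordering (that close precedes fini is an assumption based purely on the naming):

    /* Hypothetical lifecycle, inferred from the xe_svm.h hits above. */
    static int example_svm_lifetime(struct xe_vm *vm)
    {
            int err = xe_svm_init(vm);      /* set up SVM state for this VM */

            if (err)
                    return err;

            /* ... faults serviced via xe_svm_handle_pagefault() ... */

            xe_svm_close(vm);               /* assumed: stop new SVM activity */
            xe_svm_fini(vm);                /* tear down SVM state */
            return 0;
    }
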
xe_trace_bo.h
31 __field(struct xe_vm *, vm)
95 __field(struct xe_vm *, vm)
188 DECLARE_EVENT_CLASS(xe_vm,
189 TP_PROTO(struct xe_vm *vm),
194 __field(struct xe_vm *, vm)
211 DEFINE_EVENT(xe_vm, xe_vm_kill,
212 TP_PROTO(struct xe_vm *vm),
216 DEFINE_EVENT(xe_vm, xe_vm_create,
217 TP_PROTO(struct xe_vm *vm),
221 DEFINE_EVENT(xe_vm, xe_vm_free,
[all …]
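
The xe_vm entries follow the standard ftrace idiom: DECLARE_EVENT_CLASS() defines the record layout and assignment once, and each DEFINE_EVENT() stamps out a named tracepoint that reuses it. A condensed sketch of that shape (the TP_printk() format is illustrative, not copied from the driver):

    DECLARE_EVENT_CLASS(xe_vm,
            TP_PROTO(struct xe_vm *vm),
            TP_ARGS(vm),

            TP_STRUCT__entry(
                    __field(struct xe_vm *, vm)
            ),

            TP_fast_assign(
                    __entry->vm = vm;
            ),

            /* illustrative format; the real events record more fields */
            TP_printk("vm=%p", __entry->vm)
    );

    /* Each event shares the class's layout and assignment logic. */
    DEFINE_EVENT(xe_vm, xe_vm_kill, TP_PROTO(struct xe_vm *vm), TP_ARGS(vm));
    DEFINE_EVENT(xe_vm, xe_vm_create, TP_PROTO(struct xe_vm *vm), TP_ARGS(vm));
    DEFINE_EVENT(xe_vm, xe_vm_free, TP_PROTO(struct xe_vm *vm), TP_ARGS(vm));
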
xe_exec_queue.h
21 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
26 struct xe_vm *vm,
31 struct xe_vm *user_vm,
133 void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
136 struct xe_vm *vm);
138 struct xe_vm *vm);
139 void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
143 struct xe_vm *vm,
150 struct xe_vm *vm,
154 struct xe_vm *vm,
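
The last-fence helpers look like the usual dma_fence get/put contract. A hypothetical caller, with the assumption (from the get/put naming alone) that _get returns a reference the caller must drop:

    /* Hypothetical wait on an exec queue's last fence for a given VM. */
    static void example_wait_last_fence(struct xe_exec_queue *q,
                                        struct xe_vm *vm)
    {
            struct dma_fence *fence;

            fence = xe_exec_queue_last_fence_get(q, vm); /* assumed +1 ref */
            if (!fence)                     /* assumed it may be NULL */
                    return;

            dma_fence_wait(fence, false);   /* uninterruptible wait */
            dma_fence_put(fence);           /* drop our reference */
    }
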
xe_userptr.h
17 struct xe_vm;
77 int xe_vm_userptr_pin(struct xe_vm *vm);
78 int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
79 int xe_vm_userptr_check_repin(struct xe_vm *vm);
93 static inline int xe_vm_userptr_pin(struct xe_vm *vm) { return 0; } in xe_vm_userptr_pin()
94 static inline int __xe_vm_userptr_needs_repin(struct xe_vm *vm) { return 0; } in __xe_vm_userptr_needs_repin()
95 static inline int xe_vm_userptr_check_repin(struct xe_vm *vm) { return 0; } in xe_vm_userptr_check_repin()
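
Hits 93–95 show the usual idiom for an optional feature: real declarations when the feature is built in, static inline stubs that return success otherwise, so callers never need #ifdef. A generic sketch of the pattern (CONFIG_EXAMPLE_FEATURE and the names are hypothetical):

    #if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
    int example_feature_pin(struct example_obj *obj);
    #else
    static inline int example_feature_pin(struct example_obj *obj)
    {
            return 0;       /* feature compiled out: succeed, do nothing */
    }
    #endif
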
xe_pt.h
20 struct xe_vm;
32 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
35 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
49 bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
xe_svm.c
52 static int xe_svm_get_pagemaps(struct xe_vm *vm);
54 void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem) in xe_svm_private_page_owner()
80 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm) in gpusvm_to_vm()
82 return container_of(gpusvm, struct xe_vm, svm.gpusvm); in gpusvm_to_vm()
85 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r) in range_to_vm()
129 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, in xe_svm_garbage_collector_add_range()
153 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, in xe_svm_range_notifier_event_begin()
203 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r, in xe_svm_range_notifier_event_end()
227 struct xe_vm *vm = gpusvm_to_vm(gpusvm); in xe_svm_invalidate()
295 static int __xe_svm_garbage_collector(struct xe_vm *vm, in __xe_svm_garbage_collector()
[all …]
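
gpusvm_to_vm() recovers the owning xe_vm from its embedded drm_gpusvm with container_of(). A self-contained userspace sketch of the same pointer arithmetic, with stand-in types (the kernel macro also type-checks the member, which this reduction omits):

    #include <stddef.h>
    #include <stdio.h>

    /* container_of reduced to its offsetof() core. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int dummy; };    /* stand-in for struct drm_gpusvm */

    struct outer {                  /* stand-in for struct xe_vm */
            int id;
            struct inner svm;       /* embedded member, cf. svm.gpusvm */
    };

    static struct outer *inner_to_outer(struct inner *i)
    {
            return container_of(i, struct outer, svm);
    }

    int main(void)
    {
            struct outer o = { .id = 42 };

            /* Given only the embedded member, recover the container. */
            printf("id=%d\n", inner_to_outer(&o.svm)->id);
            return 0;
    }
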
xe_vm.c
44 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj()
58 int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec) in xe_vm_drm_exec_lock()
63 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting()
89 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences()
110 static int wait_for_existing_preempt_fences(struct xe_vm *vm) in wait_for_existing_preempt_fences()
142 static bool xe_vm_is_idle(struct xe_vm *vm) in xe_vm_is_idle()
155 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) in arm_preempt_fences()
174 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) in add_preempt_fences()
198 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, in resume_and_reinstall_preempt_fences()
214 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_compute_exec_queue()
[all …]
xe_vm_madvise.c
38 static int get_vmas(struct xe_vm *vm, struct xe_vmas_in_madvise_range *madvise_range) in get_vmas()
88 static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm, in madvise_preferred_mem_loc()
121 static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm, in madvise_atomic()
163 static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm, in madvise_pat_index()
182 typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
193 static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end) in xe_zap_ptes_in_madvise_range()
235 static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end) in xe_vm_invalidate_madvise_range()
325 static int xe_madvise_details_init(struct xe_vm *vm, const struct drm_xe_madvise *args, in xe_madvise_details_init()
360 static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas, in check_bo_args_are_sane()
421 struct xe_vm *vm; in xe_vm_madvise_ioctl()
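
The madvise_func typedef at line 182 is the head of a small dispatch table: one handler per advice kind (preferred memory location, atomic policy, PAT index), all with one signature. A self-contained sketch of the idiom with hypothetical names:

    #include <stdio.h>

    struct dev;     /* stand-ins for struct xe_device / struct xe_vm */
    struct vm;

    /* Uniform handler signature, cf. madvise_func above. */
    typedef void (*advise_func)(struct dev *d, struct vm *v);

    static void advise_mem_loc(struct dev *d, struct vm *v) { puts("mem loc"); }
    static void advise_atomic(struct dev *d, struct vm *v)  { puts("atomic"); }
    static void advise_pat(struct dev *d, struct vm *v)     { puts("pat"); }

    /* Indexed by a hypothetical advice opcode from userspace. */
    static const advise_func handlers[] = {
            advise_mem_loc,
            advise_atomic,
            advise_pat,
    };

    int main(void)
    {
            unsigned int op = 1;    /* would come from ioctl arguments */

            if (op < sizeof(handlers) / sizeof(handlers[0]))
                    handlers[op](NULL, NULL);       /* dispatch, no switch */
            return 0;
    }
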
xe_pt.c
61 static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, in __xe_pt_empty_pte()
103 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, in xe_pt_create()
163 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm, in xe_pt_populate_empty()
288 struct xe_vm *vm;
525 struct xe_vm *vm = xe_walk->vm; in xe_pt_stage_bind_entry()
646 static bool xe_atomic_for_vram(struct xe_vm *vm, struct xe_vma *vma) in xe_atomic_for_vram()
654 static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_vma *vma) in xe_atomic_for_system()
705 struct xe_vm *vm = xe_vma_vm(vma); in xe_pt_stage_bind()
945 bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm, in xe_pt_zap_ptes_range()
1029 struct xe_vm *vm; in xe_pt_commit_prepare_locks_assert()
[all …]
xe_exec_queue_types.h
23 struct xe_vm;
92 struct xe_vm *vm;
97 struct xe_vm *user_vm;
xe_bo_types.h
21 struct xe_vm;
41 struct xe_vm *vm;
xe_pxp_types.h
20 struct xe_vm;
44 struct xe_vm *vm;
xe_exec_queue.c
199 struct xe_vm *vm, in __xe_exec_queue_alloc()
337 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, in xe_exec_queue_create()
381 struct xe_vm *vm, in xe_exec_queue_create_class()
425 struct xe_vm *user_vm, in xe_exec_queue_create_bind()
430 struct xe_vm *migrate_vm; in xe_exec_queue_create_bind()
1096 struct xe_vm *vm; in xe_exec_queue_create_ioctl()
1440 struct xe_vm *vm) in xe_exec_queue_last_fence_lockdep_assert()
1457 void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_put()
1488 struct xe_vm *vm) in xe_exec_queue_last_fence_get()
1515 struct xe_vm *vm) in xe_exec_queue_last_fence_get_for_resume()
[all …]
xe_vm_types.h
29 struct xe_vm;
178 struct xe_vm {
473 struct xe_vm *vm;
xe_pagefault.c
52 struct xe_vm *vm = xe_vma_vm(vma); in xe_pagefault_begin()
69 struct xe_vm *vm = xe_vma_vm(vma); in xe_pagefault_handle_vma()
142 static struct xe_vm *xe_pagefault_asid_to_vm(struct xe_device *xe, u32 asid) in xe_pagefault_asid_to_vm()
144 struct xe_vm *vm; in xe_pagefault_asid_to_vm()
161 struct xe_vm *vm; in xe_pagefault_service()
xe_tlb_inval_job.c
28 struct xe_vm *vm;
100 struct xe_vm *vm, u64 start, u64 end, int type) in xe_tlb_inval_job_create()
182 struct xe_vm *vm = job->vm; in xe_tlb_inval_job_destroy()
xe_migrate.h
25 struct xe_vm;
160 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);
xe_exec.c
101 struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm); in xe_exec_fn()
126 struct xe_vm *vm; in xe_exec_ioctl()
xe_lrc.h
21 struct xe_vm;
53 struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
xe_sched_job.h
12 struct xe_vm;
xe_migrate.c
141 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo, in xe_migrate_program_identity()
187 struct xe_vm *vm, struct drm_exec *exec) in xe_migrate_prepare_vm()
413 static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm) in xe_migrate_lock_prepare_vm()
442 struct xe_vm *vm; in xe_migrate_init()
607 struct xe_vm *vm = m->q->vm; in emit_pte()
1693 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m) in xe_migrate_get_vm()
1716 struct xe_vm *vm = pt_update->vops->vm; in xe_migrate_update_pgtables_cpu()
1852 struct xe_vm *vm = pt_update->vops->vm; in __xe_migrate_update_pgtables()
xe_bo.c
687 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_bo_trigger_rebind()
2307 struct xe_tile *tile, struct xe_vm *vm, in __xe_bo_create_locked()
2405 struct xe_vm *vm, size_t size, in xe_bo_create_locked()
2454 struct xe_vm *vm, size_t size, in xe_bo_create_user()
2528 struct xe_vm *vm, in xe_bo_create_pin_map_at_aligned()
2632 struct xe_vm *vm, size_t size, in xe_bo_create_pin_map()
2940 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict, in xe_bo_validate()
3161 struct xe_vm *vm = NULL; in xe_gem_create_ioctl()
xe_tlb_inval.c
340 void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm) in xe_tlb_inval_vm()
xe_sync.c
333 struct xe_exec_queue *q, struct xe_vm *vm) in xe_sync_in_fence_get()
