Lines matching full:sync in drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

/**
 * amdgpu_sync_create - zero init sync object
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        hash_init(sync->fences);
}
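
A minimal setup sketch (hypothetical fragment, assuming driver-internal context where amdgpu_sync.h is in scope; the object commonly lives on the stack):

        struct amdgpu_sync deps;

        /* Zero-initialize the hash table of fence entries. */
        amdgpu_sync_create(&deps);
        /* ... add fences, wait on them, then amdgpu_sync_free(&deps) ... */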

/**
 * amdgpu_sync_add_later - amend an existing hash entry
 * @sync: sync object to add the fence to
 * @f: fence to add
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
        hash_for_each_possible(sync->fences, e, node, f->context) {
                ...
        }
        ...
}
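
Entries are hashed by f->context, so the object keeps at most one fence per dma_fence context. A paraphrase of the amend step inside that loop (not the literal source, which wraps this in a helper):

        if (e->fence->context == f->context &&
            dma_fence_is_later(f, e->fence)) {
                /* replace the stored fence with the later one */
                dma_fence_put(e->fence);
                e->fence = dma_fence_get(f);
        }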

/**
 * amdgpu_sync_fence - remember to sync to this fence
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @flags: memory allocation flags to use when allocating the sync entry
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
                      gfp_t flags)
{
        if (amdgpu_sync_add_later(sync, f))
                return 0;
        /* ... otherwise allocate a new entry with @flags ... */
        hash_add(sync->fences, &e->node, f->context);
        ...
}
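
A typical caller records an explicit dependency like this (hypothetical fragment; deps and fence come from the surrounding code):

        /* The sync object takes its own reference, keyed by fence context. */
        r = amdgpu_sync_fence(&deps, fence, GFP_KERNEL);
        if (r)
                return r;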

/* Determine based on the owner and mode if we should sync to a fence or not */
static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
                                   enum amdgpu_sync_mode mode, void *owner,
                                   struct dma_fence *f)
{
        /* Always sync to moves, no matter what */
        ...
        /* Never sync to VM updates either. */
        ...
        /* Ignore fences depending on the sync mode */
        ...
        WARN(..., "Adding eviction fence to sync obj");
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 * @sync: sync object to add fences from reservation object to
 * @mode: how owner affects which fences we sync to
 *
 * Sync to every fence in the reservation object that the mode and owner
 * rules say we must wait on.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                     struct dma_resv *resv, enum amdgpu_sync_mode mode,
                     void *owner)
{
        /* ... for each fence that passes amdgpu_sync_test_fence(): ... */
        r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
        ...
}
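
Typical use gathers the implicit fences of a buffer object before a submission (hypothetical fragment; adev, bo and owner are assumed; AMDGPU_SYNC_NE_OWNER skips our own prior work):

        r = amdgpu_sync_resv(adev, &deps, bo->tbo.base.resv,
                             AMDGPU_SYNC_NE_OWNER, owner);
        if (r)
                return r;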

/**
 * amdgpu_sync_kfd - sync to KFD fences
 * @sync: sync object to add KFD fences to
 * @resv: reservation object with the KFD fences
 *
 * Extract all KFD fences and add them to the sync object.
 */
int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
{
        ...
        r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
        ...
}
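
Same calling pattern as amdgpu_sync_resv(), but restricted to KFD-owned fences (hypothetical fragment):

        /* Pick up only the KFD fences attached to the BO's reservation. */
        r = amdgpu_sync_kfd(&deps, bo->tbo.base.resv);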

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 * @sync: the sync object
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                         struct amdgpu_ring *ring)
{
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                ...
        }
}
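
Peeking is useful when the caller only needs to know whether anything is still pending, e.g. to decide whether a pipeline sync is required (hypothetical fragment):

        struct dma_fence *blocker = amdgpu_sync_peek_fence(&deps, ring);

        if (blocker) {
                /* still blocked; peek does not transfer a fence
                 * reference, so no dma_fence_put() here */
        }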

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 * @sync: sync object to use
 *
 * Get and remove the next not-yet-signaled fence from the sync object.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                ...
        }
}
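
Unlike the peek variant, the returned reference now belongs to the caller, so a drain loop must put each fence (hypothetical fragment):

        struct dma_fence *f;

        while ((f = amdgpu_sync_get_fence(&deps))) {
                /* ... forward the fence or wait on it ... */
                dma_fence_put(f);       /* we own this reference */
        }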

/**
 * amdgpu_sync_clone - clone a sync object
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone while
 * dropping already signaled fences from @source.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
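
A sketch of duplicating pending dependencies (hypothetical fragment; the destination must be initialized first):

        struct amdgpu_sync copy;

        amdgpu_sync_create(&copy);
        r = amdgpu_sync_clone(&deps, &copy);    /* copies unsignaled fences */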

/**
 * amdgpu_sync_push_to_job - push fences into job
 * @sync: sync object to get the fences from
 * @job: job to add the fences to
 *
 * Add all unsignaled fences from sync to job.
 */
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                ...
        }
}
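
In the submission path this is how collected dependencies typically reach the scheduler job (hypothetical fragment; job and the error label are assumed):

        /* Transfer every remaining dependency to the job before arming it. */
        r = amdgpu_sync_push_to_job(&deps, job);
        if (r)
                goto error_free;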

int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                /* ... wait on each remaining fence, honoring @intr ... */
        }
        ...
}

/**
 * amdgpu_sync_free - free the sync object
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        hash_for_each_safe(sync->fences, i, tmp, e, node)
                ...
}
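
Putting the lifecycle together (hypothetical fragment; adev, bo and owner are assumed; error handling trimmed):

        struct amdgpu_sync deps;
        int r;

        amdgpu_sync_create(&deps);
        r = amdgpu_sync_resv(adev, &deps, bo->tbo.base.resv,
                             AMDGPU_SYNC_NE_OWNER, owner);
        if (!r)
                r = amdgpu_sync_wait(&deps, true);      /* interruptible */
        amdgpu_sync_free(&deps);        /* drops all remaining references */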

/*
 * amdgpu_sync_init - init sync object subsystem
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Allocate and free the slab used for sync entries.
 */
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
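
These two run once, from the driver's module init and exit paths. A schematic of the ordering (hypothetical module wiring, assuming the usual linux/module.h boilerplate, not the real amdgpu registration code):

        static int __init example_init(void)
        {
                /* create the slab cache backing the sync hash entries */
                return amdgpu_sync_init();
        }

        static void __exit example_exit(void)
        {
                amdgpu_sync_fini();
        }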