
Searched refs:fences (Results 1 – 25 of 71) sorted by relevance


/linux/drivers/gpu/drm/i915/
i915_deps.c
18 * the migration fence with the unbind fences if these are coalesced
38 if (deps->fences != &deps->single) in i915_deps_reset_fences()
39 kfree(deps->fences); in i915_deps_reset_fences()
42 deps->fences = &deps->single; in i915_deps_reset_fences()
52 deps->fences = NULL; in i915_deps_init()
62 * then resets the fences array.
69 dma_fence_put(deps->fences[i]); in i915_deps_fini()
71 if (deps->fences != &deps->single) in i915_deps_fini()
72 kfree(deps->fences); in i915_deps_fini()
89 memcpy(new_fences, deps->fences, in i915_deps_grow()
134 struct dma_fence **fences = deps->fences; i915_deps_sync() local
[all...]
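
These i915_deps hits show a small-array idiom: deps->fences points at an embedded single-entry slot until growth forces a heap allocation, and kfree() only runs once the pointer has left &deps->single. A minimal sketch of the idiom under those assumptions; the *_example names are illustrative, not the i915 API:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/string.h>

struct deps_example {
    struct dma_fence **fences;   /* current storage */
    struct dma_fence *single;    /* inline slot for the common 1-fence case */
    unsigned int num_deps;
    unsigned int fences_size;
};

static void deps_example_init(struct deps_example *deps)
{
    deps->fences = &deps->single;  /* no allocation for the first fence */
    deps->num_deps = 0;
    deps->fences_size = 1;
}

static int deps_example_add(struct deps_example *deps,
                            struct dma_fence *fence, gfp_t gfp)
{
    if (deps->num_deps == deps->fences_size) {
        unsigned int new_size = 2 * deps->fences_size;
        struct dma_fence **new_fences;

        new_fences = kmalloc_array(new_size, sizeof(*new_fences), gfp);
        if (!new_fences)
            return -ENOMEM;

        /* Spill to the heap; free the old array unless it is the inline slot. */
        memcpy(new_fences, deps->fences, deps->num_deps * sizeof(*new_fences));
        if (deps->fences != &deps->single)
            kfree(deps->fences);
        deps->fences = new_fences;
        deps->fences_size = new_size;
    }

    deps->fences[deps->num_deps++] = dma_fence_get(fence);
    return 0;
}
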
/linux/drivers/dma-buf/
dma-resv.c
49 * can have any number of fences attached to it. Each fence carries a usage
51 * resource. The RCU mechanism is used to protect read access to fences from
142 RCU_INIT_POINTER(obj->fences, NULL); in dma_resv_init()
156 dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); in dma_resv_fini()
161 /* Dereference the fences while ensuring RCU rules */
164 return rcu_dereference_check(obj->fences, dma_resv_held(obj)); in dma_resv_fences_list()
168 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
170 * @num_fences: number of fences we want to add
191 * the number of needed fences dynamically. in dma_resv_reserve_fences()
228 * We are not changing the effective set of fences here in dma_resv_reserve_fences()
265 struct dma_resv_list *fences = dma_resv_fences_list(obj); dma_resv_reset_max_fences() local
570 dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences) dma_resv_get_fences() argument
631 struct dma_fence **fences; dma_resv_get_singleton() local
[all...]
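
Taken together with the st-dma-resv.c test further down, the dma_resv_get_fences() hit implies the calling convention: on success the caller receives a kmalloc'ed array of referenced fences, and must dma_fence_put() every entry before kfree()ing the array. A hedged usage sketch; wait_for_all_fences() is our name, not a kernel helper:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

static int wait_for_all_fences(struct dma_resv *resv)
{
    struct dma_fence **fences;
    unsigned int i, count;
    int r;

    /* BOOKKEEP is the widest usage: it returns every attached fence. */
    r = dma_resv_get_fences(resv, DMA_RESV_USAGE_BOOKKEEP, &count, &fences);
    if (r)
        return r;

    for (i = 0; i < count; i++) {
        dma_fence_wait(fences[i], false);  /* uninterruptible wait */
        dma_fence_put(fences[i]);          /* drop the reference we were handed */
    }
    kfree(fences);                          /* and the array itself */
    return 0;
}
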
st-dma-fence-chain.c
102 struct dma_fence **fences; member
124 fc->fences = kvmalloc_array(count, sizeof(*fc->fences), in fence_chains_init()
126 if (!fc->fences) { in fence_chains_init()
133 fc->fences[i] = mock_fence(); in fence_chains_init()
134 if (!fc->fences[i]) { in fence_chains_init()
140 fc->fences[i], in fence_chains_init()
157 dma_fence_put(fc->fences[i]); in fence_chains_init()
160 kvfree(fc->fences); in fence_chains_init()
171 dma_fence_signal(fc->fences[i]); in fence_chains_fini()
[all...]
st-dma-resv.c
228 cursor.fences = (void*)~0; in test_for_each_unlocked()
247 struct dma_fence *f, **fences = NULL; in test_get_fences() local
274 r = dma_resv_get_fences(&resv, usage, &i, &fences); in test_get_fences()
280 if (i != 1 || fences[0] != f) { in test_get_fences()
288 dma_fence_put(fences[i]); in test_get_fences()
289 kfree(fences); in test_get_fences()
st-dma-fence-unwrap.c
53 struct dma_fence **fences; in mock_array() local
57 fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); in mock_array()
58 if (!fences) in mock_array()
63 fences[i] = va_arg(valist, typeof(*fences)); in mock_array()
66 array = dma_fence_array_create(num_fences, fences, in mock_array()
74 kfree(fences); in mock_array()
79 dma_fence_put(va_arg(valist, typeof(*fences))); in mock_array()
159 pr_err("Not all fences see in unwrap_array()
[all...]
dma-fence.c
32 * fence context; this allows checking if fences belong to the same
39 * DOC: DMA fences overview
41 * DMA fences, represented by &struct dma_fence, are the kernel internal
47 * dma_fence_context_alloc(), and all fences on the same context are
50 * Since the purpose of fences is to facilitate cross-device and
53 * - Individual fences can be exposed as a &sync_file, accessed as a file
64 * implicit fences are stored in &struct dma_resv through the
77 * further command submission and force complete all in-flight fences, e.g.
86 * Drivers should not try to second guess timeout handling of fences from
91 * which completes the fences, with
828 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, uint32_t *idx) dma_fence_test_signaled_any() argument
865 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout, uint32_t *idx) dma_fence_wait_any_timeout() argument
[all...]
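
As the dma_fence_wait_any_timeout() signature in the last hit suggests, the call waits until any one of the fences signals: it returns the remaining timeout (positive) on success, 0 on timeout, or a negative error, and stores the index of the signaled fence through @idx. A short hedged sketch; wait_for_first() is our name:

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static int wait_for_first(struct dma_fence **fences, uint32_t count)
{
    uint32_t idx;
    signed long t;

    t = dma_fence_wait_any_timeout(fences, count, true,
                                   msecs_to_jiffies(100), &idx);
    if (t < 0)
        return t;            /* e.g. -ERESTARTSYS on a signal */
    if (t == 0)
        return -ETIMEDOUT;   /* nothing signaled within 100 ms */

    pr_info("fence %u signaled first\n", idx);
    return 0;
}
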
st-dma-fence.c
446 struct dma_fence __rcu **fences; member
477 rcu_assign_pointer(t->fences[t->id], f1); in thread_signal_callback()
482 f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); in thread_signal_callback()
514 rcu_assign_pointer(t->fences[t->id], NULL); in thread_signal_callback()
538 t[i].fences = f; in race_signal_callback()
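
The pattern in this test is a lock-free handoff of a fence pointer: the writer publishes with rcu_assign_pointer() and the reader uses dma_fence_get_rcu_safe(), which retries until it holds a reference on a fence that was not replaced or freed underneath it. A minimal sketch of that pairing; slot_example is an illustrative type:

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

struct slot_example {
    struct dma_fence __rcu *fence;
};

/* Writer: publish a new fence; concurrent readers may race with us. */
static void slot_publish(struct slot_example *slot, struct dma_fence *f)
{
    rcu_assign_pointer(slot->fence, f);
}

/* Reader: take a reference without any lock beyond the RCU read side. */
static struct dma_fence *slot_get(struct slot_example *slot)
{
    struct dma_fence *f;

    rcu_read_lock();
    /* Re-reads the pointer until the reference is safely acquired;
     * returns NULL if the slot is empty or the fence is being freed. */
    f = dma_fence_get_rcu_safe(&slot->fence);
    rcu_read_unlock();
    return f;
}
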
/linux/drivers/gpu/host1x/
intr.c
35 if (!list_empty(&sp->fences.list)) { in host1x_intr_update_hw_state()
36 fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list); in host1x_intr_update_hw_state()
47 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_add_fence_locked()
57 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_remove_fence()
83 spin_lock(&sp->fences.lock); in host1x_intr_handle_interrupt()
85 list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) { in host1x_intr_handle_interrupt()
98 spin_unlock(&sp->fences.lock); in host1x_intr_handle_interrupt()
110 spin_lock_init(&syncpt->fences.lock); in host1x_intr_init()
111 INIT_LIST_HEAD(&syncpt->fences.list); in host1x_intr_init()
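
host1x keeps one spinlock-protected, threshold-ordered fence list per syncpoint and sweeps it from the interrupt path. A simplified sketch of that shape under those assumptions; the _example types stand in for the host1x ones:

#include <linux/dma-fence.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct fence_list_example {
    spinlock_t lock;
    struct list_head list;   /* ordered by ascending threshold */
};

struct pending_fence_example {
    struct dma_fence base;
    u32 threshold;           /* syncpoint value that signals this fence */
    struct list_head node;
};

/* Interrupt-time sweep: signal every fence whose threshold has passed. */
static void sweep_example(struct fence_list_example *fl, u32 current_value)
{
    struct pending_fence_example *f, *tmp;

    spin_lock(&fl->lock);
    list_for_each_entry_safe(f, tmp, &fl->list, node) {
        if ((s32)(current_value - f->threshold) < 0)
            break;           /* ordered list: the rest lie in the future */
        list_del_init(&f->node);
        dma_fence_signal(&f->base);
    }
    spin_unlock(&fl->lock);
}
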
debug.c
96 spin_lock_irqsave(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
97 list_for_each(pos, &m->syncpt[i].fences.list) in show_syncpts()
99 spin_unlock_irqrestore(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_sync.c
54 hash_init(sync->fences); in amdgpu_sync_create()
137 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later()
176 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence()
192 /* We only want to trigger KFD eviction fences on in amdgpu_sync_test_fence()
193 * evict or move jobs. Skip KFD fences otherwise. in amdgpu_sync_test_fence()
205 /* Ignore fences depending on the sync mode */ in amdgpu_sync_test_fence()
235 * @sync: sync object to add fences from reservation object to
237 * @mode: how owner affects which fences we sync to
270 * amdgpu_sync_kfd - sync to KFD fences
272 * @sync: sync object to add KFD fences to
[all...]
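
amdgpu_sync stores fences in a hashtable keyed by fence->context and keeps only the latest fence per context, which is what hash_init()/hash_add()/hash_for_each_possible() above are doing. A hedged reconstruction of the "add later" step; the _example names are ours and the real code handles more cases:

#include <linux/dma-fence.h>
#include <linux/hashtable.h>

struct sync_entry_example {
    struct hlist_node node;
    struct dma_fence *fence;
};

struct sync_example {
    DECLARE_HASHTABLE(fences, 4);   /* buckets keyed by fence->context */
};

/* If a fence from the same context is present, keep only the later one. */
static bool sync_add_later_example(struct sync_example *sync,
                                   struct dma_fence *f)
{
    struct sync_entry_example *e;

    hash_for_each_possible(sync->fences, e, node, f->context) {
        if (e->fence->context != f->context)
            continue;        /* hash collision, different context */

        if (dma_fence_is_later(f, e->fence)) {
            dma_fence_put(e->fence);
            e->fence = dma_fence_get(f);
        }
        return true;         /* context already tracked */
    }
    return false;            /* caller must allocate a new entry */
}
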
amdgpu_userq_fence.c
90 INIT_LIST_HEAD(&fence_drv->fences); in amdgpu_userq_fence_driver_alloc()
155 list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) { in amdgpu_userq_fence_driver_process()
185 list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) { in amdgpu_userq_fence_driver_destroy()
280 list_add_tail(&userq_fence->link, &fence_drv->fences); in amdgpu_userq_fence_create()
607 struct dma_fence **fences = NULL; in amdgpu_userq_wait_ioctl() local
755 * userq_fence_info and return the actual number of fences on in amdgpu_userq_wait_ioctl()
767 /* Array of fences */ in amdgpu_userq_wait_ioctl()
768 fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL); in amdgpu_userq_wait_ioctl()
769 if (!fences) { in amdgpu_userq_wait_ioctl()
[all...]
amdgpu_ids.c
99 * amdgpu_pasid_free_delayed - free pasid when fences signal
101 * @resv: reservation object with the fences to wait for
104 * Free the pasid only after all the fences in resv are signaled.
139 * block for all the fences to complete. in amdgpu_pasid_free_delayed()
204 struct dma_fence **fences; in amdgpu_vmid_grab_idle() local
212 fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT); in amdgpu_vmid_grab_idle()
213 if (!fences) in amdgpu_vmid_grab_idle()
223 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); in amdgpu_vmid_grab_idle()
224 if (!fences[i]) in amdgpu_vmid_grab_idle()
238 dma_fence_get(fences[ in amdgpu_vmid_grab_idle()
[all...]
amdgpu_fence.c
148 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
263 ptr = &drv->fences[last_seq]; in amdgpu_fence_process()
306 * amdgpu_fence_wait_empty - wait for all fences to signal
310 * Wait for all fences on the requested ring to signal (all asics).
311 * Returns 0 if the fences have passed, error for all other cases.
322 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
343 * Wait for all fences on the requested ring to signal (all asics).
358 * amdgpu_fence_count_emitted - get the count of emitted fences
362 * Get the number of fences emitted on the requested ring (all asics).
363 * Returns the number of emitted fences on the ring.
[all...]
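
amdgpu stores ring fences in a power-of-two array indexed by seq & num_fences_mask, so when a sequence number wraps back to a slot, the fence previously occupying it is waited on and released first. A simplified sketch of the slot-reuse idea; the real driver guards the slots with RCU, which this version omits:

#include <linux/dma-fence.h>

struct fence_ring_example {
    struct dma_fence **fences;   /* num_fences_mask + 1 slots, power of two */
    u32 num_fences_mask;
};

static void ring_store_fence_example(struct fence_ring_example *ring,
                                     struct dma_fence *fence, u32 seq)
{
    struct dma_fence **ptr = &ring->fences[seq & ring->num_fences_mask];
    struct dma_fence *old = *ptr;

    if (old) {
        /* Slot reuse: the fence emitted a full lap ago must finish first. */
        dma_fence_wait(old, false);
        dma_fence_put(old);
    }
    *ptr = dma_fence_get(fence);
}
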
amdgpu_ctx.c
198 res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i])); in amdgpu_ctx_entity_time()
215 entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), in amdgpu_ctx_init_entity()
282 res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i])); in amdgpu_ctx_fini_entity()
283 dma_fence_put(entity->fences[i]); in amdgpu_ctx_fini_entity()
767 other = centity->fences[idx]; in amdgpu_ctx_add_fence()
773 centity->fences[idx] = fence; in amdgpu_ctx_add_fence()
807 fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); in amdgpu_ctx_get_fence()
869 other = dma_fence_get(centity->fences[idx]); in amdgpu_ctx_wait_prev_fence()
/linux/Documentation/driver-api/
sync_file.rst
9 the fences (struct dma_fence) that are needed to synchronize between drivers or
29 in-fences and out-fences
33 the driver to userspace we call the fences it contains 'out-fences'. They are
37 Out-fences are fences that the driver creates.
40 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
42 the in-fences
[all...]
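
Concretely, an out-fence is a driver fence wrapped into a sync_file whose file is installed on a new fd, and an in-fence is recovered from such an fd via sync_file_get_fence(). A hedged sketch of both directions; the _example helpers are ours:

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Export a driver fence to userspace as an out-fence fd. */
static int export_out_fence_example(struct dma_fence *fence)
{
    struct sync_file *sync_file;
    int fd;

    fd = get_unused_fd_flags(O_CLOEXEC);
    if (fd < 0)
        return fd;

    sync_file = sync_file_create(fence);  /* takes its own fence reference */
    if (!sync_file) {
        put_unused_fd(fd);
        return -ENOMEM;
    }

    fd_install(fd, sync_file->file);
    return fd;
}

/* Resolve an in-fence fd that userspace passed to us. */
static struct dma_fence *import_in_fence_example(int fd)
{
    return sync_file_get_fence(fd);   /* NULL if fd is not a sync_file */
}
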
dma-buf.rst
21 - dma-resv, which manages a set of dma-fences for a particular dma-buf
169 :doc: DMA fences overview
243 * Future fences, used in HWC1 to signal when a buffer isn't used by the display
247 * Proxy fences, proposed to handle &drm_syncobj for which the fence has not yet
250 * Userspace fences or gpu futexes, fine-grained locking within a command buffer
256 batch DMA fences for memory management instead of context preemption DMA
257 fences which get reattached when the compute job is rescheduled.
260 fences and controls when they fire. Mixing indefinite fences with normal
261 in-kernel DMA fences does not work
[all...]
/linux/drivers/gpu/drm/i915/selftests/
i915_sw_fence.c
228 /* Test a chain of fences, A waits on B who waits on C */ in test_ABC()
308 /* Test multiple fences (AB) waiting on a single event (C) */ in test_AB_C()
453 struct i915_sw_fence **fences; in test_chain() local
456 /* Test a long chain of fences */ in test_chain()
457 fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL); in test_chain()
458 if (!fences) in test_chain()
462 fences[i] = alloc_fence(); in test_chain()
463 if (!fences[i]) { in test_chain()
470 ret = i915_sw_fence_await_sw_fence_gfp(fences[ in test_chain()
[all...]
/linux/drivers/gpu/drm/
drm_suballoc.c
91 * Cleans up the suballocation manager after use. All fences added
227 struct dma_fence **fences, in drm_suballoc_next_hole() argument
250 fences[i] = NULL; in drm_suballoc_next_hole()
259 fences[i] = sa->fence; in drm_suballoc_next_hole()
318 struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES]; in drm_suballoc_new() local
355 } while (drm_suballoc_next_hole(sa_manager, fences, tries)); in drm_suballoc_new()
358 if (fences[i]) in drm_suballoc_new()
359 fences[count++] = dma_fence_get(fences[i]); in drm_suballoc_new()
365 t = dma_fence_wait_any_timeout(fences, count in drm_suballoc_new()
[all...]
/linux/drivers/gpu/drm/xe/
xe_sync.c
273 * Get a fence from syncs, exec queue, and VM. If syncs contain in-fences, create
274 * and return a composite fence of all in-fences + last fence. If no in-fences
284 struct dma_fence **fences = NULL; in xe_sync_in_fence_get() local
291 /* Count in-fences */ in xe_sync_in_fence_get()
306 fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL); in xe_sync_in_fence_get()
307 if (!fences) in xe_sync_in_fence_get()
312 fences[current_fence++] = sync[i].fence; in xe_sync_in_fence_get()
315 fences[current_fenc in xe_sync_in_fence_get()
[all...]
/linux/Documentation/gpu/
drm-vm-bind-async.rst
20 synchronization objects can be either generic, like dma-fences or
31 understanding of dma-fences is required to digest this
38 the GPU and CPU. Memory fences are sometimes referred to as
39 user-fences, userspace-fences or gpu futexes and do not necessarily obey
41 The kernel should thus avoid waiting for memory fences with locks held.
46 a certain mode that disallows completion dma-fences.
72 IOCTL returns. A synchronous VM_BIND takes neither in-fences nor
73 out-fences. Synchronous VM_BIND may block and wait for GPU operations;
94 Since asynchronous VM_BIND operations may use dma-fences embedded
[all...]
/linux/include/linux/
dma-fence-array.h
29 * struct dma_fence_array - fence to represent an array of fences
32 * @num_fences: number of fences in the array
33 * @num_pending: fences in the array still pending
34 * @fences: array of the fences
44 struct dma_fence **fences; member
68 * dma_fence_array_for_each - iterate over all fences in array
73 * Test if @array is a dma_fence_array object and if yes iterate over all fences
84 int num_fences, struct dma_fence **fences,
89 struct dma_fence **fences,
[all...]
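
dma_fence_array_create() takes ownership of the fences array and of the references it contains, which is why the mock_array() hit above frees them only on the failure path. A hedged sketch of building and walking an array; merge_two_example() and print_members_example() are illustrative names:

#include <linux/dma-fence-array.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Bundle two fences into one fence that signals when both have signaled. */
static struct dma_fence *merge_two_example(struct dma_fence *a,
                                           struct dma_fence *b)
{
    struct dma_fence_array *array;
    struct dma_fence **fences;

    fences = kcalloc(2, sizeof(*fences), GFP_KERNEL);
    if (!fences)
        return NULL;
    fences[0] = a;
    fences[1] = b;

    /* On success, @fences and its references now belong to @array. */
    array = dma_fence_array_create(2, fences, dma_fence_context_alloc(1),
                                   1, false /* signal when all signal */);
    if (!array) {
        kfree(fences);
        return NULL;
    }
    return &array->base;
}

static void print_members_example(struct dma_fence *fence)
{
    struct dma_fence *f;
    unsigned int index;

    /* Iterates only if @fence really is a dma_fence_array. */
    dma_fence_array_for_each(f, index, fence)
        pr_info("member %u on context %llu\n", index, f->context);
}
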
dma-resv.h
53 * enum dma_resv_usage - how the fences from a dma_resv obj are used
56 * controls which fences are returned when queried.
59 * when the dma_resv object is asked for fences for one use case the fences
62 * For example when asking for WRITE fences then the KERNEL fences are returned
63 * as well. Similar when asked for READ fences then both WRITE and KERNEL
64 * fences are returned as well.
66 * Already used fences can be promoted in the sense that a fence with
68 * with this usage. But fences can
178 struct dma_resv_list __rcu *fences; global() member
210 struct dma_resv_list *fences; global() member
[all...]
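
On the write side, fences are attached under the dma_resv lock with an explicit usage, after reserving a slot; on the read side, a query for one usage also returns the stricter categories, as the comment above describes. A minimal hedged sketch; add_write_fence_example() is our name:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int add_write_fence_example(struct dma_resv *resv,
                                   struct dma_fence *fence)
{
    int r;

    r = dma_resv_lock(resv, NULL);
    if (r)
        return r;

    /* Slots must be reserved before adding; this may allocate memory. */
    r = dma_resv_reserve_fences(resv, 1);
    if (!r)
        dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

    dma_resv_unlock(resv);
    return r;
}
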
/linux/drivers/gpu/drm/virtio/
virtgpu_fence.c
95 list_add_tail(&fence->node, &drv->fences); in virtio_gpu_fence_emit()
120 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
127 * Signal any fences with a strictly smaller sequence number in virtio_gpu_fence_event_process()
130 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
/linux/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c
312 struct eb_fence *fences; member
1047 /* Reserve enough slots to accommodate composite fences */ in eb_validate_vmas()
2127 * using mandatory fences underneath. Currently the below in eb_move_to_gpu()
2788 __free_fence_array(struct eb_fence *fences, unsigned int n) in __free_fence_array() argument
2791 drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); in __free_fence_array()
2792 dma_fence_put(fences[n].dma_fence); in __free_fence_array()
2793 dma_fence_chain_free(fences[n].chain_fence); in __free_fence_array()
2795 kvfree(fences); in __free_fence_array()
2827 f = krealloc(eb->fences, in add_timeline_fence_array()
2833 eb->fences in add_timeline_fence_array()
3006 put_fence_array(struct eb_fence *fences, int num_fences) put_fence_array() argument
3201 struct dma_fence **fences; eb_composite_fence_create() local
[all...]
/linux/drivers/gpu/drm/radeon/
radeon_trace.h
36 __field(u32, fences)
42 __entry->fences = radeon_fence_count_emitted(
45 TP_printk("ring=%u, dw=%u, fences=%u",
47 __entry->fences)
