/linux/drivers/gpu/drm/i915/selftests/
i915_syncmap.c
    101  static int check_syncmap_free(struct i915_syncmap **sync)
    103          i915_syncmap_free(sync);
    104          if (*sync) {
    105                  pr_err("sync not cleared after free\n");
    112  static int dump_syncmap(struct i915_syncmap *sync, int err)
    117          return check_syncmap_free(&sync);
    123          if (i915_syncmap_print_to_buf(sync, buf, PAGE_SIZE))
    129          i915_syncmap_free(&sync);
    135          struct i915_syncmap *sync = (void *)~0ul;        in igt_syncmap_init()
    142          i915_syncmap_init(&sync);                        in igt_syncmap_init()
    163  check_one(struct i915_syncmap **sync, u64 context, u32 seqno)
    206          struct i915_syncmap *sync;                       in igt_syncmap_one()
    238  check_leaf(struct i915_syncmap **sync, u64 context, u32 seqno)
    273          struct i915_syncmap *sync;                       in igt_syncmap_join_above()
    334          struct i915_syncmap *sync;                       in igt_syncmap_join_below()
    404          struct i915_syncmap *sync;                       in igt_syncmap_neighbours()
    448          struct i915_syncmap *sync;                       in igt_syncmap_compact()
    546          struct i915_syncmap *sync;                       in igt_syncmap_random()
    [all...]
/linux/tools/testing/selftests/powerpc/ptrace/ |
child.h
      3  * Helper functions to sync execution between parent and child processes.
     29  #define CHILD_FAIL_IF(x, sync) \
     34          (sync)->child_gave_up = true; \
     35          prod_parent(sync); \
     40  #define PARENT_FAIL_IF(x, sync) \
     45          (sync)->parent_gave_up = true; \
     46          prod_child(sync); \
     51  #define PARENT_SKIP_IF_UNSUPPORTED(x, sync, msg) \
     54          (sync)->parent_gave_up = true; \
     55          prod_child(sync); \
     60  init_child_sync(struct child_sync *sync)
     79  destroy_child_sync(struct child_sync *sync)
     85  wait_child(struct child_sync *sync)
     99  prod_child(struct child_sync *sync)
    113  wait_parent(struct child_sync *sync)
    127  prod_parent(struct child_sync *sync)
    [all...]
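
The helpers above implement a two-way rendezvous: each side blocks in wait_*() until the peer calls the matching prod_*(), and the *_FAIL_IF() macros wake the peer before bailing out so a failure on one side cannot deadlock the other. Below is a minimal sketch of the intended call pattern, assuming the sync area is placed in a MAP_SHARED mapping before fork(); check_child_state() and the overall test body are hypothetical, not part of child.h:

#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#include "child.h"

/* Hypothetical placeholder for the actual ptrace inspection step. */
static int check_child_state(pid_t pid) { (void)pid; return 0; }

static int run_handshake_test(void)
{
	/* The sync area must be visible to both processes, so map it
	 * shared before fork(). */
	struct child_sync *sync = mmap(NULL, sizeof(*sync),
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	pid_t pid;

	init_child_sync(sync);

	pid = fork();
	if (pid == 0) {
		prod_parent(sync);	/* "I am ready to be traced." */
		wait_parent(sync);	/* Block until the parent finishes. */
		_exit(0);
	}

	wait_child(sync);		/* Block until the child is ready. */
	PARENT_FAIL_IF(check_child_state(pid) != 0, sync);
	prod_child(sync);		/* Release the child. */

	waitpid(pid, NULL, 0);
	destroy_child_sync(sync);
	return 0;
}
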
/linux/drivers/gpu/drm/xe/ |
xe_sync.c
    114          struct xe_sync_entry *sync,                      in xe_sync_entry_parse()
    141          sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
    142          if (XE_IOCTL_DBG(xe, !sync->syncobj))
    146          sync->fence = drm_syncobj_fence_get(sync->syncobj);
    147          if (XE_IOCTL_DBG(xe, !sync->fence))
    162          sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
    163          if (XE_IOCTL_DBG(xe, !sync->syncobj))
    167          sync->chain_fence = dma_fence_chain_alloc();
    168          if (!sync ...                                    in xe_sync_entry_parse() (truncated)
    215  xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
    224  xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
    256  xe_sync_entry_cleanup(struct xe_sync_entry *sync)
    281  xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, struct xe_exec_queue *q, struct xe_vm *vm)
    359  xe_sync_ufence_get(struct xe_sync_entry *sync)
    [all...]
xe_sync.h
     22          struct xe_sync_entry *sync,
     25  int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
     27  void xe_sync_entry_signal(struct xe_sync_entry *sync,
     29  void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
     31  xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
     34  static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
     36          return !!sync->ufence;
     40  struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
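
Taken together, xe_sync.c and xe_sync.h outline one entry's lifecycle during command submission: parse the user syncobj handle into a fence, attach in-fences as job dependencies, signal out-fences once the job's fence exists, then drop all references. The sketch below is a rough reconstruction of that flow; xe_sync_entry_parse()'s parameter list is truncated in the results above, so its arguments (and the job_fence parameter) are assumptions:

/* Rough sketch; argument lists are partly assumed (see above). */
static int submit_with_syncs(struct xe_device *xe, struct xe_file *xef,
			     struct xe_sched_job *job,
			     struct dma_fence *job_fence,
			     struct drm_xe_sync __user *usyncs,
			     struct xe_sync_entry *syncs, int num_syncs)
{
	int i, parsed = 0, err = 0;

	for (i = 0; i < num_syncs; i++) {
		/* Resolve the handle; for in-syncs this also grabs a fence. */
		err = xe_sync_entry_parse(xe, xef, &syncs[i], &usyncs[i], 0);
		if (err)
			goto cleanup;
		parsed++;

		/* In-fences become dependencies of the scheduled job. */
		err = xe_sync_entry_add_deps(&syncs[i], job);
		if (err)
			goto cleanup;
	}

	/* The job would be armed and pushed here; its finished fence
	 * then signals every out-sync. */
	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_signal(&syncs[i], job_fence);

cleanup:
	/* Drop syncobj/fence references for every parsed entry. */
	while (parsed-- > 0)
		xe_sync_entry_cleanup(&syncs[parsed]);
	return err;
}
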
/linux/io_uring/ |
sync.c
     15  #include "sync.h"
     27          struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);    in io_sfr_prep()
     32          sync->off = READ_ONCE(sqe->off);
     33          sync->len = READ_ONCE(sqe->len);
     34          sync->flags = READ_ONCE(sqe->sync_range_flags);
     42          struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);    in io_sync_file_range()
     48          ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
     55          struct io_sync *sync ...                                        in io_fsync_prep() (truncated)
     72          struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);    in io_fsync()
     87          struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);    in io_fallocate_prep()
    101          struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);    in io_fallocate()
    [all...]
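
The prep/issue split above is the standard io_uring opcode pattern: the *_prep() helper snapshots SQE fields with READ_ONCE() at submission time, and the issue handler (io_sync_file_range(), io_fsync(), io_fallocate()) later performs the VFS call. From user space the same opcode is reached through liburing. A small self-contained example, with error handling trimmed and liburing assumed to be installed:

#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd = open("data.log", O_WRONLY);	/* example file */

	io_uring_queue_init(8, &ring, 0);

	/* Queue an async sync_file_range() over the first 4 KiB. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_sync_file_range(sqe, fd, 4096 /* len */, 0 /* off */, 0);

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	printf("sync_file_range returned %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}
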
/linux/arch/powerpc/platforms/powermac/ |
cache.S
    sync at lines 45, 52, 58, 60, 81, 88, 91, 96; "3: sync" at 106, 110
    [all...]
/linux/arch/powerpc/kernel/ |
l2cr_6xx.S
    sync at lines 100, 107, 118, 120, 156, 158, 182, 196, 198; "21: sync" at 202
    [all...]

cpu_setup_ppc970.S
    sync at lines 28, 31, 34, 37, 50, 104, 111, 155, 158, 162
    [all...]

misc_64.S
    sync at lines 47, 53, 58, 62, 68, 73, 86, 88, 94, 100
    [all...]

cpu_setup_6xx.S
    "1: sync" at line 102; sync at 104, 106, 118, 120 (/* on 604e/604r */), 122, 132, 173, 175, 202
    [all...]
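
Every PowerPC assembly hit above is the `sync` instruction itself, a full memory barrier that orders all prior storage accesses before any later ones; these files issue it around cache and power-management register updates. In kernel C the same barrier is spelled mb(). A minimal illustration of the mapping, assuming a powerpc target with GCC/Clang inline assembly:

/* Rough equivalent of the kernel's mb() on powerpc: the "memory"
 * clobber stops compiler reordering, the sync instruction stops
 * hardware reordering. */
static inline void ppc_sync(void)
{
	__asm__ __volatile__("sync" : : : "memory");
}

/* Typical use around a device register write, mirroring the .S files:
 * perform the store, then sync before anything that depends on it. */
static inline void write_reg_sync(volatile unsigned int *reg, unsigned int val)
{
	*reg = val;
	ppc_sync();
}
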
/linux/drivers/clk/tegra/ |
clk-audio-sync.c
     15          struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);    in clk_sync_source_recalc_rate()
     17          return sync->rate;
     23          struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);    in clk_sync_source_round_rate()
     25          if (rate > sync->max_rate)
     34          struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);    in clk_sync_source_set_rate()
     36          sync->rate = rate;
     49          struct tegra_clk_sync_source *sync;                             in tegra_clk_register_sync_source()
     53          sync = kzalloc(sizeof(*sync), GFP_KERNEL);
     54          if (!sync) {
    [all...]
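
These three callbacks describe a "sync source" clock with no rate hardware of its own: the rate is simply cached in the driver struct and clamped to max_rate. In the common clock framework they would be collected into a clk_ops table, roughly as sketched below; only the three callbacks are confirmed by the listing, the table name and wiring are assumptions:

#include <linux/clk-provider.h>

/* Assumed wiring of the callbacks shown above into a clk_ops table. */
static const struct clk_ops tegra_clk_sync_source_ops = {
	.recalc_rate = clk_sync_source_recalc_rate,	/* report cached rate */
	.round_rate  = clk_sync_source_round_rate,	/* clamp to max_rate */
	.set_rate    = clk_sync_source_set_rate,	/* cache the new rate */
};
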
/linux/arch/powerpc/platforms/52xx/ |
mpc52xx_sleep.S
    "sync; isync;" at lines 16, 40, 42, 57, 59; sync at 25, 30, 73, 77, 82
    [all...]
/linux/drivers/gpu/drm/radeon/ |
radeon_sync.c
     35  * radeon_sync_create - zero init sync object
     37  * @sync: sync object to initialize
     39  * Just clear the sync object for now.
     41  void radeon_sync_create(struct radeon_sync *sync)
     46          sync->semaphores[i] = NULL;
     49          sync->sync_to[i] = NULL;
     51          sync->last_vm_update = NULL;
     55  * radeon_sync_fence - use the semaphore to sync to a fence
     57  * @sync ...                                                (truncated)
     62  radeon_sync_fence(struct radeon_sync *sync, struct radeon_fence *fence)
     90  radeon_sync_resv(struct radeon_device *rdev, struct radeon_sync *sync, struct dma_resv *resv, bool shared)
    122  radeon_sync_rings(struct radeon_device *rdev, struct radeon_sync *sync, int ring)
    197  radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync, struct radeon_fence *fence)
    [all...]
rv770_dma.c
     36  * @resv: reservation object to sync to
     48          struct radeon_sync sync;                         in rv770_copy_dma()
     55          radeon_sync_create(&sync);
     62          radeon_sync_free(rdev, &sync, NULL);
     66          radeon_sync_resv(rdev, &sync, resv, false);
     67          radeon_sync_rings(rdev, &sync, ring->idx);
     86          radeon_sync_free(rdev, &sync, NULL);
     91          radeon_sync_free(rdev, &sync, fence);
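
rv770_copy_dma() is a complete worked example of the radeon_sync API listed above: create the sync object, pull fences from the buffer's reservation object, emit semaphore waits for the chosen ring, and free the object, handing it the new fence on success or NULL on error paths. A condensed, hedged skeleton of that pattern; ring locking and the DMA packets are elided, and radeon_fence_emit() is assumed from the wider radeon API:

/* Skeleton of the pattern visible in rv770_copy_dma(). */
struct radeon_fence *copy_with_sync(struct radeon_device *rdev,
				    struct radeon_ring *ring,
				    struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int r;

	radeon_sync_create(&sync);

	/* Queue waits on everything attached to the reservation object. */
	radeon_sync_resv(rdev, &sync, resv, false /* not shared */);
	radeon_sync_rings(rdev, &sync, ring->idx);

	/* ... emit the DMA copy packets on the ring here ... */

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_sync_free(rdev, &sync, NULL);	/* error: no fence */
		return ERR_PTR(r);
	}

	radeon_sync_free(rdev, &sync, fence);	/* success: keyed to the fence */
	return fence;
}
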
/linux/tools/testing/selftests/kvm/ |
memslot_perf_test.c
    152                  "Unexpected sync ucall, got %lx",        in vcpu_worker()
    291          struct sync_area *sync;                          in prepare_vm()
    352          sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
    353          sync->guest_page_size = data->vm->page_size;
    354          atomic_init(&sync->start_flag, false);
    355          atomic_init(&sync->exit_flag, false);
    356          atomic_init(&sync->sync_flag, false);
    385  static void let_guest_run(struct sync_area *sync)
    387          atomic_store_explicit(&sync ...                  in let_guest_run() (truncated)
    392          struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;    in guest_spin_until_start()
    398  make_guest_exit(struct sync_area *sync)
    405          struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;    in _guest_should_exit()
    418  host_perform_sync(struct sync_area *sync)
    431          struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;    in guest_perform_sync()
    449          struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;    in guest_code_test_memslot_move()
    478          struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;    in guest_code_test_memslot_map()
    510          struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;    in guest_code_test_memslot_unmap()
    545          struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;    in guest_code_test_memslot_rw()
    578  test_memslot_move_prepare(struct vm_data *data, struct sync_area *sync, uint64_t *maxslots, bool isactive)
    614  test_memslot_move_prepare_active(struct vm_data *data, struct sync_area *sync, uint64_t *maxslots)
    621  test_memslot_move_prepare_inactive(struct vm_data *data, struct sync_area *sync, uint64_t *maxslots)
    627  test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
    680  test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
    719  test_memslot_unmap_loop_common(struct vm_data *data, struct sync_area *sync, uint64_t chunk)
    746  test_memslot_unmap_loop(struct vm_data *data, struct sync_area *sync)
    757  test_memslot_unmap_loop_chunked(struct vm_data *data, struct sync_area *sync)
    765  test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
    808          struct sync_area *sync;                          in test_execute()
    [all...]
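
The sync_area here is a page of C11 atomics at a fixed guest physical address (MEM_SYNC_GPA), so host code (via vm_gpa2hva()) and guest code (via the identity-mapped pointer) toggle the same flags: start_flag releases the guest, exit_flag asks it to stop, and sync_flag is flipped back and forth by host_perform_sync()/guest_perform_sync() as a per-iteration barrier. The same flag-toggle handshake, shown as a runnable plain-C11 illustration where two threads stand in for host and guest (all names below are mine):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sync_flag;

/* "Host" side: request one round of work and wait for its completion. */
static void host_perform_sync(void)
{
	atomic_store_explicit(&sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync_flag, memory_order_acquire))
		;	/* spin until the guest flips it back */
}

/* "Guest" side: wait for the request, do the work, acknowledge. */
static void *guest_thread(void *arg)
{
	while (!atomic_load_explicit(&sync_flag, memory_order_acquire))
		;	/* spin until the host raises the flag */
	puts("guest: one benchmark iteration");
	atomic_store_explicit(&sync_flag, false, memory_order_release);
	return arg;
}

int main(void)
{
	pthread_t guest;

	atomic_init(&sync_flag, false);
	pthread_create(&guest, NULL, guest_thread, NULL);
	host_perform_sync();	/* returns once the guest acknowledged */
	pthread_join(guest, NULL);
	return 0;
}
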
/linux/arch/parisc/kernel/ |
perf_asm.S
    "sync ; follow ERS" at line 46; sync at 163, 199, 211, 223, 307, 331, 343, 391, 403
    [all...]
/linux/drivers/gpu/drm/amd/amdgpu/ |
amdgpu_sync.h
     43  * Container for fences used to sync command submissions.
     49  void amdgpu_sync_create(struct amdgpu_sync *sync);
     50  int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
     52  int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
     55  int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv);
     56  struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
     58  struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
     61  int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
     62  int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
     63  void amdgpu_sync_free(struct amdgpu_sync *sync);
    [all...]
amdgpu_sync.c
     46  * amdgpu_sync_create - zero init sync object
     48  * @sync: sync object to initialize
     50  * Just clear the sync object for now.
     52  void amdgpu_sync_create(struct amdgpu_sync *sync)
     54          hash_init(sync->fences);
    127  * @sync: sync object to add the fence to
    133  static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
    137          hash_for_each_possible(sync ...                  in amdgpu_sync_add_later() (truncated)
    161  amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f, gfp_t flags)
    242  amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode, void *owner)
    277  amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
    316  amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring)
    356  amdgpu_sync_get_fence(struct amdgpu_sync *sync)
    434  amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
    458  amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
    482  amdgpu_sync_free(struct amdgpu_sync *sync)
    [all...]
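
Unlike radeon's fixed per-ring arrays, amdgpu_sync keeps fences in a hashtable (hash_init()/hash_for_each_possible() above) keyed by fence context, so later fences from the same context replace earlier ones. A hedged sketch of a typical consumer, grounded in the amdgpu_sync.h declarations: gather fences from a reservation object, then push them into a job's dependencies or wait synchronously. The sync mode and owner below are assumptions:

/* Sketch of the create/resv/push/free cycle; mode and owner are assumed. */
static int job_sync_to_resv(struct amdgpu_device *adev,
			    struct amdgpu_job *job,
			    struct dma_resv *resv, void *owner)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);

	/* Collect the relevant fences from the reservation object. */
	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
	if (r)
		goto free;

	/* Hand the collected fences to the job as dependencies... */
	r = amdgpu_sync_push_to_job(&sync, job);
	/* ...or, on a synchronous path: r = amdgpu_sync_wait(&sync, true); */

free:
	amdgpu_sync_free(&sync);
	return r;
}
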
/linux/arch/powerpc/platforms/44x/ |
misc_44x.S
    sync at lines 18, 20, 23, 25, 32, 34, 37, 39
/linux/arch/mips/alchemy/common/ |
sleeper.S
    sync at lines 74, 76, 103, 105, 107, 129, 131, 146, 175, 181
    [all...]
/linux/drivers/gpu/drm/sti/ |
sti_vtg.c
    185  static void vtg_set_hsync_vsync_pos(struct sti_vtg_sync_params *sync,
    212          sync->hsync = (stop << 16) | start;
    230          sync->vsync_line_top = (fallsync_top << 16) | risesync_top;
    231          sync->vsync_off_top = (fallsync_offs_top << 16) | risesync_offs_top;
    234          sync->vsync_line_bot = sync->vsync_line_top;
    235          sync->vsync_off_bot = sync->vsync_off_top;
    240          struct sti_vtg_sync_params *sync,                in vtg_set_mode()
    255          vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDM ...    in vtg_set_mode() (truncated)
    [all...]
/linux/net/caif/ |
cfserl.c
     25          spinlock_t sync;                                 (struct member)
     49          spin_lock_init(&this->sync);                     in cfserl_create()
     67          spin_lock(&layr->sync);                          in cfserl_receive()
     74          spin_unlock(&layr->sync);
     94          spin_unlock(&layr->sync);
    112          spin_unlock(&layr->sync);
    131          spin_unlock(&layr->sync);
    142          spin_unlock(&layr->sync);
    156          spin_unlock(&layr->sync);
    158          spin_lock(&layr->sync);                          in cfserl_receive()
    [all...]
/linux/drivers/gpu/drm/nouveau/ |
nouveau_sched.c
     39          job->sync = args->sync;                          in nouveau_job_init()
     46          if (job->sync)
     59          if (job->sync) {
    141          struct drm_nouveau_sync *sync,                   in sync_find_fence()
    144          u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
    153                  point = sync->timeline_value;
    156                  sync->handle, point,
    171          struct drm_nouveau_sync *sync = &job->in_sync.data[i];     in nouveau_job_add_deps()
    173          ret = sync_find_fence(job, sync, ...             in nouveau_job_add_deps() (truncated)
    212          struct drm_nouveau_sync *sync = &job->out_sync.data[i];    in nouveau_job_fence_attach_prepare()
    255          struct drm_nouveau_sync *sync = &job->out_sync.data[i];    in nouveau_job_fence_attach()
    [all...]
/linux/tools/testing/selftests/net/bench/page_pool/ |
time_bench.c
    257          struct time_bench_sync *sync = cpu->sync;        in invoke_test_on_cpu_func()
    266          atomic_inc(&sync->nr_tests_running);
    267          wait_for_completion(&sync->start_event);
    281          atomic_dec(&sync->nr_tests_running);
    331          struct time_bench_sync *sync,                    in time_bench_run_concurrent()
    341          /* Reset sync conditions */
    342          atomic_set(&sync->nr_tests_running, 0);
    343          init_completion(&sync->start_event);
    350          c->sync ...                                      in time_bench_run_concurrent() (truncated)
    [all...]
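
The start gate here is a classic barrier-then-go pattern: each worker increments nr_tests_running and blocks on start_event, and the coordinator completes the event once every worker has arrived so all timing loops begin together. A runnable pthreads illustration of the same idea, where pthread_barrier_t plays the role of the counter plus completion (this is not the kernel code):

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

/* The barrier stands in for nr_tests_running + start_event: it
 * releases every thread only once all of them have arrived. */
static pthread_barrier_t start_event;

static void *worker(void *arg)
{
	long id = (long)arg;

	pthread_barrier_wait(&start_event);	/* all workers start together */
	printf("worker %ld: timing loop starts now\n", id);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	long i;

	/* +1 so main() also participates and releases the workers. */
	pthread_barrier_init(&start_event, NULL, NR_WORKERS + 1);

	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);

	pthread_barrier_wait(&start_event);	/* the "go" signal */

	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);

	pthread_barrier_destroy(&start_event);
	return 0;
}
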
/linux/include/trace/events/ |
compaction.h
    103                  unsigned long zone_end, bool sync),
    105          TP_ARGS(cc, zone_start, zone_end, sync),
    112                  __field(bool, sync)
    120                  __entry->sync = sync;
    128                  __entry->sync ? "sync" : "async")
    133                  unsigned long zone_end, bool sync,
    136          TP_ARGS(cc, zone_start, zone_end, sync, status),
    143                  __field(bool, sync)
    [all...]