Searched refs: mmap_lock (Results 1 – 25 of 41, sorted by relevance)

/linux/include/linux/
mmap_lock.h
18 .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
71 rwsem_assert_held(&mm->mmap_lock); in mmap_assert_locked()
76 rwsem_assert_held_write(&mm->mmap_lock); in mmap_assert_write_locked()
312 down_write(&mm->mmap_lock); in mmap_write_lock()
320 down_write_nested(&mm->mmap_lock, subclass); in mmap_write_lock_nested()
330 ret = down_write_killable(&mm->mmap_lock); in mmap_write_lock_killable()
355 up_write(&mm->mmap_lock); in mmap_write_unlock()
362 downgrade_write(&mm->mmap_lock); in mmap_write_downgrade()
368 down_read(&mm->mmap_lock); in mmap_read_lock()
377 ret = down_read_killable(&mm->mmap_lock); in mmap_read_lock_killable()
[all …]
io_uring_types.h
463 struct mutex mmap_lock; member
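
The mmap_lock.h hits above are the wrapper API the rest of the kernel is expected to use instead of operating on the mm_struct rwsem directly (the io_uring_types.h ctx->mmap_lock hit is an unrelated mutex that merely shares the name). A minimal sketch of the common read/write pattern follows; walk_count_vmas() and its body are hypothetical, only the mmap_* calls come from the header:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Hypothetical helper: count VMAs under the read lock, then take the
 * write lock (killable) the way a caller modifying the address space would. */
static int walk_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int count = 0;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);		/* shared: only reading the VMA tree */
	for_each_vma(vmi, vma) {
		mmap_assert_locked(mm);	/* the rwsem assertion quoted at line 71 */
		count++;
	}
	mmap_read_unlock(mm);

	if (mmap_write_lock_killable(mm))	/* exclusive; fails with -EINTR on a fatal signal */
		return -EINTR;
	/* ... modify the address space here ... */
	mmap_write_unlock(mm);
	return count;
}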
/linux/include/trace/events/
mmap_lock.h
3 #define TRACE_SYSTEM mmap_lock
14 DECLARE_EVENT_CLASS(mmap_lock,
40 DEFINE_EVENT(mmap_lock, name, \
/linux/drivers/media/common/videobuf2/
videobuf2-core.c
588 lockdep_assert_held(&q->mmap_lock); in __vb2_queue_free()
900 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
912 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
933 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
936 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
1005 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
1014 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
1017 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
1030 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
1032 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
[all …]
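
The videobuf2 hits show the usual split between callers and helpers: vb2_core_reqbufs() takes q->mmap_lock with mutex_lock()/mutex_unlock(), while __vb2_queue_free() only asserts the lock with lockdep_assert_held(). A minimal sketch of that convention, with hypothetical names (demo_queue, __demo_free_buffers, demo_reqbufs):

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_queue {
	struct mutex mmap_lock;		/* protects the buffer state */
	unsigned int num_buffers;
};

/* Internal helper: documents its locking contract instead of locking;
 * lockdep_assert_held() only checks anything on CONFIG_LOCKDEP kernels. */
static void __demo_free_buffers(struct demo_queue *q)
{
	lockdep_assert_held(&q->mmap_lock);
	q->num_buffers = 0;
}

/* Entry point: owns the lock/unlock pair around the helper. */
static void demo_reqbufs(struct demo_queue *q)
{
	mutex_lock(&q->mmap_lock);
	__demo_free_buffers(q);
	mutex_unlock(&q->mmap_lock);
}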
/linux/io_uring/
memmap.c
253 guard(mutex)(&ctx->mmap_lock); in io_create_region_mmap_safe()
286 lockdep_assert_held(&ctx->mmap_lock); in io_region_validate_mmap()
330 guard(mutex)(&ctx->mmap_lock); in io_uring_mmap()
362 guard(mutex)(&ctx->mmap_lock); in io_uring_get_unmapped_area()
412 guard(mutex)(&ctx->mmap_lock); in io_uring_get_unmapped_area()
kbuf.c
91 guard(mutex)(&ctx->mmap_lock); in io_buffer_add_list()
445 scoped_guard(mutex, &ctx->mmap_lock) { in io_destroy_buffers()
458 scoped_guard(mutex, &ctx->mmap_lock) in io_destroy_bl()
698 scoped_guard(mutex, &ctx->mmap_lock) in io_unregister_pbuf_ring()
733 lockdep_assert_held(&ctx->mmap_lock); in io_pbuf_get_region()
zcrx.c
539 lockdep_assert_held(&ctx->mmap_lock); in io_zcrx_get_region()
591 scoped_guard(mutex, &ctx->mmap_lock) { in io_register_zcrx_ifq()
629 scoped_guard(mutex, &ctx->mmap_lock) { in io_register_zcrx_ifq()
644 scoped_guard(mutex, &ctx->mmap_lock) in io_register_zcrx_ifq()
658 scoped_guard(mutex, &ctx->mmap_lock) { in io_unregister_zcrx_ifqs()
register.c
500 mutex_lock(&ctx->mmap_lock); in io_register_resize_rings()
566 mutex_unlock(&ctx->mmap_lock); in io_register_resize_rings()
609 guard(mutex)(&ctx->mmap_lock); in io_register_mem_region()
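
Most of these io_uring hits take ctx->mmap_lock through the scope-based helpers from include/linux/cleanup.h rather than explicit mutex_lock()/mutex_unlock() pairs. A minimal sketch of both forms; demo_ctx and the two functions are hypothetical:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct demo_ctx {
	struct mutex mmap_lock;
	unsigned long nr_regions;
};

static void demo_add_region(struct demo_ctx *ctx)
{
	/* guard() acquires the mutex and drops it automatically when the
	 * enclosing scope ends, as in io_create_region_mmap_safe(). */
	guard(mutex)(&ctx->mmap_lock);
	ctx->nr_regions++;
}

static unsigned long demo_count_regions(struct demo_ctx *ctx)
{
	unsigned long nr;

	/* scoped_guard() confines the critical section to the statement
	 * or block that follows, as in io_destroy_buffers(). */
	scoped_guard(mutex, &ctx->mmap_lock)
		nr = ctx->nr_regions;
	return nr;
}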
/linux/kernel/bpf/
mmap_unlock_work.h
60 rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); in bpf_mmap_unlock_mm()
/linux/mm/
init-mm.c
33 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
Makefile
58 debug.o gup.o mmap_lock.o vma_init.o $(mmu-y)
mmu_notifier.c
980 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
/linux/drivers/infiniband/hw/cxgb4/
iw_cxgb4.h
525 spinlock_t mmap_lock; member
558 spin_lock(&ucontext->mmap_lock); in remove_mmap()
564 spin_unlock(&ucontext->mmap_lock); in remove_mmap()
570 spin_unlock(&ucontext->mmap_lock); in remove_mmap()
603 spin_lock(&ucontext->mmap_lock); in insert_mmap()
607 spin_unlock(&ucontext->mmap_lock); in insert_mmap()
provider.c
89 spin_lock_init(&context->mmap_lock); in c4iw_alloc_ucontext()
103 spin_lock(&context->mmap_lock); in c4iw_alloc_ucontext()
106 spin_unlock(&context->mmap_lock); in c4iw_alloc_ucontext()
cq.c
1110 spin_lock(&ucontext->mmap_lock); in c4iw_create_cq()
1120 spin_unlock(&ucontext->mmap_lock); in c4iw_create_cq()
/linux/tools/perf/util/bpf_skel/vmlinux/
vmlinux.h
97 struct rw_semaphore mmap_lock; member
/linux/tools/perf/util/bpf_skel/
lock_contention.bpf.c
175 struct rw_semaphore mmap_lock; member
377 if (bpf_core_field_exists(mm_new->mmap_lock)) { in check_lock_type()
378 if (&mm_new->mmap_lock == (void *)lock) in check_lock_type()
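
The perf lock_contention.bpf.c hits use BPF CO-RE to check at load time whether the running kernel's mm_struct actually has an mmap_lock field before comparing a contended lock address against it. A minimal, hypothetical sketch of that check (not perf's actual helper), assuming a generated vmlinux.h:

#include "vmlinux.h"			/* generated kernel type header */
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

/* Returns true if 'lock' is this mm's mmap_lock. bpf_core_field_exists()
 * is resolved by libbpf against the target kernel's BTF, so the program
 * still loads on kernels whose mm_struct lacks the field. */
static __always_inline bool lock_is_mmap_lock(struct mm_struct *mm, void *lock)
{
	if (!bpf_core_field_exists(mm->mmap_lock))
		return false;
	return (void *)&mm->mmap_lock == lock;
}

char LICENSE[] SEC("license") = "GPL";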
/linux/Documentation/mm/
transhuge.rst
94 mmap_lock in read (or write) mode to be sure a huge pmd cannot be
96 takes the mmap_lock in write mode in addition to the anon_vma lock). If
process_addrs.rst
45 * **mmap locks** - Each MM has a read/write semaphore :c:member:`!mmap_lock`
419 mm->mmap_lock
453 ->mmap_lock
458 ->mmap_lock
463 ->mmap_lock (fault_in_readable->do_page_fault)
/linux/Documentation/admin-guide/mm/
numa_memory_policy.rst
381 task's mm's mmap_lock for read during the query. The set_mempolicy() and
382 mbind() APIs [see below] always acquire the mmap_lock for write when
388 we hold them mmap_lock for read. Again, because replacing the task or vma
389 policy requires that the mmap_lock be held for write, the policy can't be
393 shared memory policy while another task, with a distinct mmap_lock, is
multigen_lru.rst
41 theoretically worsen lock contention (mmap_lock). If it is
/linux/include/media/
videobuf2-core.h
646 struct mutex mmap_lock; member
/linux/arch/x86/kernel/
tboot.c
98 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock),
/linux/Documentation/kernel-hacking/
false-sharing.rst
48 false sharing. One of these is a rw_semaphore 'mmap_lock' inside
/linux/arch/s390/kernel/
uv.c
345 lockdep_assert_not_held(&mm->mmap_lock); in s390_wiggle_split_folio()
