/linux/include/linux/

  mmap_lock.h
    18    .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
    71    rwsem_assert_held(&mm->mmap_lock);              in mmap_assert_locked()
    76    rwsem_assert_held_write(&mm->mmap_lock);        in mmap_assert_write_locked()
    312   down_write(&mm->mmap_lock);                     in mmap_write_lock()
    320   down_write_nested(&mm->mmap_lock, subclass);    in mmap_write_lock_nested()
    330   ret = down_write_killable(&mm->mmap_lock);      in mmap_write_lock_killable()
    355   up_write(&mm->mmap_lock);                       in mmap_write_unlock()
    362   downgrade_write(&mm->mmap_lock);                in mmap_write_downgrade()
    368   down_read(&mm->mmap_lock);                      in mmap_read_lock()
    377   ret = down_read_killable(&mm->mmap_lock);       in mmap_read_lock_killable()
    [all …]
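The hits above show that the mmap_lock API is a thin wrapper around a per-mm rw_semaphore: readers take it shared, writers take it exclusive, and mmap_write_downgrade() demotes write access to read access without dropping the lock. A minimal usage sketch; the lookup_vma()/update_then_read() helpers are hypothetical stand-ins, not kernel functions:

```c
#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Read side: enough to look up VMAs, must not change the tree. */
static struct vm_area_struct *lookup_vma(struct mm_struct *mm,
					 unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);	/* stable while the lock is held */
	mmap_read_unlock(mm);
	return vma;			/* NOTE: may be stale after unlock */
}

/* Write side with downgrade: mutate the tree, then keep read access. */
static void update_then_read(struct mm_struct *mm)
{
	if (mmap_write_lock_killable(mm))
		return;			/* interrupted by a fatal signal */
	/* ... modify the VMA tree here ... */
	mmap_write_downgrade(mm);	/* write -> read without dropping it */
	/* ... read-only work; other readers may now enter ... */
	mmap_read_unlock(mm);
}
```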
  io_uring_types.h
    463   struct mutex mmap_lock;                         member
/linux/include/trace/events/

  mmap_lock.h
    3     #define TRACE_SYSTEM mmap_lock
    14    DECLARE_EVENT_CLASS(mmap_lock,
    40    DEFINE_EVENT(mmap_lock, name, \
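These hits are the standard tracepoint boilerplate: DECLARE_EVENT_CLASS() defines the record layout and format once, and DEFINE_EVENT() stamps out named events that share it. A hedged sketch of the pattern; the field list is illustrative, not the real mmap_lock trace layout, and the TRACE_INCLUDE_FILE/TRACE_INCLUDE_PATH boilerplate is omitted:

```c
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mmap_lock

#if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MMAP_LOCK_H

#include <linux/tracepoint.h>

struct mm_struct;

/* The class defines the record layout and format string once... */
DECLARE_EVENT_CLASS(mmap_lock,

	TP_PROTO(struct mm_struct *mm, bool write),

	TP_ARGS(mm, write),

	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(bool, write)
	),

	TP_fast_assign(
		__entry->mm = mm;
		__entry->write = write;
	),

	TP_printk("mm=%p write=%s", __entry->mm,
		  __entry->write ? "true" : "false")
);

/* ...and each DEFINE_EVENT stamps out one named event reusing it. */
DEFINE_EVENT(mmap_lock, mmap_lock_start_locking,
	TP_PROTO(struct mm_struct *mm, bool write),
	TP_ARGS(mm, write)
);

#endif /* _TRACE_MMAP_LOCK_H */

#include <trace/define_trace.h>
```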
/linux/drivers/media/common/videobuf2/

  videobuf2-core.c
    588   lockdep_assert_held(&q->mmap_lock);             in __vb2_queue_free()
    900   mutex_lock(&q->mmap_lock);                      in vb2_core_reqbufs()
    912   mutex_unlock(&q->mmap_lock);                    in vb2_core_reqbufs()
    933   mutex_lock(&q->mmap_lock);                      in vb2_core_reqbufs()
    936   mutex_unlock(&q->mmap_lock);                    in vb2_core_reqbufs()
    1005  mutex_lock(&q->mmap_lock);                      in vb2_core_reqbufs()
    1014  mutex_unlock(&q->mmap_lock);                    in vb2_core_reqbufs()
    1017  mutex_unlock(&q->mmap_lock);                    in vb2_core_reqbufs()
    1030  mutex_lock(&q->mmap_lock);                      in vb2_core_reqbufs()
    1032  mutex_unlock(&q->mmap_lock);                    in vb2_core_reqbufs()
    [all …]
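The division of labor here is a common kernel idiom: the public entry point (vb2_core_reqbufs()) takes q->mmap_lock, while the double-underscore helper (__vb2_queue_free()) merely asserts it is held. A minimal sketch of that contract, using a hypothetical queue type rather than the real struct vb2_queue:

```c
#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Hypothetical stand-in for struct vb2_queue. */
struct my_queue {
	struct mutex mmap_lock;	/* serializes buffer setup vs. mmap() */
	unsigned int num_buffers;
};

/* Internal helper: documents (and verifies) its locking contract. */
static void __queue_free(struct my_queue *q)
{
	lockdep_assert_held(&q->mmap_lock);
	/* ... actually free buffers here ... */
	q->num_buffers = 0;
}

/* Public entry point: owns the lock acquisition. */
static int queue_reqbufs(struct my_queue *q, unsigned int count)
{
	mutex_lock(&q->mmap_lock);
	if (q->num_buffers)
		__queue_free(q);
	q->num_buffers = count;
	mutex_unlock(&q->mmap_lock);
	return 0;
}
```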
/linux/io_uring/

  memmap.c
    253   guard(mutex)(&ctx->mmap_lock);                  in io_create_region_mmap_safe()
    286   lockdep_assert_held(&ctx->mmap_lock);           in io_region_validate_mmap()
    330   guard(mutex)(&ctx->mmap_lock);                  in io_uring_mmap()
    362   guard(mutex)(&ctx->mmap_lock);                  in io_uring_get_unmapped_area()
    412   guard(mutex)(&ctx->mmap_lock);                  in io_uring_get_unmapped_area()
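guard(mutex)(&lock) comes from <linux/cleanup.h>: it acquires the mutex and attaches a scope-based cleanup handler, so the lock is released automatically on every exit path from the enclosing scope. A hedged sketch with a hypothetical context struct:

```c
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct my_ctx {
	struct mutex mmap_lock;
	bool dying;
};

/* mmap_lock is dropped on *every* return path below, so early returns
 * cannot leak the lock the way manual lock/unlock pairing can. */
static int do_mmap_safe(struct my_ctx *ctx)
{
	guard(mutex)(&ctx->mmap_lock);

	if (ctx->dying)
		return -ENXIO;	/* unlocks automatically */

	/* ... work under the lock ... */
	return 0;		/* unlocks automatically */
}
```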
  kbuf.c
    91    guard(mutex)(&ctx->mmap_lock);                  in io_buffer_add_list()
    445   scoped_guard(mutex, &ctx->mmap_lock) {          in io_destroy_buffers()
    458   scoped_guard(mutex, &ctx->mmap_lock)            in io_destroy_bl()
    698   scoped_guard(mutex, &ctx->mmap_lock)            in io_unregister_pbuf_ring()
    733   lockdep_assert_held(&ctx->mmap_lock);           in io_pbuf_get_region()
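scoped_guard() is the block-scoped sibling of guard(): the lock covers only the attached statement or braced block instead of the remainder of the function. A minimal sketch, reusing the hypothetical struct my_ctx from the previous example:

```c
/* Hold mmap_lock only for the list manipulation, not for the
 * (possibly slow) teardown work that follows it. */
static void destroy_buffers(struct my_ctx *ctx)
{
	scoped_guard(mutex, &ctx->mmap_lock) {
		/* ... unlink buffers from ctx under the lock ... */
	}	/* mmap_lock dropped here */

	/* ... free the unlinked buffers without holding the lock ... */
}
```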
  zcrx.c
    539   lockdep_assert_held(&ctx->mmap_lock);           in io_zcrx_get_region()
    591   scoped_guard(mutex, &ctx->mmap_lock) {          in io_register_zcrx_ifq()
    629   scoped_guard(mutex, &ctx->mmap_lock) {          in io_register_zcrx_ifq()
    644   scoped_guard(mutex, &ctx->mmap_lock)            in io_register_zcrx_ifq()
    658   scoped_guard(mutex, &ctx->mmap_lock) {          in io_unregister_zcrx_ifqs()
  register.c
    500   mutex_lock(&ctx->mmap_lock);                    in io_register_resize_rings()
    566   mutex_unlock(&ctx->mmap_lock);                  in io_register_resize_rings()
    609   guard(mutex)(&ctx->mmap_lock);                  in io_register_mem_region()
/linux/kernel/bpf/

  mmap_unlock_work.h
    60    rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);  in bpf_mmap_unlock_mm()
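rwsem_release() here is a raw lockdep annotation, not a real unlock: BPF may run in a context where releasing an rwsem is unsafe (waking waiters from NMI/irq-disabled code), so the actual release is deferred to irq_work while lockdep is told immediately that this context no longer owns the lock. A hedged sketch of the idea; the struct and helper names are illustrative, and the irq_work is assumed to have been set up with init_irq_work(..., deferred_unlock):

```c
#include <linux/irq_work.h>
#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

struct unlock_work {
	struct irq_work irq_work;
	struct mm_struct *mm;
};

static void deferred_unlock(struct irq_work *entry)
{
	struct unlock_work *w = container_of(entry, struct unlock_work,
					     irq_work);

	/* Released by a context that never called down_read() itself. */
	mmap_read_unlock_non_owner(w->mm);
}

static void unlock_mm_safe(struct unlock_work *w, struct mm_struct *mm)
{
	if (irqs_disabled()) {
		/*
		 * Releasing an rwsem may wake waiters, which is unsafe
		 * here. Tell lockdep the lock is given up *now*, then
		 * let irq_work perform the real release later.
		 */
		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
		w->mm = mm;
		irq_work_queue(&w->irq_work);
	} else {
		mmap_read_unlock(mm);
	}
}
```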
/linux/mm/

  init-mm.c
    33    .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
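MTREE_INIT_EXT() statically initializes a maple tree whose locking is provided externally; here the VMA tree of init_mm is tied to init_mm.mmap_lock for lockdep purposes rather than to an internal spinlock. A hedged sketch of the same idiom on a made-up structure:

```c
#include <linux/maple_tree.h>
#include <linux/rwsem.h>

/* Hypothetical object whose tree is protected by an external rwsem,
 * mirroring how init_mm ties mm_mt to mmap_lock. */
struct my_space {
	struct rw_semaphore big_lock;
	struct maple_tree tree;
};

static struct my_space space = {
	.big_lock = __RWSEM_INITIALIZER(space.big_lock),
	/* MT_FLAGS_LOCK_EXTERN tells the maple tree (and lockdep) that
	 * callers provide their own locking via big_lock. */
	.tree = MTREE_INIT_EXT(tree, MT_FLAGS_LOCK_EXTERN, space.big_lock),
};
```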
  Makefile
    58    debug.o gup.o mmap_lock.o vma_init.o $(mmu-y)
  mmu_notifier.c
    980   might_lock(&mm->mmap_lock);                     in mmu_interval_notifier_insert()
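might_lock() declares to lockdep that a function may take a lock on some path, so ordering violations are caught even on runs where that path is never exercised. A minimal sketch with a hypothetical helper:

```c
#include <linux/lockdep.h>
#include <linux/mmap_lock.h>

/* Declare the dependency unconditionally, so lockdep can flag callers
 * holding a lock that must not nest outside mmap_lock even when the
 * slow path below is not hit in this particular run. */
static int insert_notifier(struct mm_struct *mm, bool need_vma_walk)
{
	might_lock(&mm->mmap_lock);

	if (need_vma_walk) {
		mmap_read_lock(mm);
		/* ... slow path that actually needs the lock ... */
		mmap_read_unlock(mm);
	}
	return 0;
}
```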
/linux/drivers/infiniband/hw/cxgb4/

  iw_cxgb4.h
    525   spinlock_t mmap_lock;                           member
    558   spin_lock(&ucontext->mmap_lock);                in remove_mmap()
    564   spin_unlock(&ucontext->mmap_lock);              in remove_mmap()
    570   spin_unlock(&ucontext->mmap_lock);              in remove_mmap()
    603   spin_lock(&ucontext->mmap_lock);                in insert_mmap()
    607   spin_unlock(&ucontext->mmap_lock);              in insert_mmap()
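In cxgb4, mmap_lock is an ordinary spinlock protecting a per-ucontext table of mmap-able offsets, populated by insert_mmap() and consumed by remove_mmap(). A hedged sketch of that lookup-table pattern with invented types:

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical per-context table mapping mmap offsets to backing
 * addresses, in the spirit of cxgb4's insert_mmap()/remove_mmap(). */
struct mmap_entry {
	struct list_head entry;
	u64 key;		/* mmap offset handed to userspace */
	u64 addr;		/* backing address */
};

struct my_ucontext {
	spinlock_t mmap_lock;	/* protects mmaps */
	struct list_head mmaps;
};

static void insert_mmap(struct my_ucontext *uctx, struct mmap_entry *e)
{
	spin_lock(&uctx->mmap_lock);
	list_add_tail(&e->entry, &uctx->mmaps);
	spin_unlock(&uctx->mmap_lock);
}

static struct mmap_entry *remove_mmap(struct my_ucontext *uctx, u64 key)
{
	struct mmap_entry *e;

	spin_lock(&uctx->mmap_lock);
	list_for_each_entry(e, &uctx->mmaps, entry) {
		if (e->key == key) {
			list_del(&e->entry);
			spin_unlock(&uctx->mmap_lock);
			return e;
		}
	}
	spin_unlock(&uctx->mmap_lock);
	return NULL;
}
```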
  provider.c
    89    spin_lock_init(&context->mmap_lock);            in c4iw_alloc_ucontext()
    103   spin_lock(&context->mmap_lock);                 in c4iw_alloc_ucontext()
    106   spin_unlock(&context->mmap_lock);               in c4iw_alloc_ucontext()
  cq.c
    1110  spin_lock(&ucontext->mmap_lock);                in c4iw_create_cq()
    1120  spin_unlock(&ucontext->mmap_lock);              in c4iw_create_cq()
/linux/tools/perf/util/bpf_skel/vmlinux/

  vmlinux.h
    97    struct rw_semaphore mmap_lock;                  member
/linux/tools/perf/util/bpf_skel/

  lock_contention.bpf.c
    175   struct rw_semaphore mmap_lock;                  member
    377   if (bpf_core_field_exists(mm_new->mmap_lock)) {   in check_lock_type()
    378   if (&mm_new->mmap_lock == (void *)lock)           in check_lock_type()
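bpf_core_field_exists() is a libbpf CO-RE relocation: the check is resolved against the running kernel's BTF at load time, so the same BPF object works whether or not mm_struct has an mmap_lock field (the member was called mmap_sem before v5.8). A hedged sketch of the pattern; is_mmap_lock() is an invented helper meant to be called from a tracing program:

```c
// SPDX-License-Identifier: GPL-2.0
/* BPF-side sketch: classify a contended lock address as the mm's
 * mmap_lock. Assumes an mm_struct definition from vmlinux.h. */
#include "vmlinux.h"
#include <bpf/bpf_core_read.h>

static __always_inline int is_mmap_lock(struct mm_struct *mm, void *lock)
{
	/* Resolved at load time from kernel BTF: evaluates false on
	 * kernels where the field does not exist under this name. */
	if (bpf_core_field_exists(mm->mmap_lock))
		return (void *)&mm->mmap_lock == lock;
	return 0;
}
```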
/linux/Documentation/mm/

  transhuge.rst
    94    mmap_lock in read (or write) mode to be sure a huge pmd cannot be
    96    takes the mmap_lock in write mode in addition to the anon_vma lock). If
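The rule quoted here is that holding mmap_lock (read or write) keeps khugepaged from installing a huge pmd underneath you, because the collapse path takes mmap_lock for write. A hedged sketch of a reader relying on that guarantee; the walk is deliberately simplified, and a real walker would use pmdp_get() and pmd-level locking around the final check:

```c
#include <linux/mm.h>
#include <linux/pgtable.h>

static bool addr_backed_by_huge_pmd(struct mm_struct *mm,
				    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	bool huge = false;

	/* khugepaged needs mmap_lock for write to collapse this range,
	 * so a read hold is enough to keep the pmd from appearing. */
	mmap_read_lock(mm);
	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud))
				huge = pmd_trans_huge(*pmd_offset(pud, addr));
		}
	}
	mmap_read_unlock(mm);
	return huge;
}
```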
  process_addrs.rst
    45    * **mmap locks** - Each MM has a read/write semaphore :c:member:`!mmap_lock`
    419   mm->mmap_lock
    453   ->mmap_lock
    458   ->mmap_lock
    463   ->mmap_lock (fault_in_readable->do_page_fault)
/linux/Documentation/admin-guide/mm/

  numa_memory_policy.rst
    381   task's mm's mmap_lock for read during the query. The set_mempolicy() and
    382   mbind() APIs [see below] always acquire the mmap_lock for write when
    388   we hold them mmap_lock for read. Again, because replacing the task or vma
    389   policy requires that the mmap_lock be held for write, the policy can't be
    393   shared memory policy while another task, with a distinct mmap_lock, is
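The rule described in these lines, queries take mmap_lock for read while set_mempolicy()/mbind() take it for write, is what keeps a looked-up policy stable for the duration of the query. A hedged sketch of the reader side; peek_vma_policy() is an invented helper, and real mempolicy code goes through accessor functions rather than touching vm_policy directly:

```c
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>

static struct mempolicy *peek_vma_policy(struct mm_struct *mm,
					 unsigned long addr)
{
	struct vm_area_struct *vma;
	struct mempolicy *pol = NULL;

	/* The VMA's policy cannot be replaced while we hold mmap_lock
	 * for read: installing a new one requires the write lock. */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma)
		pol = vma->vm_policy;	/* stable until unlock */
	mmap_read_unlock(mm);
	return pol;	/* NOTE: may be stale once the lock is dropped */
}
```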
  multigen_lru.rst
    41    theoretically worsen lock contention (mmap_lock). If it is
/linux/include/media/

  videobuf2-core.h
    646   struct mutex mmap_lock;                         member
/linux/arch/x86/kernel/

  tboot.c
    98    .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock),
/linux/Documentation/kernel-hacking/

  false-sharing.rst
    48    false sharing. One of these is a rw_semaphore 'mmap_lock' inside
/linux/arch/s390/kernel/

  uv.c
    345   lockdep_assert_not_held(&mm->mmap_lock);        in s390_wiggle_split_folio()