/linux/mm/

mlock.c
  332  if (!(vma->vm_flags & VM_LOCKED))  in allow_mlock_munlock()
  371  if (vma->vm_flags & VM_LOCKED)  in mlock_pte_range()
  396  if (vma->vm_flags & VM_LOCKED)  in mlock_pte_range()
  442  if (newflags & VM_LOCKED)  in mlock_vma_pages_range()
  491  if (!(newflags & VM_LOCKED))  in mlock_fixup()
  493  else if (oldflags & VM_LOCKED)  in mlock_fixup()
  502  if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {  in mlock_fixup()
  586  if (vma->vm_flags & VM_LOCKED) {  in count_mm_mlocked_page_nr()
  661  return do_mlock(start, len, VM_LOCKED);  in SYSCALL_DEFINE2()
  666  vm_flags_t vm_flags = VM_LOCKED;  in SYSCALL_DEFINE3()
  [all …]
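
The mlock.c hits trace the syscall side of VM_LOCKED: the SYSCALL_DEFINE2/SYSCALL_DEFINE3 entries pass VM_LOCKED into do_mlock(), and mlock_fixup() applies or clears the flag on the affected VMAs. For orientation, a minimal userspace sketch of that API (not from the tree; error handling reduced to perror()):

    /*
     * Minimal sketch: mlock() asks the kernel to set VM_LOCKED on the
     * covered VMAs and fault the pages in; munlock() clears it again.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4 * 4096;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return EXIT_FAILURE;
        }

        /* ends up in do_mlock(start, len, VM_LOCKED) */
        if (mlock(buf, len) != 0)
            perror("mlock");

        memset(buf, 0, len);        /* pages are resident and unevictable here */

        if (munlock(buf, len) != 0) /* the flag is dropped again via mlock_fixup() */
            perror("munlock");

        munmap(buf, len);
        return 0;
    }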
secretmem.c
  128  if (!mlock_future_ok(desc->mm, desc->vm_flags | VM_LOCKED, len))  in secretmem_mmap_prepare()
  131  desc->vm_flags |= VM_LOCKED | VM_DONTDUMP;  in secretmem_mmap_prepare()
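
secretmem_mmap_prepare() forces VM_LOCKED | VM_DONTDUMP onto every secretmem mapping, so memfd_secret() memory is neither swapped nor written to core dumps. A hedged userspace illustration (glibc has no wrapper, SYS_memfd_secret may be absent from older headers, and the feature may be disabled on a given kernel, so treat this purely as a sketch):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>

    int main(void)
    {
    #ifdef SYS_memfd_secret
        int fd = syscall(SYS_memfd_secret, 0);

        if (fd < 0 || ftruncate(fd, 4096) != 0) {
            perror("memfd_secret/ftruncate");
            return 1;
        }

        /* secretmem_mmap_prepare() marks this VMA VM_LOCKED | VM_DONTDUMP */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            perror("mmap");
    #endif
        return 0;
    }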
mmap.c
  198  if (mm->def_flags & VM_LOCKED)  in SYSCALL_DEFINE1()
  233  if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))  in mlock_future_ok()
  560  ((vm_flags & VM_LOCKED) ||  in do_mmap()
  974  if (prev->vm_flags & VM_LOCKED)  in find_extend_vma_locked()
  998  if (vma->vm_flags & VM_LOCKED)  in find_extend_vma_locked()
  1134  if (vma->vm_flags & VM_LOCKED)  in SYSCALL_DEFINE5()
  1239  populate = ((mm->def_flags & VM_LOCKED) != 0);  in vm_brk_flags()
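
Several of the mmap.c hits are about inherited locking rather than explicit mlock(): mlockall(MCL_FUTURE) sets VM_LOCKED in mm->def_flags, which is why the brk() and vm_brk_flags() hits above consult def_flags, and mlock_future_ok() is the RLIMIT_MEMLOCK / CAP_IPC_LOCK gate. A small sketch of that route:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* Lock everything mapped now and everything mapped from now on. */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
            perror("mlockall");     /* commonly RLIMIT_MEMLOCK without CAP_IPC_LOCK */
            return 1;
        }

        /* ... new mappings created here start out VM_LOCKED ... */

        munlockall();
        return 0;
    }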
msync.c
  83  (vma->vm_flags & VM_LOCKED)) {  in SYSCALL_DEFINE3()
rmap.c
  858  if (vma->vm_flags & VM_LOCKED) {  in folio_referenced_one()
  879  pra->vm_flags |= VM_LOCKED;  in folio_referenced_one()
  923  pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;  in folio_referenced_one()
  1918  (vma->vm_flags & VM_LOCKED)) {  in try_to_unmap_one()
  2228  if (vma->vm_flags & VM_LOCKED)  in try_to_unmap_one()
  2581  if (vma->vm_flags & VM_LOCKED)  in try_to_migrate_one()
swap.c
  527  if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))  in folio_add_lru_vma()
internal.h
  1042  if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))  in mlock_vma_folio()
  1059  if (unlikely(vma->vm_flags & VM_LOCKED))  in munlock_vma_folio()
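
mlock_vma_folio() does not test VM_LOCKED alone: the "(vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED" form is true only when VM_LOCKED is set and no VM_SPECIAL bit is set, so special mappings are never treated as mlocked. A standalone illustration of the idiom (the helper name is invented and the VM_SPECIAL value below is a placeholder, not the kernel's):

    #include <assert.h>
    #include <stdbool.h>

    #define VM_LOCKED  0x00002000UL   /* matches the mm.h definition below */
    #define VM_SPECIAL 0x70000000UL   /* placeholder mask, not the kernel's value */

    /* true only when VM_LOCKED is set and every VM_SPECIAL bit is clear */
    static bool vma_mlock_applies(unsigned long vm_flags)
    {
        return (vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
    }

    int main(void)
    {
        assert(vma_mlock_applies(VM_LOCKED));                 /* locked, ordinary VMA */
        assert(!vma_mlock_applies(VM_LOCKED | 0x10000000UL)); /* locked but special */
        assert(!vma_mlock_applies(0));                        /* not locked */
        return 0;
    }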
madvise.c
  595  return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));  in can_madv_lru_vma()
  886  forbidden |= VM_LOCKED;  in madvise_dontneed_free_valid_vma()
  1023  if (vma->vm_flags & VM_LOCKED)  in madvise_remove()
  1067  disallowed |= VM_LOCKED;  in is_valid_guard_vma()
mprotect.c
  843  if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&  in mprotect_fixup()
vma.c
  1394  if (next->vm_flags & VM_LOCKED)  in vms_gather_munmap_vmas()
  2535  if (vm_flags & VM_LOCKED) {  in __mmap_complete()
  2820  if (vm_flags & VM_LOCKED)  in do_brk_flags()
  3047  if (vma->vm_flags & VM_LOCKED)  in expand_upwards()
  3126  if (vma->vm_flags & VM_LOCKED)  in expand_downwards()
mremap.c
  1023  if (vma->vm_flags & VM_LOCKED)  in vrm_stat_account()
  1727  if (vma->vm_flags & VM_LOCKED)  in check_prep_vma()
userfaultfd.c
  1576  if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))  in validate_move_areas()
shmem.c
  1554  if ((info->flags & VM_LOCKED) || sbinfo->noswap)  in shmem_writeout()
  2913  if (lock && !(info->flags & VM_LOCKED)) {  in shmem_lock()
  2916  info->flags |= VM_LOCKED;  in shmem_lock()
  2919  if (!lock && (info->flags & VM_LOCKED) && ucounts) {  in shmem_lock()
  2921  info->flags &= ~VM_LOCKED;  in shmem_lock()
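
shmem_lock() is the flag-flipping half of SysV SHM_LOCK/SHM_UNLOCK: it sets or clears VM_LOCKED in the shmem inode's info->flags, and shmem_writeout() checks the same flag before swapping such segments. A userspace sketch of that path (locking is subject to RLIMIT_MEMLOCK / CAP_IPC_LOCK):

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

        if (id < 0) {
            perror("shmget");
            return 1;
        }

        if (shmctl(id, SHM_LOCK, NULL) != 0)    /* shmem_lock(): info->flags |= VM_LOCKED */
            perror("shmctl(SHM_LOCK)");

        if (shmctl(id, SHM_UNLOCK, NULL) != 0)  /* shmem_lock(): info->flags &= ~VM_LOCKED */
            perror("shmctl(SHM_UNLOCK)");

        shmctl(id, IPC_RMID, NULL);             /* drop the segment */
        return 0;
    }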
migrate.c
  311  if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||  in try_to_map_unused_to_zeropage()
  437  if (READ_ONCE(vma->vm_flags) & VM_LOCKED)  in remove_migration_pte()
vmscan.c
  919  if (vm_flags & VM_LOCKED)  in folio_check_references()
  3369  if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL))  in should_skip_vma()
filemap.c
  3401  if (!(vma->vm_flags & VM_LOCKED))  in filemap_fault_recheck_pte_none()
huge_memory.c
  3193  if (vma->vm_flags & VM_LOCKED)  in __discard_anon_folio_pmd_locked()
memory.c
  4324  if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||  in should_try_to_free_swap()

/linux/Documentation/mm/

unevictable-lru.rst
  49  * Those mapped into VM_LOCKED [mlock()ed] VMAs.
  182  VM_LOCKED VMA, or found in a VMA being VM_LOCKED.
  198  There may be situations where a folio is mapped into a VM_LOCKED VMA,
  230  of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
  237  attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs. When
  240  other VM_LOCKED VMAs still mapped the page.
  254  mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable
  270  (4) in the fault path and when a VM_LOCKED stack segment is expanded; or
  273  reclaim a page in a VM_LOCKED VMA by folio_referenced() or try_to_unmap().
  279  (2) munmap()'d out of the last VM_LOCKED VMA that maps the page, including
  [all …]

/linux/include/linux/

mman.h
  156  _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |  in calc_vm_flag_bits()
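
calc_vm_flag_bits() is where mmap()'s MAP_LOCKED flag is translated into VM_LOCKED on the new VMA, i.e. a mapping can start life locked without a separate mlock() call. A short userspace sketch (note that, unlike mlock(), MAP_LOCKED does not guarantee the pages are populated before mmap() returns):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap(MAP_LOCKED)");  /* e.g. EAGAIN when over RLIMIT_MEMLOCK */
            return 1;
        }

        munmap(p, len);                  /* unmapping drops the lock with the VMA */
        return 0;
    }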
mm.h
  299  #define VM_LOCKED 0x00002000   (macro)
  499  #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)

/linux/tools/testing/vma/

vma_internal.h
  59  #define VM_LOCKED 0x00002000   (macro)
  92  #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
  1183  if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))  in mlock_future_ok()

/linux/include/trace/events/

mmflags.h
  217  {VM_LOCKED, "locked" }, \

/linux/drivers/misc/sgi-gru/

grufile.c
  104  vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_LOCKED |  in gru_file_mmap()
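
The GRU driver sets VM_LOCKED directly on its own mapping with vm_flags_set() rather than relying on userspace mlock(). A rough sketch of a driver ->mmap handler in that style; mydev_mmap and MYDEV_PHYS_BASE are hypothetical and this is not the GRU code itself:

    #include <linux/fs.h>
    #include <linux/mm.h>

    #define MYDEV_PHYS_BASE 0xfd000000UL    /* hypothetical MMIO base */

    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
        unsigned long size = vma->vm_end - vma->vm_start;

        /* Mark the VMA I/O, PFN-mapped and locked before wiring it up. */
        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_LOCKED | VM_DONTEXPAND);

        return remap_pfn_range(vma, vma->vm_start,
                               MYDEV_PHYS_BASE >> PAGE_SHIFT, size,
                               pgprot_noncached(vma->vm_page_prot));
    }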

/linux/fs/proc/

task_mmu.c
  1010  bool locked = !!(vma->vm_flags & VM_LOCKED);  in smaps_pte_entry()
  1058  bool locked = !!(vma->vm_flags & VM_LOCKED);  in smaps_pmd_entry()
  1149  [ilog2(VM_LOCKED)] = "lo",  in show_smap_vma_flags()
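
show_smap_vma_flags() is what surfaces VM_LOCKED to userspace, as the two-letter code "lo" in the VmFlags line of /proc/<pid>/smaps. A rough self-check sketch (it simply prints every VmFlags line that advertises a locked VMA rather than matching the exact range):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        static char buf[1 << 20];
        char line[512];
        FILE *f;

        if (mlock(buf, sizeof(buf)) != 0)
            perror("mlock");

        f = fopen("/proc/self/smaps", "r");
        if (!f) {
            perror("fopen");
            return 1;
        }

        while (fgets(line, sizeof(line), f)) {
            /* "lo" is the mnemonic show_smap_vma_flags() uses for VM_LOCKED */
            if (strncmp(line, "VmFlags:", 8) == 0 && strstr(line, " lo "))
                fputs(line, stdout);
        }

        fclose(f);
        munlock(buf, sizeof(buf));
        return 0;
    }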