Lines Matching +full:t +full:- +full:head +full:- +full:semi (full-text search hits; every line below is from mm/mlock.c in the Linux kernel)

1 // SPDX-License-Identifier: GPL-2.0
52 * in vmscan and, possibly, the fault path; and to support semi-accurate
88 folio->mlock_count++; in __mlock_folio()
95 folio->mlock_count = !!folio_test_mlocked(folio); in __mlock_folio()
114 folio->mlock_count = !!folio_test_mlocked(folio); in __mlock_new_folio()
135 if (folio->mlock_count) in __munlock_folio()
136 folio->mlock_count--; in __munlock_folio()
137 if (folio->mlock_count) in __munlock_folio()
144 __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); in __munlock_folio()
194 folio = fbatch->folios[i]; in mlock_folio_batch()
196 folio = (struct folio *)((unsigned long)folio - mlock); in mlock_folio_batch()
197 fbatch->folios[i] = folio; in mlock_folio_batch()
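
The mlock_folio_batch() lines above (194-197) recover the real struct folio pointer from a batch entry whose spare low bits carry the requested operation; because folios are at least word-aligned, those bits are otherwise always zero. Below is a minimal user-space sketch of the same pointer-tagging idiom; tag_ptr(), untag_ptr() and TAG_MASK are made-up names for illustration, not kernel interfaces.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_MASK 0x3UL  /* two spare low bits on any >= 4-byte-aligned pointer */

/* Pack a small tag into the spare low bits of an aligned pointer. */
static void *tag_ptr(void *p, unsigned long tag)
{
        assert(((uintptr_t)p & TAG_MASK) == 0);  /* pointer must be aligned */
        assert((tag & ~TAG_MASK) == 0);          /* tag must fit in the spare bits */
        return (void *)((uintptr_t)p | tag);
}

/* Split a tagged pointer back into the real pointer and its tag. */
static void *untag_ptr(void *tagged, unsigned long *tag)
{
        *tag = (uintptr_t)tagged & TAG_MASK;
        return (void *)((uintptr_t)tagged - *tag);  /* the "- mlock" step above */
}

int main(void)
{
        int *obj = malloc(sizeof(*obj));
        unsigned long tag;
        void *entry = tag_ptr(obj, 0x1);         /* e.g. "this entry needs mlock" */
        int *back = untag_ptr(entry, &tag);

        printf("tag=%lu, pointers match: %d\n", tag, back == obj);
        free(obj);
        return 0;
}

Subtracting the extracted tag, as line 196 does, has the same effect as masking it off once the tag has been read out of the low bits.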
239 * mlock_folio - mlock a folio already on (or temporarily off) LRU
264 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
265 * @folio: folio to be mlocked, either normal or a THP head.
287 * munlock_folio - munlock a folio
288 * @folio: folio to be munlocked, either normal or a THP head.
311 unsigned int count = (end - addr) >> PAGE_SHIFT; in folio_mlock_step()
334 if (!(vma->vm_flags & VM_LOCKED)) in allow_mlock_munlock()
356 struct vm_area_struct *vma = walk->vma; in mlock_pte_range()
373 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
380 start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mlock_pte_range()
382 walk->action = ACTION_AGAIN; in mlock_pte_range()
398 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
404 pte += step - 1; in mlock_pte_range()
405 addr += (step - 1) << PAGE_SHIFT; in mlock_pte_range()
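
Lines 311 and 404-405 show a batching pattern: folio_mlock_step() reports how many consecutive PTEs map the same large folio, and mlock_pte_range() then advances by step - 1 so the loop's own increment lands on the first PTE of the next folio. A rough user-space sketch of that advance-by-step loop shape, with hypothetical names standing in for the PTE walk:

#include <stdio.h>

/*
 * Hypothetical stand-in for folio_mlock_step(): report how many consecutive
 * entries starting at i belong to the same group, capped at the array end.
 */
static unsigned int batch_step(const int *owner, unsigned int i, unsigned int n)
{
        unsigned int step = 1;

        while (i + step < n && owner[i + step] == owner[i])
                step++;
        return step;
}

int main(void)
{
        /* Each element's "owner" plays the role of the folio a PTE maps. */
        int owner[] = { 1, 1, 1, 2, 3, 3 };
        unsigned int n = sizeof(owner) / sizeof(owner[0]);

        for (unsigned int i = 0; i < n; i++) {
                unsigned int step = batch_step(owner, i, n);

                printf("owner %d: batch of %u\n", owner[i], step);
                /* Skip the rest of the batch; the loop's i++ supplies the +1. */
                i += step - 1;
        }
        return 0;
}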
415 * mlock_vma_pages_range() - mlock any pages already in the range,
417 * @vma - vma containing range to be mlock()ed or munlock()ed
418 * @start - start address in @vma of the range
419 * @end - end of range in @vma
420 * @newflags - the new set of flags for @vma.
435 * or page reclaim finding a page of this now-VM_LOCKED vma, in mlock_vma_pages_range()
450 walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL); in mlock_vma_pages_range()
460 * mlock_fixup - handle mlock[all]/munlock[all] requests.
462 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
463 * munlock is a no-op. However, for some special vmas, we go ahead and
472 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
475 vm_flags_t oldflags = vma->vm_flags; in mlock_fixup()
478 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || in mlock_fixup()
480 /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ in mlock_fixup()
492 nr_pages = (end - start) >> PAGE_SHIFT; in mlock_fixup()
494 nr_pages = -nr_pages; in mlock_fixup()
497 mm->locked_vm += nr_pages; in mlock_fixup()
521 VMA_ITERATOR(vmi, current->mm, start); in apply_vma_lock_flags()
527 return -EINVAL; in apply_vma_lock_flags()
532 return -ENOMEM; in apply_vma_lock_flags()
535 if (start > vma->vm_start) in apply_vma_lock_flags()
539 tmp = vma->vm_start; in apply_vma_lock_flags()
544 if (vma->vm_start != tmp) in apply_vma_lock_flags()
545 return -ENOMEM; in apply_vma_lock_flags()
547 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_vma_lock_flags()
549 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ in apply_vma_lock_flags()
550 tmp = vma->vm_end; in apply_vma_lock_flags()
561 return -ENOMEM; in apply_vma_lock_flags()
581 /* Don't overflow past ULONG_MAX */ in count_mm_mlocked_page_nr()
582 if (unlikely(ULONG_MAX - len < start)) in count_mm_mlocked_page_nr()
588 if (vma->vm_flags & VM_LOCKED) { in count_mm_mlocked_page_nr()
589 if (start > vma->vm_start) in count_mm_mlocked_page_nr()
590 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
591 if (end < vma->vm_end) { in count_mm_mlocked_page_nr()
592 count += end - vma->vm_start; in count_mm_mlocked_page_nr()
595 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
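
Lines 581-595 add up how much of the requested [start, start + len) range already lies inside VM_LOCKED vmas, so do_mlock() does not charge it against RLIMIT_MEMLOCK twice. The running additions and subtractions amount to computing the intersection of two half-open ranges; a small sketch of that equivalent clamped-overlap calculation (range_overlap() is an illustrative helper, not kernel code):

#include <stdio.h>

/*
 * Length of the overlap between [start, end) and [vm_start, vm_end),
 * equivalent to the clamping done line by line above.
 */
static unsigned long range_overlap(unsigned long start, unsigned long end,
                                   unsigned long vm_start, unsigned long vm_end)
{
        unsigned long lo = start > vm_start ? start : vm_start;
        unsigned long hi = end < vm_end ? end : vm_end;

        return hi > lo ? hi - lo : 0;
}

int main(void)
{
        /* Request [0x3000, 0x8000) against a locked vma [0x1000, 0x5000). */
        printf("%#lx bytes already locked\n",
               range_overlap(0x3000, 0x8000, 0x1000, 0x5000));  /* 0x2000 */
        return 0;
}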
607 if (retval == -EFAULT) in __mlock_posix_error_return()
608 retval = -ENOMEM; in __mlock_posix_error_return()
609 else if (retval == -ENOMEM) in __mlock_posix_error_return()
610 retval = -EAGAIN; in __mlock_posix_error_return()
618 int error = -ENOMEM; in do_mlock()
623 return -EPERM; in do_mlock()
632 if (mmap_write_lock_killable(current->mm)) in do_mlock()
633 return -EINTR; in do_mlock()
635 locked += current->mm->locked_vm; in do_mlock()
639 * previously mlocked areas, that part area in "mm->locked_vm" in do_mlock()
643 locked -= count_mm_mlocked_page_nr(current->mm, in do_mlock()
651 mmap_write_unlock(current->mm); in do_mlock()
671 return -EINVAL; in SYSCALL_DEFINE3()
688 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE2()
689 return -EINTR; in SYSCALL_DEFINE2()
691 mmap_write_unlock(current->mm); in SYSCALL_DEFINE2()
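
Lines 607-691 are the user-visible side of this file: do_mlock() enforces RLIMIT_MEMLOCK (and CAP_IPC_LOCK), mlock2() rejects any flag other than MLOCK_ONFAULT at line 671, and __mlock_posix_error_return() maps internal -EFAULT/-ENOMEM results to the POSIX -ENOMEM/-EAGAIN values. A minimal user-space caller, assuming a typical glibc environment; the error comments reflect the checks above:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        void *buf;

        if (posix_memalign(&buf, page, 4 * page))
                return 1;

        if (mlock(buf, 4 * page) != 0) {
                /*
                 * Typical failures, matching the checks seen above:
                 *   EPERM  - RLIMIT_MEMLOCK is 0 and no CAP_IPC_LOCK
                 *   ENOMEM - request would exceed RLIMIT_MEMLOCK
                 *   EAGAIN - some pages could not be locked
                 */
                fprintf(stderr, "mlock: %s\n", strerror(errno));
                free(buf);
                return 1;
        }

        /* ... buf is now resident; do latency-sensitive work ... */

        munlock(buf, 4 * page);  /* handled by the munlock path above */
        free(buf);
        return 0;
}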
698 * and translate into the appropriate modifications to mm->def_flags and/or the
704 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
708 VMA_ITERATOR(vmi, current->mm, 0); in apply_mlockall_flags()
712 current->mm->def_flags &= ~VM_LOCKED_MASK; in apply_mlockall_flags()
714 current->mm->def_flags |= VM_LOCKED; in apply_mlockall_flags()
717 current->mm->def_flags |= VM_LOCKONFAULT; in apply_mlockall_flags()
733 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_mlockall_flags()
736 error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, in apply_mlockall_flags()
754 return -EINVAL; in SYSCALL_DEFINE1()
757 return -EPERM; in SYSCALL_DEFINE1()
762 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE1()
763 return -EINTR; in SYSCALL_DEFINE1()
765 ret = -ENOMEM; in SYSCALL_DEFINE1()
766 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || in SYSCALL_DEFINE1()
769 mmap_write_unlock(current->mm); in SYSCALL_DEFINE1()
780 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE0()
781 return -EINTR; in SYSCALL_DEFINE0()
783 mmap_write_unlock(current->mm); in SYSCALL_DEFINE0()
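
Lines 698-783 implement mlockall()/munlockall(): apply_mlockall_flags() folds MCL_CURRENT/MCL_FUTURE/MCL_ONFAULT into mm->def_flags and into VM_LOCKED (plus VM_LOCKONFAULT, line 717) on every existing vma. A short user-space usage sketch, again assuming a typical glibc setup:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        /*
         * Lock everything currently mapped and everything mapped later;
         * in the kernel this becomes mm->def_flags plus VM_LOCKED on
         * each existing vma (MCL_ONFAULT would add VM_LOCKONFAULT).
         */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
                /* EPERM or ENOMEM per the limit/capability checks above */
                fprintf(stderr, "mlockall: %s\n", strerror(errno));
                return 1;
        }

        /* ... run work that must not take major faults from paging ... */

        munlockall();  /* clears VM_LOCKED from every vma and from def_flags */
        return 0;
}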
799 locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in user_shm_lock()
824 dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); in user_shm_unlock()
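
The user_shm_lock()/user_shm_unlock() lines (799, 824) charge SysV SHM_LOCK segments against the UCOUNT_RLIMIT_MEMLOCK limit, rounding the byte size up to whole pages. A trivial sketch of that round-up arithmetic (bytes_to_pages() is an illustrative helper, not a kernel function):

#include <stdio.h>
#include <unistd.h>

/* Round a byte count up to whole pages, as user_shm_lock() does. */
static unsigned long bytes_to_pages(unsigned long size, unsigned long page_size)
{
        return (size + page_size - 1) / page_size;  /* >> PAGE_SHIFT in the kernel */
}

int main(void)
{
        unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);

        /* One byte still charges a full page; page + 1 bytes charge two. */
        printf("%lu page(s) for 1 byte, %lu for %lu bytes\n",
               bytes_to_pages(1, page), bytes_to_pages(page + 1, page), page + 1);
        return 0;
}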