Lines Matching +full:async +full:- +full:prefix
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
14 #include <linux/tracepoint-defs.h>
62 #define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
76 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; in folio_nr_pages_mapped()
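The two hits above pack a flag and a counter into one atomic_t: ENTIRELY_MAPPED is a single high bit, and FOLIO_PAGES_MAPPED masks off the per-page mapped count stored below it. A minimal user-space sketch of that packing, with ENTIRELY_MAPPED assumed to be 0x800000 as in current mm/internal.h:

#include <stdio.h>

/* Assumed values: one high flag bit, and a mask covering the
 * per-page mapped count kept in the bits below it. */
#define ENTIRELY_MAPPED    0x800000
#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)

int main(void)
{
	/* Pretend counter for a folio that is mapped entirely once and
	 * additionally has three of its pages mapped individually. */
	unsigned int nr_pages_mapped = ENTIRELY_MAPPED + 3;

	printf("entirely mapped: %s\n",
	       (nr_pages_mapped & ENTIRELY_MAPPED) ? "yes" : "no");
	printf("pages mapped individually: %u\n",
	       nr_pages_mapped & FOLIO_PAGES_MAPPED);
	return 0;
}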
81 unsigned long mapping = (unsigned long)folio->mapping; in folio_raw_mapping()
91 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled); in acct_reclaim_writeback()
101 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED]; in wake_throttle_isolated()
129 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index); in force_page_cache_readahead()
146 * folio_evictable - Test whether a folio is evictable.
149 * Test whether @folio is evictable -- i.e., should be placed on
169 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
180 * Return true if a folio needs ->release_folio() calling upon it.
215 #define K(x) ((x) << (PAGE_SHIFT-10))
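K() converts a page count to kibibytes: a page is 1 << PAGE_SHIFT bytes and a KiB is 1 << 10 bytes, so the conversion is a left shift by PAGE_SHIFT - 10. A small user-space sketch, assuming the common 4 KiB page size (PAGE_SHIFT here is a local stand-in, not the kernel's):

#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed: 4 KiB pages */
#define K(x) ((x) << (PAGE_SHIFT - 10))		/* pages -> KiB */

int main(void)
{
	unsigned long nr_pages = 256;

	/* 256 pages * 4 KiB per page = 1024 KiB */
	printf("%lu pages = %lu KiB\n", nr_pages, K(nr_pages));
	return 0;
}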
264 * general, page_zone(page)->lock must be held by the caller to prevent the
266 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
298 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
350 * function is used in the performance-critical __free_one_page().
366 buddy = page + (__buddy_pfn - pfn); in find_buddy_page_pfn()
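find_buddy_page_pfn() relies on the classic buddy-allocator trick: the buddy of an order-n block is found by flipping bit n of its page frame number, because the two halves of a 2^(n+1)-aligned pair differ only in that bit; the hit above then turns the PFN delta back into a struct page pointer. A user-space sketch of the PFN arithmetic alone (not kernel code):

#include <stdio.h>

/* Buddy of a PFN at a given order: flip the bit that selects which half
 * of the 2^(order+1)-aligned block the page sits in. This mirrors what
 * the kernel's __find_buddy_pfn() computes. */
static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1000;
	unsigned long __buddy_pfn = buddy_pfn(pfn, 3);

	/* buddy = page + (__buddy_pfn - pfn) in the listing above */
	printf("pfn %#lx, order-3 buddy pfn %#lx, delta %+ld pages\n",
	       pfn, __buddy_pfn, (long)(__buddy_pfn - pfn));
	return 0;
}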
381 if (zone->contiguous) in pageblock_pfn_to_page()
391 zone->contiguous = false; in clear_zone_contiguous()
403 * caller passes in a non-large folio.
410 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; in folio_set_order()
412 folio->_folio_nr_pages = 1U << order; in folio_set_order()
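folio_set_order() keeps the order in the low byte of _flags_1 and, on 64-bit configurations, caches the resulting page count in _folio_nr_pages so callers need not recompute 1 << order. A user-space sketch of the encoding, using stand-in fields rather than the real struct folio layout:

#include <stdio.h>

/* Stand-ins for the two folio fields touched in the listing above. */
struct fake_folio {
	unsigned long _flags_1;
	unsigned int  _folio_nr_pages;
};

static void fake_folio_set_order(struct fake_folio *folio, unsigned int order)
{
	/* The order lives in the low byte of _flags_1; other bits are kept. */
	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
	/* Cached page count for the folio. */
	folio->_folio_nr_pages = 1U << order;
}

int main(void)
{
	struct fake_folio folio = { ._flags_1 = 0xabcd00 };

	fake_folio_set_order(&folio, 9);	/* e.g. 2 MiB folio with 4 KiB pages */
	printf("order %lu, nr_pages %u, upper flag bits preserved: %#lx\n",
	       folio._flags_1 & 0xff, folio._folio_nr_pages,
	       folio._flags_1 & ~0xffUL);
	return 0;
}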
432 atomic_set(&folio->_entire_mapcount, -1); in prep_compound_head()
433 atomic_set(&folio->_nr_pages_mapped, 0); in prep_compound_head()
434 atomic_set(&folio->_pincount, 0); in prep_compound_head()
441 p->mapping = TAIL_MAPPING; in prep_compound_tail()
507 enum migrate_mode mode; /* Async or sync migration mode */
552 return list_empty(&area->free_list[migratetype]); in free_area_empty()
560 * Executable code area - executable, not writable, not stack
579 * Data area - private, writable, not stack
625 if (start < vma->vm_start) in folio_within_range()
626 start = vma->vm_start; in folio_within_range()
628 if (end > vma->vm_end) in folio_within_range()
629 end = vma->vm_end; in folio_within_range()
634 if (!in_range(pgoff, vma->vm_pgoff, vma_pglen)) in folio_within_range()
637 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in folio_within_range()
639 return !(addr < start || end - addr < folio_size(folio)); in folio_within_range()
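folio_within_range() clamps the requested [start, end) to the vma, translates the folio's file page offset into the virtual address where this vma maps it, and accepts the folio only if it fits entirely inside the clamped range; writing the tail check as end - addr < folio_size(folio) avoids overflowing addr + folio_size near the top of the address space. A condensed user-space sketch of the arithmetic with plain-integer stand-ins for the vma and folio fields:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Does a folio of fsize bytes, starting at file page offset pgoff, lie
 * entirely within [start, end) of a vma covering [vm_start, vm_end)
 * that maps file page offset vm_pgoff at vm_start?  (The kernel also
 * checks first that pgoff falls inside the vma at all.) */
static bool folio_within(unsigned long vm_start, unsigned long vm_end,
			 unsigned long vm_pgoff, unsigned long pgoff,
			 unsigned long fsize,
			 unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start < vm_start)
		start = vm_start;
	if (end > vm_end)
		end = vm_end;

	addr = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);
	/* Overflow-safe form of: addr >= start && addr + fsize <= end */
	return !(addr < start || end - addr < fsize);
}

int main(void)
{
	/* 16-page vma at 0x100000 mapping file offset 0; 4-page folio at pgoff 2. */
	printf("%d\n", folio_within(0x100000, 0x110000, 0, 2,
				    4 * PAGE_SIZE, 0x100000, 0x110000));
	return 0;
}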
645 return folio_within_range(folio, vma, vma->vm_start, vma->vm_end); in folio_within_vma()
663 * 1) VM_IO check prevents migration from double-counting during mlock. in mlock_vma_folio()
666 * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may in mlock_vma_folio()
669 if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED)) in mlock_vma_folio()
686 if (unlikely(vma->vm_flags & VM_LOCKED)) in munlock_vma_folio()
707 if (pgoff >= vma->vm_pgoff) { in vma_pgoff_address()
708 address = vma->vm_start + in vma_pgoff_address()
709 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_pgoff_address()
711 if (address < vma->vm_start || address >= vma->vm_end) in vma_pgoff_address()
712 address = -EFAULT; in vma_pgoff_address()
713 } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) { in vma_pgoff_address()
714 /* Test above avoids possibility of wrap to 0 on 32-bit */ in vma_pgoff_address()
715 address = vma->vm_start; in vma_pgoff_address()
717 address = -EFAULT; in vma_pgoff_address()
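vma_pgoff_address() maps a file page offset back to the user virtual address where a vma maps it, returning -EFAULT when the pgoff range misses the vma entirely; the second branch catches a range that starts before the vma but still overlaps its first page. A user-space sketch of the same logic with plain parameters and a stand-in for -EFAULT:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define MY_EFAULT  ((unsigned long)-14)	/* stand-in for -EFAULT */

/* Address at which page offset pgoff (spanning nr_pages pages) is mapped
 * by a vma covering [vm_start, vm_end) whose first page is file page
 * offset vm_pgoff. */
static unsigned long pgoff_address(unsigned long pgoff, unsigned long nr_pages,
				   unsigned long vm_start, unsigned long vm_end,
				   unsigned long vm_pgoff)
{
	unsigned long address;

	if (pgoff >= vm_pgoff) {
		address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);
		/* Address landed beyond the vma (or wrapped through 0). */
		if (address < vm_start || address >= vm_end)
			address = MY_EFAULT;
	} else if (pgoff + nr_pages - 1 >= vm_pgoff) {
		/* Range starts before the vma but overlaps its first page. */
		address = vm_start;
	} else {
		address = MY_EFAULT;
	}
	return address;
}

int main(void)
{
	/* A vma at 0x400000 mapping file pages [16, 32). */
	printf("pgoff 20 -> %#lx\n", pgoff_address(20, 1, 0x400000, 0x410000, 16));
	printf("pgoff 40 -> %#lx\n", pgoff_address(40, 1, 0x400000, 0x410000, 16));
	return 0;
}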
724 * Returns -EFAULT if all of the page is outside the range of vma.
730 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ in vma_address()
740 struct vm_area_struct *vma = pvmw->vma; in vma_address_end()
744 /* Common case, plus ->pgoff is invalid for KSM */ in vma_address_end()
745 if (pvmw->nr_pages == 1) in vma_address_end()
746 return pvmw->address + PAGE_SIZE; in vma_address_end()
748 pgoff = pvmw->pgoff + pvmw->nr_pages; in vma_address_end()
749 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address_end()
751 if (address < vma->vm_start || address > vma->vm_end) in vma_address_end()
752 address = vma->vm_end; in vma_address_end()
759 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
771 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
804 #define mminit_dprintk(level, prefix, fmt, arg...) \ argument
808 pr_warn("mminit::" prefix " " fmt, ##arg); \
810 printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
819 const char *prefix, const char *fmt, ...) in mminit_dprintk() argument
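mminit_dprintk() is a level-gated debug printf: the string literals "mminit::" prefix " " fmt are concatenated at compile time, warning-level messages go out via pr_warn() while more verbose ones use KERN_DEBUG, everything is filtered against mminit_loglevel, and a no-op stub takes over when memory-init debugging is not configured. A user-space sketch of the same pattern, with fprintf standing in for printk and ##__VA_ARGS__ for the kernel's ##arg:

#include <stdio.h>

enum mminit_level { MMINIT_WARNING, MMINIT_VERIFY, MMINIT_TRACE };

/* Assumed threshold; the kernel uses the global mminit_loglevel. */
static int loglevel = MMINIT_VERIFY;

/* Literal concatenation builds "mminit::<prefix> <fmt>" at compile time;
 * warnings go to stderr, more verbose messages to stdout. */
#define mminit_dprintk(level, prefix, fmt, ...)				\
do {									\
	if ((level) < loglevel) {					\
		if ((level) <= MMINIT_WARNING)				\
			fprintf(stderr, "mminit::" prefix " " fmt, ##__VA_ARGS__); \
		else							\
			fprintf(stdout, "mminit::" prefix " " fmt, ##__VA_ARGS__); \
	}								\
} while (0)

int main(void)
{
	mminit_dprintk(MMINIT_WARNING, "pgflags", "bad flags %#x\n", 0x40);
	mminit_dprintk(MMINIT_TRACE, "zonelist", "too verbose\n"); /* filtered out */
	return 0;
}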
832 #define NODE_RECLAIM_NOSCAN -2
833 #define NODE_RECLAIM_FULL -1
853 * mm/memory-failure.c
872 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
879 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
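The low bits of the allocation flags double as an index into the per-zone watermark array, and ALLOC_NO_WATERMARKS sits just above them, so ALLOC_NO_WATERMARKS - 1 is exactly the index mask. A small sketch of the idiom; the watermark flag values mirror the kernel's, while the extra flag and the watermark numbers are made up for illustration:

#include <stdio.h>

#define ALLOC_WMARK_MIN		0x00	/* index of the min watermark */
#define ALLOC_WMARK_LOW		0x01	/* index of the low watermark */
#define ALLOC_WMARK_HIGH	0x02	/* index of the high watermark */
#define ALLOC_NO_WATERMARKS	0x04	/* ignore watermarks entirely */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS - 1)
#define ALLOC_EXAMPLE_FLAG	0x10	/* hypothetical flag sharing the word */

int main(void)
{
	unsigned int alloc_flags = ALLOC_WMARK_LOW | ALLOC_EXAMPLE_FLAG;
	unsigned long watermark[] = { 128, 256, 384 };	/* min, low, high (pages) */

	/* Masking recovers the watermark index even with other flags set. */
	printf("checking against %lu pages\n",
	       watermark[alloc_flags & ALLOC_WMARK_MASK]);
	return 0;
}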
882 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
983 return -EINVAL; in vmap_pages_range_noflush()
1026 /* we are working on non-current tsk/mm */
1030 /* gup_fast: prevent fall-back to slow gup */
1040 * Indicates, for pages that are write-protected in the page table,
1048 * * GUP-fast and fork(): mm->write_protect_seq
1049 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1054 * PTE-mapped THP.
1056 * If the vma is NULL, we're coming from the GUP-fast path and might have
1064 * has to be writable -- and if it references (part of) an anonymous in gup_must_unshare()
1075 * We only care about R/O long-term pinning: R/O short-term in gup_must_unshare()
1090 return is_cow_mapping(vma->vm_flags); in gup_must_unshare()
1098 * During GUP-fast we might not get called on the head page for a in gup_must_unshare()
1099 * hugetlb page that is mapped using cont-PTE, because GUP-fast does in gup_must_unshare()
1102 * page (as it cannot be partially COW-shared), so lookup the head page. in gup_must_unshare()
1120 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty in vma_soft_dirty_enabled()
1121 * enablements, because without soft-dirty compiled in, in vma_soft_dirty_enabled()
1129 * Soft-dirty is kind of special: its tracking is enabled when the in vma_soft_dirty_enabled()
1132 return !(vma->vm_flags & VM_SOFTDIRTY); in vma_soft_dirty_enabled()
1138 __mas_set_range(&vmi->mas, index, last - 1); in vma_iter_config()
1147 return mas_preallocate(&vmi->mas, vma, GFP_KERNEL); in vma_iter_prealloc()
1152 mas_store_prealloc(&vmi->mas, NULL); in vma_iter_clear()
1157 return mas_walk(&vmi->mas); in vma_iter_load()
1166 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && in vma_iter_store()
1167 vmi->mas.index > vma->vm_start)) { in vma_iter_store()
1168 pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", in vma_iter_store()
1169 vmi->mas.index, vma->vm_start, vma->vm_start, in vma_iter_store()
1170 vma->vm_end, vmi->mas.index, vmi->mas.last); in vma_iter_store()
1172 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && in vma_iter_store()
1173 vmi->mas.last < vma->vm_start)) { in vma_iter_store()
1174 pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", in vma_iter_store()
1175 vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, in vma_iter_store()
1176 vmi->mas.index, vmi->mas.last); in vma_iter_store()
1180 if (vmi->mas.status != ma_start && in vma_iter_store()
1181 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store()
1184 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store()
1185 mas_store_prealloc(&vmi->mas, vma); in vma_iter_store()
1191 if (vmi->mas.status != ma_start && in vma_iter_store_gfp()
1192 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store_gfp()
1195 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store_gfp()
1196 mas_store_gfp(&vmi->mas, vma, gfp); in vma_iter_store_gfp()
1197 if (unlikely(mas_is_err(&vmi->mas))) in vma_iter_store_gfp()
1198 return -ENOMEM; in vma_iter_store_gfp()
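The vma_iter helpers above all convert the vma's half-open [vm_start, vm_end) span into the inclusive [index, last] convention the maple tree uses, hence the recurring vma->vm_end - 1. A toy sketch of that convention, using a hypothetical store_range() over a plain array instead of the maple tree:

#include <stdio.h>

#define NSLOTS 16

/* Toy "tree": one slot per unit index, holding a value over an
 * inclusive [index, last] range, as the maple tree API does. */
static const char *slots[NSLOTS];

static void store_range(unsigned long index, unsigned long last, const char *val)
{
	for (unsigned long i = index; i <= last && i < NSLOTS; i++)
		slots[i] = val;
}

int main(void)
{
	unsigned long vm_start = 4, vm_end = 8;		/* half-open [4, 8) */

	/* Convert to the inclusive convention before storing, like
	 * __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1). */
	store_range(vm_start, vm_end - 1, "vma");

	for (unsigned long i = 0; i < NSLOTS; i++)
		printf("%2lu: %s\n", i, slots[i] ? slots[i] : "-");
	return 0;
}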
1228 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); in shrinker_debugfs_name_alloc()
1230 return shrinker->name ? 0 : -ENOMEM; in shrinker_debugfs_name_alloc()
1235 kfree_const(shrinker->name); in shrinker_debugfs_name_free()
1236 shrinker->name = NULL; in shrinker_debugfs_name_free()
1260 *debugfs_id = -1; in shrinker_debugfs_detach()