Lines matching "performance" and "affecting" in mm/madvise.c

1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/page-isolation.h>
26 #include <linux/backing-dev.h>
43 * Any behaviour which results in changes to the vma->vm_flags needs to take mmap_lock for writing; behaviours that simply traverse VMAs need only take it for reading.
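The helper that encodes this rule sits directly below the comment in the source; a condensed sketch of its shape (the behavior list is abridged here, not the kernel's complete set):

static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
		/* these only traverse VMAs: mmap_lock read mode suffices */
		return 0;
	default:
		/* anything that may rewrite vma->vm_flags: write mode */
		return 1;
	}
}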
71 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
74 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
90 if (vma->vm_flags & VM_IO) { in madvise_behavior()
91 error = -EINVAL; in madvise_behavior()
98 if (vma->vm_file || vma->vm_flags & VM_SHARED) { in madvise_behavior()
99 error = -EINVAL; in madvise_behavior()
112 error = -EINVAL; in madvise_behavior()
131 if (new_flags == vma->vm_flags) { in madvise_behavior()
136 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior()
137 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
138 vma->vm_file, pgoff, vma_policy(vma), in madvise_behavior()
139 vma->vm_userfaultfd_ctx); in madvise_behavior()
147 if (start != vma->vm_start) { in madvise_behavior()
148 if (unlikely(mm->map_count >= sysctl_max_map_count)) { in madvise_behavior()
149 error = -ENOMEM; in madvise_behavior()
157 if (end != vma->vm_end) { in madvise_behavior()
158 if (unlikely(mm->map_count >= sysctl_max_map_count)) { in madvise_behavior()
159 error = -ENOMEM; in madvise_behavior()
171 vma->vm_flags = new_flags; in madvise_behavior()
178 if (error == -ENOMEM) in madvise_behavior()
179 error = -EAGAIN; in madvise_behavior()
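The -ENOMEM to -EAGAIN translation above means a caller that races with VMA splitting sees a retryable error rather than a hard failure. A minimal userspace sketch of that retry, assuming nothing beyond madvise(2) and errno (the helper name is illustrative):

#include <errno.h>
#include <sys/mman.h>

/* Retry advice that can fail transiently while the kernel splits VMAs. */
static int madvise_retry(void *addr, size_t len, int advice)
{
	int ret;

	do {
		ret = madvise(addr, len, advice);
	} while (ret == -1 && errno == EAGAIN);
	return ret;
}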
189 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry()
201 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
202 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
228 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); in force_shm_swapin_readahead()
229 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); in force_shm_swapin_readahead()
262 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
263 struct file *file = vma->vm_file; in madvise_willneed()
269 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
274 if (shmem_mapping(file->f_mapping)) { in madvise_willneed()
276 file->f_mapping); in madvise_willneed()
281 return -EBADF; in madvise_willneed()
297 offset = (loff_t)(start - vma->vm_start) in madvise_willneed()
298 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_willneed()
300 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); in madvise_willneed()
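The two lines computing offset map a user virtual address back to a file offset before the range is handed to vfs_fadvise(). A worked example with illustrative values (4 KiB pages assumed):

/* Illustrative values only:
 *   vma->vm_start = 0x7f0000000000
 *   vma->vm_pgoff = 16    (the mapping starts at file offset 16 << 12 = 65536)
 *   start         = vma->vm_start + 8192
 *
 *   offset = (start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT)
 *          = 8192 + 65536 = 73728
 *
 * so POSIX_FADV_WILLNEED readahead is issued at file offset 73728
 * for (end - start) bytes.
 */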
310 struct madvise_walk_private *private = walk->private; in madvise_cold_or_pageout_pte_range()
311 struct mmu_gather *tlb = private->tlb; in madvise_cold_or_pageout_pte_range()
312 bool pageout = private->pageout; in madvise_cold_or_pageout_pte_range()
313 struct mm_struct *mm = tlb->mm; in madvise_cold_or_pageout_pte_range()
314 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range()
321 return -EINTR; in madvise_cold_or_pageout_pte_range()
349 if (next - addr != HPAGE_PMD_SIZE) { in madvise_cold_or_pageout_pte_range()
378 list_add(&page->lru, &page_list); in madvise_cold_or_pageout_pte_range()
394 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
432 pte--; in madvise_cold_or_pageout_pte_range()
433 addr -= PAGE_SIZE; in madvise_cold_or_pageout_pte_range()
445 tlb->fullmm); in madvise_cold_or_pageout_pte_range()
454 * As a side effect, it confuses idle-page tracking, because the pages will miss their recently referenced history. in madvise_cold_or_pageout_pte_range()
464 list_add(&page->lru, &page_list); in madvise_cold_or_pageout_pte_range()
493 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
501 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
506 return -EINVAL; in madvise_cold()
526 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
534 if (!vma->vm_file) in can_do_pageout()
537 * paging out pagecache only for non-anonymous mappings that correspond to the files the calling process could (if it tried) open for writing; in can_do_pageout()
539 * otherwise we'd be including shared non-exclusive mappings, which opens a side channel. in can_do_pageout()
542 return inode_owner_or_capable(file_inode(vma->vm_file)) || in can_do_pageout()
543 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; in can_do_pageout()
550 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
555 return -EINVAL; in madvise_pageout()
572 struct mmu_gather *tlb = walk->private; in madvise_free_pte_range()
573 struct mm_struct *mm = tlb->mm; in madvise_free_pte_range()
574 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range()
600 * prevent swap-in, which is more expensive than (page allocation + swap free). in madvise_free_pte_range()
609 nr_swap--; in madvise_free_pte_range()
611 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in madvise_free_pte_range()
642 pte--; in madvise_free_pte_range()
643 addr -= PAGE_SIZE; in madvise_free_pte_range()
678 tlb->fullmm); in madvise_free_pte_range()
689 if (current->mm == mm) in madvise_free_pte_range()
708 struct mm_struct *mm = vma->vm_mm; in madvise_free_single_vma()
714 return -EINVAL; in madvise_free_single_vma()
716 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
717 if (range.start >= vma->vm_end) in madvise_free_single_vma()
718 return -EINVAL; in madvise_free_single_vma()
719 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
720 if (range.end <= vma->vm_start) in madvise_free_single_vma()
721 return -EINVAL; in madvise_free_single_vma()
731 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
750 * as some implementations do. This has performance implications for applications such as large transactional databases that want to discard pages in anonymous maps after committing the data they held to backing store.
762 zap_page_range(vma, start, end - start); in madvise_dontneed_single_vma()
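The transactional-database pattern the surrounding comment describes looks roughly like this from userspace (a hedged sketch; the function name and the commit step are illustrative):

#include <sys/mman.h>

/* buf is an anonymous scratch mapping used to stage transaction data. */
static void commit_and_discard(void *buf, size_t len)
{
	/* ... write the staged data to the database's backing store ... */

	/* The contents are now worthless: MADV_DONTNEED drops the pages
	 * instead of writing them to swap; they read back as zeroes. */
	madvise(buf, len, MADV_DONTNEED);
}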
771 struct mm_struct *mm = vma->vm_mm; in madvise_dontneed_free()
775 return -EINVAL; in madvise_dontneed_free()
783 return -ENOMEM; in madvise_dontneed_free()
784 if (start < vma->vm_start) { in madvise_dontneed_free()
787 * with the lowest vma->vm_start where start in madvise_dontneed_free()
788 * is also < vma->vm_end. If start < in madvise_dontneed_free()
789 * vma->vm_start it means a hole materialized in the address space. in madvise_dontneed_free()
794 return -ENOMEM; in madvise_dontneed_free()
797 return -EINVAL; in madvise_dontneed_free()
798 if (end > vma->vm_end) { in madvise_dontneed_free()
800 * Don't fail if end > vma->vm_end. If the old in madvise_dontneed_free()
808 * end-vma->vm_end range, but the manager can in madvise_dontneed_free()
811 end = vma->vm_end; in madvise_dontneed_free()
821 return -EINVAL; in madvise_dontneed_free()
835 struct mm_struct *mm = vma->vm_mm; in madvise_remove()
839 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
840 return -EINVAL; in madvise_remove()
842 f = vma->vm_file; in madvise_remove()
844 if (!f || !f->f_mapping || !f->f_mapping->host) { in madvise_remove()
845 return -EINVAL; in madvise_remove()
848 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) in madvise_remove()
849 return -EACCES; in madvise_remove()
851 offset = (loff_t)(start - vma->vm_start) in madvise_remove()
852 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_remove()
867 offset, end - start); in madvise_remove()
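For a shared writable file mapping, MADV_REMOVE frees the range by punching a hole in the backing file at the offset computed above. The userspace-visible equivalent on the file itself would be roughly the following (a sketch, assuming Linux fallocate(2) with hole-punch support; the helper name is illustrative):

#define _GNU_SOURCE
#include <fcntl.h>

/* Punch a hole in the backing file, keeping the file size unchanged. */
static int punch_backing_hole(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}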
884 return -EPERM; in madvise_inject_error()
925 /* Ensure that all poisoned pages are removed from per-cpu lists */ in madvise_inject_error()
1009 * use appropriate read-ahead and caching techniques. The information provided is advisory only, and can be safely disregarded by the
1011 * kernel without affecting the correct operation of the application.
1014 * MADV_NORMAL - the default behavior is to read clusters. This
1015 * results in some read-ahead and read-behind.
1016 * MADV_RANDOM - the system should read the minimum amount of data
1017 * on any access, since it is unlikely that the application will need more than what it asks for.
1019 * MADV_SEQUENTIAL - pages in the given range will probably be accessed once, so they can be aggressively read ahead and may be freed soon after they are accessed.
1022 * MADV_WILLNEED - the application is notifying the system to read some pages ahead.
1024 * MADV_DONTNEED - the application is finished with the given range, so the kernel can free resources associated with it.
1026 * MADV_FREE - the application marks pages in the given range as lazy free, where actual purges are postponed until memory pressure happens.
1028 * MADV_REMOVE - the application wants to free up the given range of pages and the associated backing store.
1030 * MADV_DONTFORK - omit this area from the child's address space when forking: typically, to avoid COWing pages pinned by get_user_pages().
1032 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1033 * MADV_WIPEONFORK - present the child process with zero-filled memory in this range after a fork.
1035 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1036 * MADV_HWPOISON - trigger the memory error handler as if the given memory range were corrupted by an unrecoverable hardware memory failure.
1038 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1039 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in this area with pages of identical content from other such areas.
1041 * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
1042 * MADV_HUGEPAGE - the application wants to back the given range with transparent huge pages in the future; existing pages might be coalesced and new pages might be allocated as THP.
1045 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by transparent huge pages, so existing pages will not be coalesced into THP and new pages will not be allocated as THP.
1048 * MADV_DONTDUMP - the application wants to prevent pages in the given range from being included in its core dump.
1050 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1051 * MADV_COLD - the application is not expected to use this memory soon; deactivate pages in this range so that they can be reclaimed easily if memory pressure happens.
1054 * MADV_PAGEOUT - the application is not expected to use this memory soon; page out the pages in this range immediately.
1058 * zero - success
1059 * -EINVAL - start + len < 0, start is not page-aligned, "behavior" is not a valid value, the application is attempting to release locked or shared pages, or the specified address range includes a file, Huge TLB, MAP_SHARED, or VM_PFNMAP range.
1064 * -ENOMEM - addresses in the specified range are not currently mapped, or are outside the address space of the process.
1066 * -EIO - an I/O error occurred while paging in data.
1067 * -EBADF - map exists, but area maps something that isn't a file.
1068 * -EAGAIN - a kernel resource was temporarily unavailable.
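A minimal userspace sketch exercising a few of the behavior values documented above (the file name and sizes are illustrative):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * 4096;
	int fd = open("data.bin", O_RDONLY);
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	madvise(p, len, MADV_SEQUENTIAL);	/* linear scan expected */
	madvise(p, len, MADV_WILLNEED);		/* start readahead now */
	/* ... scan the mapping ... */
	madvise(p, len, MADV_DONTNEED);		/* drop the pages */

	munmap(p, len);
	close(fd);
	return 0;
}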
1075 int error = -EINVAL; in do_madvise()
1089 /* Check whether len was rounded up from a small negative value to zero */ in do_madvise()
1109 return -EINTR; in do_madvise()
1116 * ranges, just ignore them, but return -ENOMEM at the end. in do_madvise()
1117 * - this differs from how mlock etc. handle unmapped ranges. in do_madvise()
1120 if (vma && start > vma->vm_start) in do_madvise()
1126 error = -ENOMEM; in do_madvise()
1130 /* Here start < (end|vma->vm_end). */ in do_madvise()
1131 if (start < vma->vm_start) { in do_madvise()
1132 unmapped_error = -ENOMEM; in do_madvise()
1133 start = vma->vm_start; in do_madvise()
1138 /* Here vma->vm_start <= start < (end|vma->vm_end) */ in do_madvise()
1139 tmp = vma->vm_end; in do_madvise()
1143 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ in do_madvise()
1148 if (prev && start < prev->vm_end) in do_madvise()
1149 start = prev->vm_end; in do_madvise()
1154 vma = prev->vm_next; in do_madvise()
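Condensed, the fragments above trace this loop shape in do_madvise() (a paraphrase built from the names visible in the fragments, not the verbatim kernel code): visit each VMA overlapping [start, end), clamp the advice range to the VMA, record -ENOMEM for any hole, and report it only once the whole range has been covered.

	for (;;) {
		if (!vma)
			return -ENOMEM;		/* ran off the end of the mm */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;  /* note the hole, continue */
			start = vma->vm_start;
			if (start >= end)
				break;
		}
		tmp = min(vma->vm_end, end);	/* clamp to this VMA */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;			/* done: return unmapped_error */
		vma = prev ? prev->vm_next : find_vma(mm, start);
	}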
1170 return do_madvise(current->mm, start, len_in, behavior); in SYSCALL_DEFINE3()
1187 ret = -EINVAL; in SYSCALL_DEFINE5()
1203 ret = -ESRCH; in SYSCALL_DEFINE5()
1208 ret = -EINVAL; in SYSCALL_DEFINE5()
1214 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; in SYSCALL_DEFINE5()
1230 ret = total_len - iov_iter_count(&iter); in SYSCALL_DEFINE5()
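Finally, a hedged userspace sketch of calling the process_madvise() entry point above via raw syscalls (libc wrappers may be absent; SYS_pidfd_open and SYS_process_madvise require reasonably new kernel headers, and MADV_PAGEOUT is just one example advice value):

#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

/* Ask the kernel to page out one range of another process's memory. */
static long remote_pageout(pid_t pid, void *addr, size_t len)
{
	struct iovec iov = { .iov_base = addr, .iov_len = len };
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	long ret;

	if (pidfd < 0)
		return -1;
	ret = syscall(SYS_process_madvise, pidfd, &iov, 1, MADV_PAGEOUT, 0);
	close(pidfd);
	return ret;
}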