Lines matching full:pages — identifier search hits in mm/gup.c; each entry shows the source line number, the matching line, and the enclosing function.
33 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages() argument
40 * We only pin anonymous pages if they are exclusive. Once pinned, we in sanity_check_pinned_pages()
44 * We'd like to verify that our pinned anonymous pages are still mapped in sanity_check_pinned_pages()
51 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
52 struct page *page = *pages; in sanity_check_pinned_pages()
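The block above is from sanity_check_pinned_pages(), the CONFIG_DEBUG_VM helper that walks a just-pinned array and asserts that every pinned anonymous page is still mapped exclusively. A condensed sketch guided by the matched lines; the zero-page and large-folio special cases the real helper also handles are elided here:

#include <linux/mm.h>

/* Sketch only: assert each pinned anonymous page is still anon-exclusive. */
static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio = page_folio(page);

		/* Only anonymous pages are pinned exclusively. */
		if (!folio_test_anon(folio))
			continue;

		/* A pinned anon page must still be mapped exclusively. */
		VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
	}
}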
267 * Pages that were pinned via pin_user_pages*() must be released via either
269 * that such pages can be separately tracked and uniquely handled. In
337 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
338 * @pages: array of pages to be maybe marked dirty, and definitely released.
339 * @npages: number of pages in the @pages array.
340 * @make_dirty: whether to mark the pages dirty
345 * For each page in the @pages array, make that page (or its head page, if a
347 * listed as clean. In any case, releases all pages using unpin_user_page(),
358 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
366 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
370 sanity_check_pinned_pages(pages, npages); in unpin_user_pages_dirty_lock()
372 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
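unpin_user_pages_dirty_lock() is the release half of the pin_user_pages*() API: it optionally dirties each page and always drops the FOLL_PIN reference. A caller-side sketch of the usual pattern (pin, let a device write, dirty-and-unpin); the function name fill_user_buffer() and the device hand-off step are purely illustrative:

#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative only: pin a user buffer for a device write, then dirty and
 * release the pages in one call.  fill_user_buffer() is not a kernel API.
 */
static int fill_user_buffer(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int pinned;

	pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* FOLL_WRITE: the device will write into these pages. */
	pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0) {
		kvfree(pages);
		return pinned;
	}

	/* ... hand the first @pinned pages to the device here ... */

	/* Mark every page dirty and drop its FOLL_PIN reference. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	kvfree(pages);
	return 0;
}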
408 * @npages: number of consecutive pages to release.
409 * @make_dirty: whether to mark the pages dirty
411 * "gup-pinned page range" refers to a range of pages that has had one of the
415 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
443 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages) in unpin_user_pages_lockless() argument
451 * fork() and some anonymous pages might now actually be shared -- in unpin_user_pages_lockless()
455 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_lockless()
461 * unpin_user_pages() - release an array of gup-pinned pages.
462 * @pages: array of pages to be marked dirty and released.
463 * @npages: number of pages in the @pages array.
465 * For each page in the @pages array, release the page using unpin_user_page().
469 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
476 * If this WARN_ON() fires, then the system *might* be leaking pages (by in unpin_user_pages()
483 sanity_check_pinned_pages(pages, npages); in unpin_user_pages()
485 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages()
508 * has touched so far, we don't want to allocate unnecessary pages or in no_page_table()
606 * We only care about anon pages in can_follow_write_pte() and don't in follow_page_pte()
617 * Only return device mapping pages in the FOLL_GET or FOLL_PIN in follow_page_pte()
628 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
797 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
851 * to fail on PROT_NONE-mapped pages. in follow_page()
871 /* user gate pages are read-only */ in get_gate_page()
1068 * Anon pages in shared mappings are surprising: now in check_vma_flags()
1133 * __get_user_pages() - pin user pages in memory
1136 * @nr_pages: number of pages from start to pin
1138 * @pages: array that receives pointers to the pages pinned.
1140 * only intends to ensure the pages are faulted in.
1143 * Returns either number of pages pinned (which may be less than the
1147 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1148 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1149 * pages pinned. Again, this may be less than nr_pages.
1152 * The caller is responsible for releasing returned @pages, via put_page().
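Because of the return convention above (possibly fewer pages than requested, zero, or -errno), callers that need the whole range typically retry for the remainder. A hedged sketch of that pattern using the public get_user_pages_fast() wrapper; the helper name get_all_user_pages() is invented for illustration:

#include <linux/mm.h>

/* Sketch only: keep requesting until the range is fully covered or an error
 * occurs.  get_all_user_pages() is an invented name, not a kernel API.
 */
static long get_all_user_pages(unsigned long start, long nr_pages,
			       unsigned int gup_flags, struct page **pages)
{
	long got = 0;

	while (got < nr_pages) {
		int ret = get_user_pages_fast(start + got * PAGE_SIZE,
					      nr_pages - got, gup_flags,
					      pages + got);
		if (ret <= 0)
			return got ? got : ret;
		got += ret;
	}
	return got;
}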
1188 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1200 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1213 pages ? &page : NULL); in __get_user_pages()
1230 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
1259 * struct page. If the caller expects **pages to be in __get_user_pages()
1263 if (pages) { in __get_user_pages()
1276 if (pages) { in __get_user_pages()
1288 * pages. in __get_user_pages()
1313 pages[i + j] = subpage; in __get_user_pages()
1469 struct page **pages, in __get_user_pages_locked() argument
1497 * is to set FOLL_GET if the caller wants pages[] filled in (but has in __get_user_pages_locked()
1501 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1504 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1509 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1540 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1542 if (likely(pages)) in __get_user_pages_locked()
1543 pages += ret; in __get_user_pages_locked()
1574 pages, locked); in __get_user_pages_locked()
1590 if (likely(pages)) in __get_user_pages_locked()
1591 pages++; in __get_user_pages_locked()
1615 * populate_vma_page_range() - populate a range of pages in the vma.
1621 * This takes care of mlocking the pages too if VM_LOCKED is set.
1623 * Return either number of pages pinned in the vma, or a negative error
1689 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1697 * Returns either number of processed pages in the vma, or a negative error
1744 * __mm_populate - populate and/or mlock pages within a range of address space.
1762 * We want to fault in pages for [nstart; end) address range. in __mm_populate()
1784 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
1785 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
1805 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1845 if (pages) { in __get_user_pages_locked()
1846 pages[i] = virt_to_page((void *)start); in __get_user_pages_locked()
1847 if (pages[i]) in __get_user_pages_locked()
1848 get_page(pages[i]); in __get_user_pages_locked()
1934 * already know that some or all of the pages in the address range aren't in
1939 * Note that we don't pin or otherwise hold the pages referenced that we fault
2039 * Returns the number of collected pages. Return value is always >= 0.
2044 struct page **pages) in collect_longterm_unpinnable_pages() argument
2051 struct folio *folio = page_folio(pages[i]); in collect_longterm_unpinnable_pages()
2088 * Unpins all pages and migrates device coherent pages and movable_page_list.
2089 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
2095 struct page **pages) in migrate_longterm_unpinnable_pages() argument
2101 struct folio *folio = page_folio(pages[i]); in migrate_longterm_unpinnable_pages()
2108 pages[i] = NULL; in migrate_longterm_unpinnable_pages()
2121 * We can't migrate pages with unexpected references, so drop in migrate_longterm_unpinnable_pages()
2123 * Migrating pages have been added to movable_page_list after in migrate_longterm_unpinnable_pages()
2127 unpin_user_page(pages[i]); in migrate_longterm_unpinnable_pages()
2128 pages[i] = NULL; in migrate_longterm_unpinnable_pages()
2151 if (pages[i]) in migrate_longterm_unpinnable_pages()
2152 unpin_user_page(pages[i]); in migrate_longterm_unpinnable_pages()
2159 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
2160 * pages in the range are required to be pinned via FOLL_PIN, before calling
2163 * If any pages in the range are not allowed to be pinned, then this routine
2164 * will migrate those pages away, unpin all the pages in the range and return
2171 * If everything is OK and all pages in the range are allowed to be pinned, then
2172 * this routine leaves all pages pinned and returns zero for success.
2175 struct page **pages) in check_and_migrate_movable_pages() argument
2181 nr_pages, pages); in check_and_migrate_movable_pages()
2186 pages); in check_and_migrate_movable_pages()
2190 struct page **pages) in check_and_migrate_movable_pages() argument
2203 struct page **pages, in __gup_longterm_locked() argument
2211 return __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
2217 pages, locked, in __gup_longterm_locked()
2225 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); in __gup_longterm_locked()
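The -EAGAIN convention drives the FOLL_LONGTERM retry loop in __gup_longterm_locked(): pin the range, try to migrate anything unpinnable, and if migration succeeded (everything was unpinned) pin again. A condensed sketch of that control flow, consistent with the matched call sites; the non-longterm early return and surrounding allocation scoping are elided, and the wrapper name below is invented:

/* Condensed sketch of the FOLL_LONGTERM retry loop. */
static long gup_longterm_retry_sketch(struct mm_struct *mm, unsigned long start,
				      unsigned long nr_pages, struct page **pages,
				      int *locked, unsigned int gup_flags)
{
	long nr_pinned_pages, rc;

	do {
		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
							  pages, locked,
							  gup_flags);
		if (nr_pinned_pages <= 0) {
			rc = nr_pinned_pages;
			break;
		}
		/* -EAGAIN: everything was migrated and unpinned, so pin again. */
		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
	} while (rc == -EAGAIN);

	return rc ? rc : nr_pinned_pages;
}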
2235 static bool is_valid_gup_args(struct page **pages, int *locked, in is_valid_gup_args() argument
2268 /* Pages input must be given if using GET/PIN */ in is_valid_gup_args()
2269 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) in is_valid_gup_args()
2283 * get_user_pages_remote() - pin user pages in memory
2286 * @nr_pages: number of pages from start to pin
2288 * @pages: array that receives pointers to the pages pinned.
2290 * only intends to ensure the pages are faulted in.
2295 * Returns either number of pages pinned (which may be less than the
2299 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2300 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2301 * pages pinned. Again, this may be less than nr_pages.
2303 * The caller is responsible for releasing returned @pages, via put_page().
2327 * via the user virtual addresses. The pages may be submitted for
2340 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2345 if (!is_valid_gup_args(pages, locked, &gup_flags, in get_user_pages_remote()
2349 return __get_user_pages_locked(mm, start, nr_pages, pages, in get_user_pages_remote()
2358 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
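get_user_pages_remote() pins pages of another task's address space; the caller holds the target mm's mmap_lock and releases the pages with put_page(). An illustrative sketch that copies one page out of a remote mm; the name read_remote_page() is made up, @buf is assumed to hold PAGE_SIZE bytes, and the caller is assumed to already hold a reference on @mm (e.g. from get_task_mm()):

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

static int read_remote_page(struct mm_struct *mm, unsigned long uaddr, void *buf)
{
	struct page *page;
	void *kaddr;
	int locked = 1;
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, uaddr & PAGE_MASK, 1, 0, &page, &locked);
	if (locked)
		mmap_read_unlock(mm);	/* gup may have dropped the lock itself */
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap_local_page(page);
	memcpy(buf, kaddr, PAGE_SIZE);
	kunmap_local(kaddr);

	put_page(page);		/* get_* references are released with put_page() */
	return 0;
}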
2366 * get_user_pages() - pin user pages in memory
2368 * @nr_pages: number of pages from start to pin
2370 * @pages: array that receives pointers to the pages pinned.
2372 * only intends to ensure the pages are faulted in.
2380 unsigned int gup_flags, struct page **pages) in get_user_pages() argument
2384 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) in get_user_pages()
2387 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages()
2396 * get_user_pages(mm, ..., pages, NULL);
2401 * get_user_pages_unlocked(mm, ..., pages);
2408 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2412 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_unlocked()
2416 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages_unlocked()
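get_user_pages_unlocked() is for callers that do not already hold mmap_lock: it takes and drops the lock internally, and the references it returns are dropped with put_page(). A minimal sketch; the helper name is invented and @pages is assumed to have room for @nr_pages entries:

static long touch_user_range(unsigned long uaddr, unsigned long nr_pages,
			     struct page **pages)
{
	long got, i;

	/* No mmap_lock held here; the helper handles locking itself. */
	got = get_user_pages_unlocked(uaddr, nr_pages, pages, FOLL_WRITE);

	for (i = 0; i < got; i++)
		put_page(pages[i]);	/* FOLL_GET references: put_page() */

	return got;
}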
2424 * get_user_pages_fast attempts to pin user pages by walking the page
2426 * protected from page table pages being freed from under it, and should
2431 * pages are freed. This is unsuitable for architectures that do not need
2434 * Another way to achieve this is to batch up page table containing pages
2436 * pages. Disabling interrupts will allow the fast_gup walker to both block
2444 * free pages containing page tables or TLB flushing requires IPI broadcast.
2534 struct page **pages) in undo_dev_pagemap() argument
2537 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2569 struct page **pages, int *nr) in gup_pte_range() argument
2584 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: in gup_pte_range()
2585 * pte_access_permitted() better should reject these pages in gup_pte_range()
2602 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2650 pages[*nr] = page; in gup_pte_range()
2670 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2675 struct page **pages, int *nr) in gup_pte_range() argument
2684 struct page **pages, int *nr) in __gup_device_huge() argument
2694 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2699 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2704 pages[*nr] = page; in __gup_device_huge()
2706 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2719 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2725 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2729 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2737 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2743 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2747 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2755 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2763 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2771 unsigned long end, struct page **pages) in record_subpages() argument
2776 pages[nr] = nth_page(page, nr); in record_subpages()
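record_subpages() expands a huge mapping into its constituent struct pages for the output array; the matched line shows the nth_page() indexing. A reconstruction consistent with those lines (treat it as a sketch, not a verbatim copy):

/* Emit one struct page pointer per PAGE_SIZE step of the huge mapping. */
static int record_subpages(struct page *page, unsigned long addr,
			   unsigned long end, struct page **pages)
{
	int nr;

	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
		pages[nr] = nth_page(page, nr);

	return nr;
}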
2791 struct page **pages, int *nr) in gup_hugepte() argument
2812 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2840 struct page **pages, int *nr) in gup_huge_pd() argument
2849 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2858 struct page **pages, int *nr) in gup_huge_pd() argument
2866 struct page **pages, int *nr) in gup_huge_pmd() argument
2879 pages, nr); in gup_huge_pmd()
2883 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2910 struct page **pages, int *nr) in gup_huge_pud() argument
2923 pages, nr); in gup_huge_pud()
2927 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2955 struct page **pages, int *nr) in gup_huge_pgd() argument
2967 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2994 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
3014 pages, nr)) in gup_pmd_range()
3023 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
3025 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) in gup_pmd_range()
3033 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
3047 pages, nr)) in gup_pud_range()
3051 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
3053 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
3061 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
3076 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
3078 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
3086 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
3100 pages, nr)) in gup_pgd_range()
3104 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
3106 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
3112 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
3131 struct page **pages) in lockless_pages_from_mm() argument
3151 * With interrupts disabled, we block page table pages from being freed in lockless_pages_from_mm()
3159 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); in lockless_pages_from_mm()
3163 * When pinning pages for DMA there could be a concurrent write protect in lockless_pages_from_mm()
3168 unpin_user_pages_lockless(pages, nr_pinned); in lockless_pages_from_mm()
3171 sanity_check_pinned_pages(pages, nr_pinned); in lockless_pages_from_mm()
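lockless_pages_from_mm() is where the interrupt-disabling strategy described in the fast-GUP comments is applied: with IRQs off, page-table pages cannot be freed under the walker, and for FOLL_PIN the mm's write_protect_seq is sampled so that a concurrent fork()/write-protect invalidates the whole batch. A condensed sketch guided by the matched lines; the configuration checks and the non-FOLL_PIN path of the real function are trimmed:

static unsigned long lockless_pages_from_mm(unsigned long start,
					    unsigned long end,
					    unsigned int gup_flags,
					    struct page **pages)
{
	unsigned long irq_flags;
	int nr_pinned = 0;
	unsigned int seq;

	/* Sample the seqcount so a concurrent write protect is detected. */
	seq = raw_read_seqcount(&current->mm->write_protect_seq);
	if (seq & 1)
		return 0;

	/* IRQs off: page-table pages cannot be freed under the walker. */
	local_irq_save(irq_flags);
	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(irq_flags);

	/* A concurrent write protect invalidates the whole pinned batch. */
	if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
		unpin_user_pages_lockless(pages, nr_pinned);
		return 0;
	}
	sanity_check_pinned_pages(pages, nr_pinned);
	return nr_pinned;
}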
3180 struct page **pages) in internal_get_user_pages_fast() argument
3208 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); in internal_get_user_pages_fast()
3212 /* Slow path: try to get the remaining pages with get_user_pages */ in internal_get_user_pages_fast()
3214 pages += nr_pinned; in internal_get_user_pages_fast()
3216 pages, &locked, in internal_get_user_pages_fast()
3220 * The caller has to unpin the pages we already pinned so in internal_get_user_pages_fast()
3231 * get_user_pages_fast_only() - pin user pages in memory
3233 * @nr_pages: number of pages from start to pin
3235 * @pages: array that receives pointers to the pages pinned.
3242 * pages pinned.
3249 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
3258 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_fast_only()
3262 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast_only()
3267 * get_user_pages_fast() - pin user pages in memory
3269 * @nr_pages: number of pages from start to pin
3271 * @pages: array that receives pointers to the pages pinned.
3274 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3278 * Returns number of pages pinned. This may be fewer than the number requested.
3279 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3283 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
3291 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) in get_user_pages_fast()
3293 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
3298 * pin_user_pages_fast() - pin user pages in memory without taking locks
3301 * @nr_pages: number of pages from start to pin
3303 * @pages: array that receives pointers to the pages pinned.
3310 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3313 * Note that if a zero_page is amongst the returned pages, it will not have
3317 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
3319 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages_fast()
3321 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
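The get/pin split carries matching release rules: FOLL_GET references from get_user_pages_fast() are dropped with put_page(), while FOLL_PIN references from pin_user_pages_fast() are dropped with unpin_user_page()/unpin_user_pages(). A side-by-side sketch with an invented function name and no error handling:

static void gup_release_pairing_demo(unsigned long uaddr, int nr,
				     struct page **pages)
{
	int got, pinned, i;

	/* FOLL_GET pairing: get_user_pages_fast() ... put_page(). */
	got = get_user_pages_fast(uaddr, nr, 0, pages);
	for (i = 0; i < got; i++)
		put_page(pages[i]);

	/* FOLL_PIN pairing: pin_user_pages_fast() ... unpin_user_pages(). */
	pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
	if (pinned > 0)
		unpin_user_pages(pages, pinned);
}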
3326 * pin_user_pages_remote() - pin pages of a remote process
3330 * @nr_pages: number of pages from start to pin
3332 * @pages: array that receives pointers to the pages pinned.
3342 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3345 * Note that if a zero_page is amongst the returned pages, it will not have
3350 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
3355 if (!is_valid_gup_args(pages, locked, &gup_flags, in pin_user_pages_remote()
3358 return __gup_longterm_locked(mm, start, nr_pages, pages, in pin_user_pages_remote()
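pin_user_pages_remote() is the long-term-capable, remote-mm variant; with FOLL_LONGTERM the pin path first migrates pages off CMA/ZONE_MOVABLE. A hedged sketch of the common driver pattern (hold the target mm's mmap_lock around the call, release later with unpin_user_pages()); the wrapper name is invented:

static long pin_remote_longterm(struct mm_struct *mm, unsigned long uaddr,
				unsigned long nr_pages, struct page **pages)
{
	long pinned;

	mmap_read_lock(mm);
	/* locked == NULL here: the caller keeps mmap_lock for the whole call. */
	pinned = pin_user_pages_remote(mm, uaddr, nr_pages,
				       FOLL_WRITE | FOLL_LONGTERM,
				       pages, NULL);
	mmap_read_unlock(mm);

	return pinned;	/* release later with unpin_user_pages(pages, pinned) */
}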
3365 * pin_user_pages() - pin user pages in memory for use by other devices
3368 * @nr_pages: number of pages from start to pin
3370 * @pages: array that receives pointers to the pages pinned.
3376 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3379 * Note that if a zero_page is amongst the returned pages, it will not have
3383 unsigned int gup_flags, struct page **pages) in pin_user_pages() argument
3387 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages()
3390 pages, &locked, gup_flags); in pin_user_pages()
3399 * Note that if a zero_page is amongst the returned pages, it will not have
3403 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
3407 if (!is_valid_gup_args(pages, NULL, &gup_flags, in pin_user_pages_unlocked()
3411 return __gup_longterm_locked(current->mm, start, nr_pages, pages, in pin_user_pages_unlocked()