Lines matching +full:write +full:- +full:to +full:- +full:write (all hits below are from arch/x86/mm/gup.c)

23 pte.pte_low = ptep->pte_low; in gup_get_pte()
25 pte.pte_high = ptep->pte_high; in gup_get_pte()
27 if (unlikely(pte.pte_low != ptep->pte_low)) in gup_get_pte()
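The three hits above are the lock-free read of a 64-bit PAE pte on 32-bit x86: gup_get_pte() reads the low word, then the high word, then re-checks the low word, retrying until it sees a consistent pair, so a concurrent pte update can never hand it a torn value. A minimal sketch of the whole function, with the smp_rmb() barriers and the retry label reconstructed from the standard idiom rather than taken from the matched lines:

static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();			/* order low-word read before high */
	pte.pte_high = ptep->pte_high;
	smp_rmb();			/* order high-word read before re-check */
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;		/* pte changed under us: torn read */

	return pte;
}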
37 int write, struct page **pages, int *nr) in gup_pte_range() argument
45 pte_special(pte) || (write && !pte_write(pte))) { in gup_pte_range()
58 pte_unmap(ptep - 1); in gup_pte_range()
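gup_pte_range() walks one pmd's worth of ptes. Line 45 is the bail-out test: a non-present or special pte, or a read-only pte when write access was asked for, aborts the fast path; line 58 unmaps past the loop's final ptep++. A hedged reconstruction of the loop, with the get_page()/pages[] bookkeeping filled in from context rather than shown in the hits:

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		/* Anything we can't handle locklessly: punt to slow path. */
		if (!pte_present(pte) ||
			pte_special(pte) || (write && !pte_write(pte))) {
			pte_unmap(ptep);
			return 0;
		}
		page = pte_page(pte);
		get_page(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);	/* ptep has advanced one past the last entry */

	return 1;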
66 atomic_add(nr, &page->_count); in get_head_page_multiple()
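Line 66 is the whole point of get_head_page_multiple(): when a huge page contributes many 4K subpages, one atomic_add() on the compound head takes all the references at once instead of one get_page() per subpage. The function is tiny; a sketch with the sanity checks kernels of this vintage (where the refcount field is still spelled _count) keep around the add:

static inline void get_head_page_multiple(struct page *page, int nr)
{
	VM_BUG_ON(page != compound_head(page));	/* must be the head page */
	VM_BUG_ON(page_count(page) == 0);	/* and already referenced */
	atomic_add(nr, &page->_count);		/* nr refs in one atomic op */
}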
71 int write, struct page **pages, int *nr) in gup_huge_pmd() argument
77 if (write && !pte_write(pte)) in gup_huge_pmd()
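gup_huge_pmd() handles a huge (2M) page mapped by a single pmd; line 77 repeats the write-permission test at this level. Once it passes, the function fans the huge page out into per-4K page pointers and batches the refcounting through get_head_page_multiple(). Roughly, with the index arithmetic reconstructed rather than quoted:

	pte_t pte = *(pte_t *)&pmd;
	struct page *head, *page;
	int refs = 0;

	if (write && !pte_write(pte))
		return 0;		/* read-only huge pte, writer asked */

	head = pte_page(pte);
	/* First 4K subpage of [addr, end) within the huge page. */
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);
	get_head_page_multiple(head, refs);	/* one atomic add for all */

	return 1;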
101 int write, struct page **pages, int *nr) in gup_pmd_range() argument
113 * pmdp_splitting_flush has to flush the tlb, to stop in gup_pmd_range()
114 * this gup-fast code from running while we set the in gup_pmd_range()
117 * if the pmd is still in splitting state. gup-fast in gup_pmd_range()
125 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
128 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
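The comment at lines 113-117 covers the transparent-hugepage race: pmdp_splitting_flush() does a TLB flush (an IPI to every CPU) precisely so that gup-fast, which runs with IRQs disabled, cannot be mid-walk while the splitting bit is set. Returning 0 diverts to the slow path, which may sleep in wait_split_huge_page(); gup-fast itself must never wait there, because with IRQs off the flush IPI it would depend on could never be delivered. The dispatch the hits at lines 125 and 128 belong to looks roughly like this (the pud level, gup_huge_pud()/gup_pmd_range(), mirrors the same shape one level up):

		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;	/* let the slow path wait out the split */
		if (unlikely(pmd_large(pmd))) {
			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
		}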
137 int write, struct page **pages, int *nr) in gup_huge_pud() argument
143 if (write && !pte_write(pte)) in gup_huge_pud()
165 int write, struct page **pages, int *nr) in gup_pud_range() argument
178 if (!gup_huge_pud(pud, addr, next, write, pages, nr)) in gup_pud_range()
181 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) in gup_pud_range()
190 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
191 * back to the regular GUP.
193 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument
196 struct mm_struct *mm = current->mm; in __get_user_pages_fast()
207 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, in __get_user_pages_fast()
212 * XXX: batch / limit 'nr', to avoid large irq off latency in __get_user_pages_fast()
213 * needs some instrumenting to determine the common sizes used by in __get_user_pages_fast()
217 * It seems like we're in the clear for the moment. Direct-IO is in __get_user_pages_fast()
219 * they are limited to 64-at-a-time which is not so many. in __get_user_pages_fast()
226 * we can follow the address down to the page and take a ref on it. in __get_user_pages_fast()
236 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in __get_user_pages_fast()
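What makes __get_user_pages_fast() IRQ-safe is the bracketing the hits leave out: the whole walk runs between local_irq_save() and local_irq_restore(), which pins the page tables against teardown (freeing them requires a TLB-flush IPI this CPU cannot acknowledge with IRQs off), and on the first address it can't handle it simply stops and returns what it has instead of falling back. A hedged sketch of the walk around line 236:

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;		/* stop here; no fallback in this variant */
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;			/* however many pages got pinned */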
245 * get_user_pages_fast() - pin user pages in memory
247 * @nr_pages: number of pages from start to pin
248 * @write: whether pages will be written to
249 * @pages: array that receives pointers to the pages pinned.
252 * Attempt to pin user pages in memory without taking mm->mmap_sem.
253 * If not successful, it will fall back to taking the lock and
258 * were pinned, returns -errno.
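Because the return value can be any count from 0 to nr_pages, or -errno, callers must cope with partial pins. A hypothetical caller against the 4-argument signature shown here (pin_user_buffer() and its all-or-nothing policy are illustration, not kernel code):

/* Hypothetical: pin a user buffer for writing, all pages or none. */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);

	if (pinned < 0)
		return pinned;			/* -errno: nothing was pinned */
	if (pinned < nr_pages) {
		while (pinned--)		/* partial pin: undo and fail */
			put_page(pages[pinned]);
		return -EFAULT;
	}
	return 0;
}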
260 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument
263 struct mm_struct *mm = current->mm; in get_user_pages_fast()
286 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in get_user_pages_fast()
291 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); in get_user_pages_fast()
297 /* Try to get the remaining pages with get_user_pages */ in get_user_pages_fast()
301 down_read(&mm->mmap_sem); in get_user_pages_fast()
303 (end - start) >> PAGE_SHIFT, in get_user_pages_fast()
304 write, 0, pages, NULL); in get_user_pages_fast()
305 up_read(&mm->mmap_sem); in get_user_pages_fast()
307 /* Have to be a bit careful with return values */ in get_user_pages_fast()
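The care the comment at line 307 announces: by the time the slow path runs, the fast path may already have pinned nr pages, so a slow-path error must not discard them, and a slow-path success is added on top. Reconstructed (hedged) from how the function merges the two counts:

	/* Have to be a bit careful with return values */
	if (nr > 0) {
		if (ret < 0)
			ret = nr;	/* report the pages we did pin */
		else
			ret += nr;	/* fast-path pins + slow-path pins */
	}

	return ret;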