Lines Matching +full:write +full:- +full:to +full:- +full:write
4 * Copyright (C) 2009 - 2010 Paul Mundt
24 * taking any locks. For this we would like to load the pointers in gup_get_pte()
25 * atomically, but that is not possible with 64-bit PTEs. What in gup_get_pte()
27 * from not present to present, or present to not present or both in gup_get_pte()
28 * -- it will not switch to a completely different present page in gup_get_pte()
32 * Setting ptes from not present to present goes: in gup_get_pte()
33 * ptep->pte_high = h; in gup_get_pte()
35 * ptep->pte_low = l; in gup_get_pte()
37 * And present to not present goes: in gup_get_pte()
38 * ptep->pte_low = 0; in gup_get_pte()
40 * ptep->pte_high = 0; in gup_get_pte()
52 * very careful -- it does not atomically load the pte or anything that in gup_get_pte()
53 * is likely to be useful for you. in gup_get_pte()
58 pte.pte_low = ptep->pte_low; in gup_get_pte()
60 pte.pte_high = ptep->pte_high; in gup_get_pte()
62 if (unlikely(pte.pte_low != ptep->pte_low)) in gup_get_pte()
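Read together, the matches above spell out the lockless read of a 64-bit PTE: load pte_low, then pte_high, then re-check pte_low so a torn read is retried rather than trusted. A rough sketch of how those three loads likely sit inside gup_get_pte(); the retry label and the smp_rmb() barriers are assumptions inferred from the write ordering described in the comment, not lines matched above.

	pte_t pte;

retry:
	/* Load the low word first ... */
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	/* ... then the high word ... */
	pte.pte_high = ptep->pte_high;
	smp_rmb();
	/* ... and retry if the low word changed under us, since the high
	 * word we read may then belong to a different mapping. */
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;

Because a present pte_low is only ever written after its pte_high (and cleared before it), a pte that passes the present-bit check here is guaranteed to carry a matching high word.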
75 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() argument
82 if (write) in gup_pte_range()
86 if (write) in gup_pte_range()
90 if (write) in gup_pte_range()
112 pte_unmap(ptep - 1); in gup_pte_range()
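The three `if (write)` hits above come from per-configuration variants of the same idea in gup_pte_range(): build the set of PTE bits that must be present, and additionally require the write-permission bit when the caller asked for writable pins. A simplified sketch of the loop those checks feed into, ending in the `pte_unmap(ptep - 1)` seen above; the specific _PAGE_* bit names and the omitted cache-flush calls are assumptions, not matched lines.

	u64 mask, result;
	pte_t *ptep;

	/* Bits every usable PTE must have set. */
	result = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		result |= _PAGE_RW;	/* writable pins also need write permission */
	mask = result;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		/* Any missing bit means this pte can't be handled here;
		 * return 0 so the caller falls back to the slow path. */
		if ((pte_val(pte) & mask) != result) {
			pte_unmap(ptep);
			return 0;
		}
		page = pte_page(pte);
		get_page(page);		/* hold a reference while walking locklessly */
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;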
118 int write, struct page **pages, int *nr) in gup_pmd_range() argument
130 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) in gup_pmd_range()
138 int write, struct page **pages, int *nr) in gup_pud_range() argument
150 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) in gup_pud_range()
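gup_pmd_range() and gup_pud_range() are near-identical mid-level walkers: each steps across its level of the page table, gives up on an empty entry, and otherwise descends one level. A sketch of the pmd-level walker; the pud version has the same shape but iterates pud entries and calls gup_pmd_range(). The pmd_none() test is an assumption consistent with the early-return style of the matched lines.

	static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
	{
		unsigned long next;
		pmd_t *pmdp;

		pmdp = pmd_offset(&pud, addr);
		do {
			pmd_t pmd = *pmdp;

			next = pmd_addr_end(addr, end);
			/* Nothing mapped at this pmd: let the slow path handle it. */
			if (pmd_none(pmd))
				return 0;
			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
		} while (pmdp++, addr = next, addr != end);

		return 1;
	}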
158 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
159 * back to the regular GUP.
161 int __get_user_pages_fast(unsigned long start, int nr_pages, int write, in __get_user_pages_fast() argument
164 struct mm_struct *mm = current->mm; in __get_user_pages_fast()
175 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, in __get_user_pages_fast()
191 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in __get_user_pages_fast()
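Pieced together, __get_user_pages_fast() validates the user range with access_ok(), then walks pgd -> pud -> pmd -> pte with interrupts disabled; keeping IRQs off holds back the TLB-flush activity that would otherwise let the page tables be freed under the lockless walk, which is what makes it IRQ-safe with no slow-path fallback. A sketch of the body; the local_irq_save()/local_irq_restore() pair and the pgd_none() check are assumptions drawn from that description rather than matched lines.

	unsigned long addr, len, end, next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
				(void __user *)start, len)))
		return 0;

	/* With IRQs off, page-table teardown cannot complete underneath
	 * the walk, so no locks are needed and no fallback is taken. */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;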
200 * get_user_pages_fast() - pin user pages in memory
202 * @nr_pages: number of pages from start to pin
203 * @write: whether pages will be written to
204 * @pages: array that receives pointers to the pages pinned.
207 * Attempt to pin user pages in memory without taking mm->mmap_sem.
208 * If not successful, it will fall back to taking the lock and
213 * were pinned, returns -errno.
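As the kernel-doc above describes, a caller passes a user virtual address range and receives back referenced struct page pointers, with a result that may be short or negative. A minimal, hypothetical caller (user_buf and the page count are illustrative names only):

	struct page *pages[16];
	int nr;

	/* Try to pin up to 16 pages of the user buffer for writing. */
	nr = get_user_pages_fast((unsigned long)user_buf, 16, 1, pages);
	if (nr < 0)
		return nr;		/* nothing pinned, -errno */

	/* ... use the pinned pages ... */

	while (nr--)
		put_page(pages[nr]);	/* drop the references taken on our behalf */

A short positive return still leaves the caller responsible for releasing exactly the pages that were pinned.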
215 int get_user_pages_fast(unsigned long start, int nr_pages, int write, in get_user_pages_fast() argument
218 struct mm_struct *mm = current->mm; in get_user_pages_fast()
240 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) in get_user_pages_fast()
245 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); in get_user_pages_fast()
254 /* Try to get the remaining pages with get_user_pages */ in get_user_pages_fast()
258 down_read(&mm->mmap_sem); in get_user_pages_fast()
260 (end - start) >> PAGE_SHIFT, write, 0, pages, NULL); in get_user_pages_fast()
261 up_read(&mm->mmap_sem); in get_user_pages_fast()
263 /* Have to be a bit careful with return values */ in get_user_pages_fast()
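The tail of get_user_pages_fast() shown above is the fallback: whatever the lockless walk could not pin is handed to the regular, mmap_sem-taking get_user_pages(), and the two counts are then merged, which is what the "careful with return values" comment is about. A sketch of that tail; the ret variable name and the exact merge logic are assumptions consistent with the fragments.

	int ret;

	/* Advance past what the fast walk already pinned. */
	start += nr << PAGE_SHIFT;
	pages += nr;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start,
			     (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	/* Have to be a bit careful with return values: if the fast path
	 * pinned something, report at least that many even when the slow
	 * path failed; otherwise add the two counts together. */
	if (nr > 0) {
		if (ret < 0)
			ret = nr;
		else
			ret += nr;
	}

	return ret;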