Lines matching "page" in include/linux/highmem.h, with brief usage notes added after the relevant matches.
15 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
21 static inline void flush_kernel_dcache_page(struct page *page)
35 extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
47 void *kmap_high(struct page *page);
48 static inline void *kmap(struct page *page)
53 if (!PageHighMem(page))
54 addr = page_address(page);
56 addr = kmap_high(page);
61 void kunmap_high(struct page *page);
63 static inline void kunmap(struct page *page)
66 if (!PageHighMem(page))
68 kunmap_high(page);
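A minimal sketch of how this kmap()/kunmap() pair is typically used (the helper name and buffer are illustrative, not from this header):

        #include <linux/highmem.h>
        #include <linux/string.h>

        /* Copy a whole (possibly highmem) page into a kernel buffer.
         * kmap() may sleep, so this must not run in atomic context. */
        static void copy_page_to_buf(struct page *page, void *buf)
        {
                void *vaddr = kmap(page);  /* highmem pages get a persistent mapping */

                memcpy(buf, vaddr, PAGE_SIZE);
                kunmap(page);              /* note: kunmap() takes the page, not vaddr */
        }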
84 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
88 if (!PageHighMem(page))
89 return page_address(page);
90 return kmap_atomic_high_prot(page, prot);
92 #define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
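Where a non-default protection is needed, the _prot variant can be called directly; a hedged sketch (the write-combining choice here is only an example, not implied by this header):

        /* Map a page with an explicit page protection from atomic context. */
        void *vaddr = kmap_atomic_prot(page, pgprot_writecombine(PAGE_KERNEL));
        /* ... access the mapping ... */
        kunmap_atomic(vaddr);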
124 struct page *kmap_to_page(void *addr);
130 static inline struct page *kmap_to_page(void *addr)
137 static inline void *kmap(struct page *page)
140 return page_address(page);
143 static inline void kunmap_high(struct page *page)
147 static inline void kunmap(struct page *page)
150 kunmap_flush_on_unmap(page_address(page));
154 static inline void *kmap_atomic(struct page *page)
158 return page_address(page);
160 #define kmap_atomic_prot(page, prot) kmap_atomic(page)
214 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
218 BUILD_BUG_ON(__same_type((addr), struct page *)); \
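The BUILD_BUG_ON above enforces that rule at compile time. A minimal sketch of correct atomic mapping (the helper name is illustrative):

        #include <linux/highmem.h>
        #include <linux/string.h>

        /* Zero one page from a non-sleeping context. Unlike kunmap(),
         * kunmap_atomic() takes the mapped address; passing the
         * struct page * would trip the BUILD_BUG_ON above. */
        static void zero_page_atomic(struct page *page)
        {
                void *vaddr = kmap_atomic(page);  /* disables pagefaults/preemption */

                memset(vaddr, 0, PAGE_SIZE);
                kunmap_atomic(vaddr);             /* the address, not the page */
        }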
227 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
229 void *addr = kmap_atomic(page);
230 clear_user_page(addr, vaddr, page);
237 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
239 * @vma: The VMA the page is to be allocated for
240 * @vaddr: The virtual address the page will be inserted into
242 * This function will allocate a page for a VMA but the caller is expected
243 * to specify via movableflags whether the page will be movable in the
250 static inline struct page *
255 struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
258 if (page)
259 clear_user_highpage(page, vaddr);
261 return page;
266 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
267 * @vma: The VMA the page is to be allocated for
268 * @vaddr: The virtual address the page will be inserted into
270 * This function will allocate a page for a VMA that the caller knows will
273 static inline struct page *
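A hedged sketch of how the movable variant is typically called from a fault path (vmf and the surrounding handler are assumed context, not part of this header):

        /* Allocate a zeroed, movable page for the faulting user address,
         * as an anonymous-fault handler would. */
        struct page *page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
        if (!page)
                return VM_FAULT_OOM;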
280 static inline void clear_highpage(struct page *page)
282 void *kaddr = kmap_atomic(page);
287 static inline void zero_user_segments(struct page *page,
291 void *kaddr = kmap_atomic(page);
302 flush_dcache_page(page);
305 static inline void zero_user_segment(struct page *page,
308 zero_user_segments(page, start, end, 0, 0);
311 static inline void zero_user(struct page *page,
314 zero_user_segments(page, start, start + size, 0, 0);
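These helpers map the page, zero the given byte range(s), and flush the dcache. A short sketch of the common filesystem pattern of zeroing a page tail past EOF (the helper name and 'offset' are illustrative):

        /* Zero everything from 'offset' to the end of the page. */
        static void zero_page_tail(struct page *page, unsigned int offset)
        {
                zero_user(page, offset, PAGE_SIZE - offset);
        }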
319 static inline void copy_user_highpage(struct page *to, struct page *from,
335 static inline void copy_highpage(struct page *to, struct page *from)
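A minimal sketch of the copy-on-write style use of copy_user_highpage() (the wrapper is illustrative; the kernel's fault path does this inline):

        /* Duplicate 'from' into 'to' for a user mapping at 'vaddr'; the
         * vaddr/vma arguments let architectures handle D-cache aliasing. */
        static void duplicate_user_page(struct page *to, struct page *from,
                                        unsigned long vaddr,
                                        struct vm_area_struct *vma)
        {
                copy_user_highpage(to, from, vaddr, vma);
        }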