/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);
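
/*
 * Illustrative sketch (not part of this header): a sleepable helper that
 * keeps a kmap() mapping across a blocking operation. The helper name and
 * the do_something_blocking() call are hypothetical.
 *
 *	static void touch_page_sleepable(struct page *page)
 *	{
 *		void *vaddr = kmap(page);	// may sleep on 32bit HIGHMEM
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		do_something_blocking(vaddr);	// pointer stays valid and global
 *		kunmap(page);			// takes the page, not the address
 *	}
 */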

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(const struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 * remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(const struct page *page);

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 * addr1 = kmap_local_folio(folio1, offset1);
 * addr2 = kmap_local_folio(folio2, offset2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
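
/*
 * Illustrative sketch (not part of this header): mapping a single byte at an
 * arbitrary offset inside a (possibly large) folio. The helper name is
 * hypothetical.
 *
 *	static void zero_one_byte_in_folio(struct folio *folio, size_t offset)
 *	{
 *		char *addr = kmap_local_folio(folio, offset);
 *
 *		// addr points at @offset, not at the start of the folio
 *		*addr = 0;
 *		kunmap_local(addr);
 *	}
 */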

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 * // Find the page of interest.
 * struct page *page = find_get_page(mapping, offset);
 *
 * // Gain access to the contents of that page.
 * void *vaddr = kmap_atomic(page);
 *
 * // Do something to the contents of that page.
 * memset(vaddr, 0, PAGE_SIZE);
 *
 * // Unmap that page.
 * kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like:
 *
 * vaddr1 = kmap_atomic(page1);
 * vaddr2 = kmap_atomic(page2);
 *
 * memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 * kunmap_atomic(vaddr2);
 * kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(const struct page *page);

/* Highmem related interfaces for management code */
static inline unsigned long nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#ifndef clear_user_highpage
#ifndef clear_user_page
/**
 * clear_user_page() - clear a page to be mapped to user space
 * @addr: the address of the page
 * @vaddr: the address of the user mapping
 * @page: the page
 *
 * We condition the definition of clear_user_page() on the architecture
 * not having a custom clear_user_highpage(). That's because if there
 * is some special flushing needed for clear_user_highpage() then it
 * is likely that clear_user_page() also needs some magic. And, since
 * our only caller is the generic clear_user_highpage(), not defining
 * one is not much of a loss.
 */
static inline void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	clear_page(addr);
}
#endif

/**
 * clear_user_pages() - clear a page range to be mapped to user space
 * @addr: start address
 * @vaddr: start address of the user mapping
 * @page: start page
 * @npages: number of pages
 *
 * Assumes that the region (@addr, +@npages) has been validated
 * already so this does no exception handling.
 *
 * If the architecture provides a clear_user_page(), use that;
 * otherwise, we can safely use clear_pages().
 */
static inline void clear_user_pages(void *addr, unsigned long vaddr,
				    struct page *page, unsigned int npages)
{

#ifdef clear_user_page
	do {
		clear_user_page(addr, vaddr, page);
		addr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
		page++;
	} while (--npages);
#else
	/*
	 * Prefer clear_pages() to allow for architectural optimizations
	 * when operating on contiguous page ranges.
	 */
	clear_pages(addr, npages);
#endif
}

/**
 * clear_user_highpage() - clear a page to be mapped to user space
 * @page: start page
 * @vaddr: start address of the user mapping
 *
 * With !CONFIG_HIGHMEM this (and the copy_user_highpage() below) will
 * be plain clear_user_page() (and copy_user_page()).
 */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif /* clear_user_highpage */

/**
 * clear_user_highpages() - clear a page range to be mapped to user space
 * @page: start page
 * @vaddr: start address of the user mapping
 * @npages: number of pages
 *
 * Assumes that all the pages in the region (@page, +@npages) are valid
 * so this does no exception handling.
 */
static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
					unsigned int npages)
{

#if defined(clear_user_highpage) || defined(CONFIG_HIGHMEM)
	/*
	 * An architecture defined clear_user_highpage() implies that special
	 * handling is needed.
	 *
	 * So we use that, or the generic variant if CONFIG_HIGHMEM is
	 * enabled.
	 */
	do {
		clear_user_highpage(page, vaddr);
		vaddr += PAGE_SIZE;
		page++;
	} while (--npages);
#else

	/*
	 * Prefer clear_user_pages() to allow for architectural optimizations
	 * when operating on contiguous page ranges.
	 */
	clear_user_pages(page_address(page), vaddr, page, npages);
#endif
}

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address. It may be allocated from highmem or
 * the movable zone. An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
	if (folio && user_alloc_needs_zeroing())
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGES

/* Return false to let people know we did not initialize the pages */
static inline bool tag_clear_highpages(struct page *page, int numpages)
{
	return false;
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
/*
 * If the architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage() and copy_highpage(). They copy a
 * memory page with a #MC in the source page (@from) handled, and return the
 * number of bytes not copied if there was a #MC, otherwise 0 for success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memcpy_folio(struct folio *dst_folio, size_t dst_off,
		struct folio *src_folio, size_t src_off, size_t len)
{
	VM_BUG_ON(dst_off + len > folio_size(dst_folio));
	VM_BUG_ON(src_off + len > folio_size(src_folio));

	do {
		char *dst = kmap_local_folio(dst_folio, dst_off);
		const char *src = kmap_local_folio(src_folio, src_off);
		size_t chunk = len;

		if (folio_test_highmem(dst_folio) &&
		    chunk > PAGE_SIZE - offset_in_page(dst_off))
			chunk = PAGE_SIZE - offset_in_page(dst_off);
		if (folio_test_highmem(src_folio) &&
		    chunk > PAGE_SIZE - offset_in_page(src_off))
			chunk = PAGE_SIZE - offset_in_page(src_off);
		memcpy(dst, src, chunk);
		kunmap_local(src);
		kunmap_local(dst);

		dst_off += chunk;
		src_off += chunk;
		len -= chunk;
	} while (len > 0);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

/**
 * memcpy_from_folio - Copy a range of bytes from a folio.
 * @to: The memory to copy to.
 * @folio: The folio to read from.
 * @offset: The first byte in the folio to read.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_from_folio(char *to, struct folio *folio,
		size_t offset, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}

/**
 * memcpy_to_folio - Copy a range of bytes to a folio.
 * @folio: The folio to write to.
 * @offset: The first byte in the folio to store to.
 * @from: The memory to copy from.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_to_folio(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}
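
/*
 * Illustrative sketch (not part of this header): memcpy_to_folio() and
 * memcpy_from_folio() hide the per-page chunking needed for large folios on
 * HIGHMEM, so callers can treat the folio as a flat buffer. The helper name
 * and the @buf/@len parameters are hypothetical.
 *
 *	static void folio_roundtrip(struct folio *folio, char *buf, size_t len)
 *	{
 *		memcpy_to_folio(folio, 0, buf, len);	// copies and flushes dcache
 *		memcpy_from_folio(buf, folio, 0, len);	// reads the bytes back
 *	}
 */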

/**
 * folio_zero_tail - Zero the tail of a folio.
 * @folio: The folio to zero.
 * @offset: The byte offset in the folio to start zeroing at.
 * @kaddr: The address the folio is currently mapped to.
 *
 * If you have already used kmap_local_folio() to map a folio, written
 * some data to it and now need to zero the end of the folio (and flush
 * the dcache), you can use this function. If you do not have the
 * folio kmapped (eg the folio has been partially populated by DMA),
 * use folio_zero_range() or folio_zero_segment() instead.
 *
 * Return: An address which can be passed to kunmap_local().
 */
static inline __must_check void *folio_zero_tail(struct folio *folio,
		size_t offset, void *kaddr)
{
	size_t len = folio_size(folio) - offset;

	if (folio_test_partial_kmap(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memset(kaddr, 0, max);
			kunmap_local(kaddr);
			len -= max;
			offset += max;
			max = PAGE_SIZE;
			kaddr = kmap_local_folio(folio, offset);
		}
	}

	memset(kaddr, 0, len);
	flush_dcache_folio(folio);

	return kaddr;
}
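
/*
 * Illustrative sketch (not part of this header): the flow described above -
 * map, write some data, zero the remainder, then unmap whatever
 * folio_zero_tail() returns. Assumes @len fits within the first page so that
 * kaddr + len stays inside the current mapping; the helper name and
 * parameters are hypothetical.
 *
 *	static void write_then_zero(struct folio *folio, const char *data,
 *				    size_t len)
 *	{
 *		char *kaddr = kmap_local_folio(folio, 0);
 *
 *		memcpy(kaddr, data, len);
 *		// may remap internally; unmap the address it returns
 *		kaddr = folio_zero_tail(folio, len, kaddr + len);
 *		kunmap_local(kaddr);
 *	}
 */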

/**
 * folio_fill_tail - Copy some data to a folio and pad with zeroes.
 * @folio: The destination folio.
 * @offset: The offset into @folio at which to start copying.
 * @from: The data to copy.
 * @len: How many bytes of data to copy.
 *
 * This function is most useful for filesystems which support inline data.
 * When they want to copy data from the inode into the page cache, this
 * function does everything for them. It supports large folios even on
 * HIGHMEM configurations.
 */
static inline void folio_fill_tail(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	char *to = kmap_local_folio(folio, offset);

	VM_BUG_ON(offset + len > folio_size(folio));

	if (folio_test_partial_kmap(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memcpy(to, from, max);
			kunmap_local(to);
			len -= max;
			from += max;
			offset += max;
			max = PAGE_SIZE;
			to = kmap_local_folio(folio, offset);
		}
	}

	memcpy(to, from, len);
	to = folio_zero_tail(folio, offset + len, to + len);
	kunmap_local(to);
}
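
/*
 * Illustrative sketch (not part of this header): how an inline-data
 * filesystem read path might use folio_fill_tail() to copy the inline bytes
 * into the pagecache folio and zero-fill the rest. The helper name and the
 * inline_data/inline_len arguments are hypothetical.
 *
 *	static void read_inline_folio(struct folio *folio,
 *				      const char *inline_data, size_t inline_len)
 *	{
 *		folio_fill_tail(folio, 0, inline_data, inline_len);
 *		folio_mark_uptodate(folio);
 *	}
 */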

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
		loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_partial_kmap(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}
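
/*
 * Illustrative sketch (not part of this header): memcpy_from_file_folio()
 * may copy fewer than @len bytes, so callers needing the full range loop on
 * the return value. Assumes the whole range lies within @folio; the helper
 * name is hypothetical.
 *
 *	static void copy_range_from_folio(char *buf, struct folio *folio,
 *					  loff_t pos, size_t len)
 *	{
 *		while (len) {
 *			size_t n = memcpy_from_file_folio(buf, folio, pos, len);
 *
 *			buf += n;
 *			pos += n;
 *			len -= n;
 *		}
 *	}
 */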

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}

/**
 * folio_release_kmap - Unmap a folio and drop a refcount.
 * @folio: The folio to release.
 * @addr: The address previously returned by a call to kmap_local_folio().
 *
 * It is common, eg in directory handling, to kmap a folio. This function
 * unmaps the folio and drops the refcount that was being held to keep the
 * folio alive while we accessed it.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}
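
/*
 * Illustrative sketch (not part of this header): the directory-handling
 * pattern mentioned above - a lookup helper returns a kmapped pointer into a
 * refcounted folio, and folio_release_kmap() undoes both the mapping and the
 * reference. The get_dir_folio() helper is hypothetical.
 *
 *	struct folio *folio;
 *	void *kaddr = get_dir_folio(dir, n, &folio);	// kmaps and takes a ref
 *
 *	if (!IS_ERR(kaddr)) {
 *		// ... parse directory entries via kaddr ...
 *		folio_release_kmap(folio, kaddr);
 *	}
 */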
#endif /* _LINUX_HIGHMEM_H */