Lines Matching +full:data +full:- +full:mapping
1 /* SPDX-License-Identifier: GPL-2.0 */
21 unsigned long invalidate_mapping_pages(struct address_space *mapping,
26 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || in invalidate_remote_inode()
27 S_ISLNK(inode->i_mode)) in invalidate_remote_inode()
28 invalidate_mapping_pages(inode->i_mapping, 0, -1); in invalidate_remote_inode()
30 int invalidate_inode_pages2(struct address_space *mapping);
31 int invalidate_inode_pages2_range(struct address_space *mapping,
39 int filemap_fdatawait_keep_errors(struct address_space *mapping);
41 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
44 static inline int filemap_fdatawait(struct address_space *mapping) in filemap_fdatawait() argument
46 return filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait()
50 int filemap_write_and_wait_range(struct address_space *mapping,
52 int __filemap_fdatawrite_range(struct address_space *mapping,
54 int filemap_fdatawrite_range(struct address_space *mapping,
56 int filemap_check_errors(struct address_space *mapping);
57 void __filemap_set_wb_err(struct address_space *mapping, int err);
58 int filemap_fdatawrite_wbc(struct address_space *mapping,
62 static inline int filemap_write_and_wait(struct address_space *mapping) in filemap_write_and_wait() argument
64 return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); in filemap_write_and_wait()
68 * filemap_set_wb_err - set a writeback error on an address_space
69 * @mapping: mapping in which to set writeback error
70 * @err: error to be set in mapping
78 * filemap_set_wb_err to record the error in the mapping so that it will be
81 static inline void filemap_set_wb_err(struct address_space *mapping, int err) in filemap_set_wb_err() argument
85 __filemap_set_wb_err(mapping, err); in filemap_set_wb_err()
89 * filemap_check_wb_err - has an error occurred since the mark was sampled?
90 * @mapping: mapping to check for writeback errors
91 * @since: previously-sampled errseq_t
93 * Grab the errseq_t value from the mapping, and see if it has changed "since"
98 static inline int filemap_check_wb_err(struct address_space *mapping, in filemap_check_wb_err() argument
101 return errseq_check(&mapping->wb_err, since); in filemap_check_wb_err()
105 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
106 * @mapping: mapping to be sampled
111 static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) in filemap_sample_wb_err() argument
113 return errseq_sample(&mapping->wb_err); in filemap_sample_wb_err()
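Taken together, filemap_sample_wb_err() and filemap_check_wb_err() form the usual errseq_t pattern: sample before kicking off writeback, check after waiting. A minimal sketch, assuming filemap_fdatawrite() from linux/fs.h (the helper name is hypothetical):

/*
 * Sketch only: detect writeback errors raised across an explicit flush.
 */
static int example_flush_and_check(struct address_space *mapping)
{
	errseq_t since = filemap_sample_wb_err(mapping);
	int err;

	err = filemap_fdatawrite(mapping);
	if (!err)
		err = filemap_fdatawait_keep_errors(mapping);
	if (err)
		return err;
	/* Any error recorded in mapping->wb_err since the sample? */
	return filemap_check_wb_err(mapping, since);
}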
117 * file_sample_sb_err - sample the current errseq_t to test for later errors
120 * Grab the most current superblock-level errseq_t value for the given
125 return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); in file_sample_sb_err()
129 * Flush file data before changing attributes. Caller must hold any locks
136 return filemap_write_and_wait(inode->i_mapping); in inode_drain_writes()
139 static inline bool mapping_empty(struct address_space *mapping) in mapping_empty() argument
141 return xa_empty(&mapping->i_pages); in mapping_empty()
145 * mapping_shrinkable - test if page cache state allows inode reclaim
146 * @mapping: the page cache mapping
146 * This checks the mapping's cache state for the purpose of inode
154 * its LRU state don't nest inside the irq-safe i_pages lock.
165 static inline bool mapping_shrinkable(struct address_space *mapping) in mapping_shrinkable() argument
178 head = rcu_access_pointer(mapping->i_pages.xa_head); in mapping_shrinkable()
183 * The xarray stores single offset-0 entries directly in the in mapping_shrinkable()
184 * head pointer, which allows non-resident page cache entries in mapping_shrinkable()
195 * Bits in mapping->flags.
206 AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
209 AS_UNMOVABLE, /* The mapping cannot be moved, ever */
213 * mapping_set_error - record a writeback error in the address_space
214 * @mapping: the mapping in which an error should be set
215 * @error: the error to set in the mapping
223 * mapping_set_error to record the error in the mapping so that it can be
226 static inline void mapping_set_error(struct address_space *mapping, int error) in mapping_set_error() argument
232 __filemap_set_wb_err(mapping, error); in mapping_set_error()
235 if (mapping->host) in mapping_set_error()
236 errseq_set(&mapping->host->i_sb->s_wb_err, error); in mapping_set_error()
239 if (error == -ENOSPC) in mapping_set_error()
240 set_bit(AS_ENOSPC, &mapping->flags); in mapping_set_error()
242 set_bit(AS_EIO, &mapping->flags); in mapping_set_error()
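A filesystem's writeback completion path typically funnels I/O errors through mapping_set_error(). A hypothetical end-of-write-I/O handler, assuming one folio per bio and the usual linux/bio.h helpers:

/*
 * Sketch only: record a block-layer write error in the mapping so a
 * later fsync() on the file reports it.
 */
static void example_write_end_io(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status)
		mapping_set_error(folio->mapping,
				  blk_status_to_errno(bio->bi_status));
	folio_end_writeback(folio);
	bio_put(bio);
}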
245 static inline void mapping_set_unevictable(struct address_space *mapping) in mapping_set_unevictable() argument
247 set_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_set_unevictable()
250 static inline void mapping_clear_unevictable(struct address_space *mapping) in mapping_clear_unevictable() argument
252 clear_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_clear_unevictable()
255 static inline bool mapping_unevictable(struct address_space *mapping) in mapping_unevictable() argument
257 return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_unevictable()
260 static inline void mapping_set_exiting(struct address_space *mapping) in mapping_set_exiting() argument
262 set_bit(AS_EXITING, &mapping->flags); in mapping_set_exiting()
265 static inline int mapping_exiting(struct address_space *mapping) in mapping_exiting() argument
267 return test_bit(AS_EXITING, &mapping->flags); in mapping_exiting()
270 static inline void mapping_set_no_writeback_tags(struct address_space *mapping) in mapping_set_no_writeback_tags() argument
272 set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); in mapping_set_no_writeback_tags()
275 static inline int mapping_use_writeback_tags(struct address_space *mapping) in mapping_use_writeback_tags() argument
277 return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); in mapping_use_writeback_tags()
280 static inline bool mapping_release_always(const struct address_space *mapping) in mapping_release_always() argument
282 return test_bit(AS_RELEASE_ALWAYS, &mapping->flags); in mapping_release_always()
285 static inline void mapping_set_release_always(struct address_space *mapping) in mapping_set_release_always() argument
287 set_bit(AS_RELEASE_ALWAYS, &mapping->flags); in mapping_set_release_always()
290 static inline void mapping_clear_release_always(struct address_space *mapping) in mapping_clear_release_always() argument
292 clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); in mapping_clear_release_always()
295 static inline bool mapping_stable_writes(const struct address_space *mapping) in mapping_stable_writes() argument
297 return test_bit(AS_STABLE_WRITES, &mapping->flags); in mapping_stable_writes()
300 static inline void mapping_set_stable_writes(struct address_space *mapping) in mapping_set_stable_writes() argument
302 set_bit(AS_STABLE_WRITES, &mapping->flags); in mapping_set_stable_writes()
305 static inline void mapping_clear_stable_writes(struct address_space *mapping) in mapping_clear_stable_writes() argument
307 clear_bit(AS_STABLE_WRITES, &mapping->flags); in mapping_clear_stable_writes()
310 static inline void mapping_set_unmovable(struct address_space *mapping) in mapping_set_unmovable() argument
317 set_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_set_unmovable()
318 set_bit(AS_UNMOVABLE, &mapping->flags); in mapping_set_unmovable()
321 static inline bool mapping_unmovable(struct address_space *mapping) in mapping_unmovable() argument
323 return test_bit(AS_UNMOVABLE, &mapping->flags); in mapping_unmovable()
326 static inline gfp_t mapping_gfp_mask(struct address_space * mapping) in mapping_gfp_mask() argument
328 return mapping->gfp_mask; in mapping_gfp_mask()
331 /* Restricts the given gfp_mask to what the mapping allows. */
332 static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, in mapping_gfp_constraint() argument
335 return mapping_gfp_mask(mapping) & gfp_mask; in mapping_gfp_constraint()
339 * This is non-atomic. Only to be used before the mapping is activated.
344 m->gfp_mask = mask; in mapping_set_gfp_mask()
348 * mapping_set_large_folios() - Indicate the file supports large folios.
349 * @mapping: The file.
356 * is non-atomic.
358 static inline void mapping_set_large_folios(struct address_space *mapping) in mapping_set_large_folios() argument
360 __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); in mapping_set_large_folios()
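Filesystems that can handle multi-page folios opt in once, while setting up the inode and before the mapping sees any use (the bit set is non-atomic). A sketch with a hypothetical set-up hook:

/* Sketch only: enable large folios for a freshly initialized inode. */
static void example_setup_inode(struct inode *inode)
{
	mapping_set_large_folios(inode->i_mapping);
}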
367 static inline bool mapping_large_folio_support(struct address_space *mapping) in mapping_large_folio_support() argument
370 test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); in mapping_large_folio_support()
373 static inline int filemap_nr_thps(struct address_space *mapping) in filemap_nr_thps() argument
376 return atomic_read(&mapping->nr_thps); in filemap_nr_thps()
382 static inline void filemap_nr_thps_inc(struct address_space *mapping) in filemap_nr_thps_inc() argument
385 if (!mapping_large_folio_support(mapping)) in filemap_nr_thps_inc()
386 atomic_inc(&mapping->nr_thps); in filemap_nr_thps_inc()
388 WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); in filemap_nr_thps_inc()
392 static inline void filemap_nr_thps_dec(struct address_space *mapping) in filemap_nr_thps_dec() argument
395 if (!mapping_large_folio_support(mapping)) in filemap_nr_thps_dec()
396 atomic_dec(&mapping->nr_thps); in filemap_nr_thps_dec()
398 WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); in filemap_nr_thps_dec()
407 * folio_file_mapping - Find the mapping this folio belongs to.
410 * For folios which are in the page cache, return the mapping that this
411 * page belongs to. Folios in the swap cache return the mapping of the
412 * swap file or swap device where the data is stored. This is different
413 * from the mapping returned by folio_mapping(). The only reason to
414 * use it is if, like NFS, you return 0 from ->activate_swapfile.
423 return folio->mapping; in folio_file_mapping()
427 * folio_flush_mapping - Find the file mapping this folio belongs to.
430 * For folios which are in the page cache, return the mapping that this
452 * folio_inode - Get the host inode for this folio.
462 return folio->mapping->host; in folio_inode()
466 * folio_attach_private - Attach private data to a folio.
467 * @folio: Folio to attach data to.
468 * @data: Data to attach to folio.
470 * Attaching private data to a folio increments the folio's reference count.
471 * The data must be detached before the folio will be freed.
473 static inline void folio_attach_private(struct folio *folio, void *data) in folio_attach_private() argument
476 folio->private = data; in folio_attach_private()
481 * folio_change_private - Change private data on a folio.
482 * @folio: Folio to change the data on.
483 * @data: Data to set on the folio.
485 * Change the private data attached to a folio and return the old
486 * data. The folio must previously have had data attached and the data
489 * Return: Data that was previously attached to the folio.
491 static inline void *folio_change_private(struct folio *folio, void *data) in folio_change_private() argument
495 folio->private = data; in folio_change_private()
500 * folio_detach_private - Detach private data from a folio.
501 * @folio: Folio to detach data from.
503 * Removes the data that was previously attached to the folio and decrements
506 * Return: Data that was attached to the folio.
510 void *data = folio_get_private(folio); in folio_detach_private() local
515 folio->private = NULL; in folio_detach_private()
518 return data; in folio_detach_private()
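A common pattern is to stash a per-folio state structure in ->private while the filesystem owns the folio. A sketch with a hypothetical state blob (needs linux/slab.h):

/* Sketch only: attach and later free per-folio filesystem state. */
struct example_folio_state {
	unsigned long dirty_bitmap;
};

static int example_attach(struct folio *folio)
{
	struct example_folio_state *state;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	folio_attach_private(folio, state);	/* takes a folio reference */
	return 0;
}

static void example_release(struct folio *folio)
{
	/* folio_detach_private() drops the reference taken above. */
	kfree(folio_detach_private(folio));
}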
521 static inline void attach_page_private(struct page *page, void *data) in attach_page_private() argument
523 folio_attach_private(page_folio(page), data); in attach_page_private()
555 return &filemap_alloc_folio(gfp, 0)->page; in __page_cache_alloc()
570 pgoff_t page_cache_next_miss(struct address_space *mapping,
572 pgoff_t page_cache_prev_miss(struct address_space *mapping,
576 * typedef fgf_t - Flags for getting folios from the page cache.
584 * * %FGP_ACCESSED - The folio will be marked accessed.
585 * * %FGP_LOCK - The folio is returned locked.
586 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
589 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
592 * * %FGP_WRITE - The folio will be written to by the caller.
593 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
594 * * %FGP_NOWAIT - Don't block on the folio lock.
595 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
596 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
614 * fgf_set_order - Encode a length in the fgf_t flags.
630 return (__force fgf_t)((shift - PAGE_SHIFT) << 26); in fgf_set_order()
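Callers pass a byte length and the encoded order travels in the fgf_t flags. A sketch asking for (up to) a 64KiB folio at byte offset pos, creating it if absent; SZ_64K is from linux/sizes.h and the helper name is made up:

/* Sketch only: grab a large locked folio covering @pos. */
static struct folio *example_grab_large(struct address_space *mapping,
					loff_t pos)
{
	return __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				   FGP_LOCK | FGP_CREAT |
				   fgf_set_order(SZ_64K),
				   mapping_gfp_mask(mapping));
}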
633 void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
634 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
636 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
640 * filemap_get_folio - Find and get a folio.
641 * @mapping: The address_space to search.
644 * Looks up the page cache entry at @mapping & @index. If a folio is
647 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
650 static inline struct folio *filemap_get_folio(struct address_space *mapping, in filemap_get_folio() argument
653 return __filemap_get_folio(mapping, index, 0, 0); in filemap_get_folio()
657 * filemap_lock_folio - Find and lock a folio.
658 * @mapping: The address_space to search.
661 * Looks up the page cache entry at @mapping & @index. If a folio is
665 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
668 static inline struct folio *filemap_lock_folio(struct address_space *mapping, in filemap_lock_folio() argument
671 return __filemap_get_folio(mapping, index, FGP_LOCK, 0); in filemap_lock_folio()
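Note that both helpers return ERR_PTR(-ENOENT) on a miss, never NULL, so callers must test with IS_ERR() before touching the folio. A sketch (function name hypothetical):

/* Sketch only: look up and lock a cached folio, tolerating a miss. */
static void example_touch_cached(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_lock_folio(mapping, index);

	if (IS_ERR(folio))
		return;			/* not present in the cache */
	/* ... use the locked folio here ... */
	folio_unlock(folio);
	folio_put(folio);
}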
675 * filemap_grab_folio - grab a folio from the page cache
676 * @mapping: The address space to search
679 * Looks up the page cache entry at @mapping & @index. If no folio is found,
683 * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
686 static inline struct folio *filemap_grab_folio(struct address_space *mapping, in filemap_grab_folio() argument
689 return __filemap_get_folio(mapping, index, in filemap_grab_folio()
691 mapping_gfp_mask(mapping)); in filemap_grab_folio()
695 * find_get_page - find and get a page reference
696 * @mapping: the address_space to search
699 * Looks up the page cache slot at @mapping & @offset. If there is a
704 static inline struct page *find_get_page(struct address_space *mapping, in find_get_page() argument
707 return pagecache_get_page(mapping, offset, 0, 0); in find_get_page()
710 static inline struct page *find_get_page_flags(struct address_space *mapping, in find_get_page_flags() argument
713 return pagecache_get_page(mapping, offset, fgp_flags, 0); in find_get_page_flags()
717 * find_lock_page - locate, pin and lock a pagecache page
718 * @mapping: the address_space to search
721 * Looks up the page cache entry at @mapping & @index. If there is a
729 static inline struct page *find_lock_page(struct address_space *mapping, in find_lock_page() argument
732 return pagecache_get_page(mapping, index, FGP_LOCK, 0); in find_lock_page()
736 * find_or_create_page - locate or add a pagecache page
737 * @mapping: the page's address_space
738 * @index: the page's index into the mapping
741 * Looks up the page cache slot at @mapping & @offset. If there is a
754 static inline struct page *find_or_create_page(struct address_space *mapping, in find_or_create_page() argument
757 return pagecache_get_page(mapping, index, in find_or_create_page()
763 * grab_cache_page_nowait - returns locked page at given index in given cache
764 * @mapping: target address_space
768 * This is intended for speculative data generators, where the data can
775 static inline struct page *grab_cache_page_nowait(struct address_space *mapping, in grab_cache_page_nowait() argument
778 return pagecache_get_page(mapping, index, in grab_cache_page_nowait()
780 mapping_gfp_mask(mapping)); in grab_cache_page_nowait()
783 #define swapcache_index(folio) __page_file_index(&(folio)->page)
786 * folio_index - File index of a folio.
800 return folio->index; in folio_index()
804 * folio_next_index - Get the index of the next folio.
811 return folio->index + folio_nr_pages(folio); in folio_next_index()
815 * folio_file_page - The page for a particular index.
822 * Return: The page containing the file data for this index.
826 return folio_page(folio, index & (folio_nr_pages(folio) - 1)); in folio_file_page()
830 * folio_contains - Does this folio contain this index?
841 return index - folio_index(folio) < folio_nr_pages(folio); in folio_contains()
854 return head + (index & (thp_nr_pages(head) - 1)); in find_subpage()
857 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
859 unsigned filemap_get_folios_contig(struct address_space *mapping,
861 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
864 struct page *grab_cache_page_write_begin(struct address_space *mapping,
870 static inline struct page *grab_cache_page(struct address_space *mapping, in grab_cache_page() argument
873 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); in grab_cache_page()
882 extern struct page * read_cache_page_gfp(struct address_space *mapping,
885 static inline struct page *read_mapping_page(struct address_space *mapping, in read_mapping_page() argument
888 return read_cache_page(mapping, index, NULL, file); in read_mapping_page()
891 static inline struct folio *read_mapping_folio(struct address_space *mapping, in read_mapping_folio() argument
894 return read_cache_folio(mapping, index, NULL, file); in read_mapping_folio()
905 return page->index; in page_to_pgoff()
909 * We don't initialize ->index for tail pages: calculate based on in page_to_pgoff()
912 return head->index + page - head; in page_to_pgoff()
916 * Return byte-offset into filesystem object for page.
920 return ((loff_t)page->index) << PAGE_SHIFT; in page_offset()
929 * folio_pos - Returns the byte position of this folio in its file.
934 return page_offset(&folio->page); in folio_pos()
938 * folio_file_pos - Returns the byte position of this folio in its file.
946 return page_file_offset(&folio->page); in folio_file_pos()
954 return folio->index; in folio_pgoff()
961 pgoff = (address - vma->vm_start) >> PAGE_SHIFT; in linear_page_index()
962 pgoff += vma->vm_pgoff; in linear_page_index()
981 if (wait_page->folio != key->folio) in wake_page_match()
983 key->page_match = 1; in wake_page_match()
985 if (wait_page->bit_nr != key->bit_nr) in wake_page_match()
998 * folio_trylock() - Attempt to lock a folio.
1023 * folio_lock() - Lock this folio.
1030 * is sufficient to keep folio->mapping stable.
1034 * cross a page boundary). Other modifications to the data in the folio
1052 * lock_page() - Lock the folio containing this page.
1073 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
1080 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
1091 * folio_lock_or_retry - Lock the folio, unless this would block and the
1148 struct address_space *mapping, int warn) in __set_page_dirty() argument
1150 __folio_mark_dirty(page_folio(page), mapping, warn); in __set_page_dirty()
1164 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
1167 int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
1189 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
1191 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
1196 void delete_from_page_cache_batch(struct address_space *mapping,
1202 /* Must be non-static for BPF error injection */
1203 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
1206 bool filemap_range_has_writeback(struct address_space *mapping,
1210 * filemap_range_needs_writeback - check if range potentially needs writeback
1211 * @mapping: address space within which to check
1223 static inline bool filemap_range_needs_writeback(struct address_space *mapping, in filemap_range_needs_writeback() argument
1227 if (!mapping->nrpages) in filemap_range_needs_writeback()
1229 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && in filemap_range_needs_writeback()
1230 !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) in filemap_range_needs_writeback()
1232 return filemap_range_has_writeback(mapping, start_byte, end_byte); in filemap_range_needs_writeback()
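This is the kind of check a non-blocking (IOCB_NOWAIT) read path can use to bail out rather than wait on writeback. A sketch, with the function and flow purely illustrative:

/* Sketch only: refuse to block on writeback in a nowait read. */
static ssize_t example_read_nowait(struct kiocb *iocb, struct iov_iter *to)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	loff_t end = iocb->ki_pos + iov_iter_count(to) - 1;

	if (filemap_range_needs_writeback(mapping, iocb->ki_pos, end))
		return -EAGAIN;
	/* ... safe to proceed without waiting ... */
	return 0;
}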
1236 * struct readahead_control - Describes a readahead request.
1239 * implement the ->readahead method should call readahead_page() or
1248 * @mapping: Readahead this filesystem object.
1253 struct address_space *mapping; member
1266 .mapping = m, \
1282 * page_cache_sync_readahead - generic file readahead
1283 * @mapping: address_space which holds the pagecache and I/O vectors
1295 void page_cache_sync_readahead(struct address_space *mapping, in page_cache_sync_readahead() argument
1299 DEFINE_READAHEAD(ractl, file, ra, mapping, index); in page_cache_sync_readahead()
1304 * page_cache_async_readahead - file readahead for marked pages
1305 * @mapping: address_space which holds the pagecache and I/O vectors
1318 void page_cache_async_readahead(struct address_space *mapping, in page_cache_async_readahead() argument
1322 DEFINE_READAHEAD(ractl, file, ra, mapping, index); in page_cache_async_readahead()
1330 BUG_ON(ractl->_batch_count > ractl->_nr_pages); in __readahead_folio()
1331 ractl->_nr_pages -= ractl->_batch_count; in __readahead_folio()
1332 ractl->_index += ractl->_batch_count; in __readahead_folio()
1334 if (!ractl->_nr_pages) { in __readahead_folio()
1335 ractl->_batch_count = 0; in __readahead_folio()
1339 folio = xa_load(&ractl->mapping->i_pages, ractl->_index); in __readahead_folio()
1341 ractl->_batch_count = folio_nr_pages(folio); in __readahead_folio()
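A filesystem's ->readahead method typically drains the request by calling readahead_folio() in a loop; folios arrive locked, and the filesystem unlocks them (and marks them uptodate) when its read I/O completes. A minimal sketch with the I/O submission elided:

/* Sketch only: skeleton of an ->readahead implementation. */
static void example_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL) {
		/* ... queue asynchronous read I/O for @folio ... */
	}
}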
1347 * readahead_page - Get the next page to read.
1359 return &folio->page; in readahead_page()
1363 * readahead_folio - Get the next folio to read.
1383 XA_STATE(xas, &rac->mapping->i_pages, 0); in __readahead_batch()
1386 BUG_ON(rac->_batch_count > rac->_nr_pages); in __readahead_batch()
1387 rac->_nr_pages -= rac->_batch_count; in __readahead_batch()
1388 rac->_index += rac->_batch_count; in __readahead_batch()
1389 rac->_batch_count = 0; in __readahead_batch()
1391 xas_set(&xas, rac->_index); in __readahead_batch()
1393 xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { in __readahead_batch()
1399 rac->_batch_count += thp_nr_pages(page); in __readahead_batch()
1409 * readahead_page_batch - Get a batch of pages to read.
1423 * readahead_pos - The byte offset into the file of this readahead request.
1428 return (loff_t)rac->_index * PAGE_SIZE; in readahead_pos()
1432 * readahead_length - The number of bytes in this readahead request.
1437 return rac->_nr_pages * PAGE_SIZE; in readahead_length()
1441 * readahead_index - The index of the first page in this readahead request.
1446 return rac->_index; in readahead_index()
1450 * readahead_count - The number of pages in this readahead request.
1455 return rac->_nr_pages; in readahead_count()
1459 * readahead_batch_length - The number of bytes in the current batch.
1464 return rac->_batch_count * PAGE_SIZE; in readahead_batch_length()
1469 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> in dir_pages()
1474 * folio_mkwrite_check_truncate - check if folio was truncated
1479 * or -EFAULT if the folio was truncated.
1488 if (!folio->mapping) in folio_mkwrite_check_truncate()
1489 return -EFAULT; in folio_mkwrite_check_truncate()
1492 if (folio_next_index(folio) - 1 < index) in folio_mkwrite_check_truncate()
1495 if (folio->index > index || !offset) in folio_mkwrite_check_truncate()
1496 return -EFAULT; in folio_mkwrite_check_truncate()
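In a ->page_mkwrite handler the check runs after locking the folio; a negative return means the fault raced with truncation. A hypothetical handler sketch:

/* Sketch only: truncate check inside a ->page_mkwrite handler. */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	ssize_t len;

	folio_lock(folio);
	len = folio_mkwrite_check_truncate(folio, inode);
	if (len < 0) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;	/* raced with truncate */
	}
	folio_mark_dirty(folio);
	/* Return with the folio still locked. */
	return VM_FAULT_LOCKED;
}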
1502 * page_mkwrite_check_truncate - check if page was truncated
1507 * or -EFAULT if the page was truncated.
1516 if (page->mapping != inode->i_mapping) in page_mkwrite_check_truncate()
1517 return -EFAULT; in page_mkwrite_check_truncate()
1520 if (page->index < index) in page_mkwrite_check_truncate()
1523 if (page->index > index || !offset) in page_mkwrite_check_truncate()
1524 return -EFAULT; in page_mkwrite_check_truncate()
1530 * i_blocks_per_folio - How many blocks fit in this folio.
1543 return folio_size(folio) >> inode->i_blkbits; in i_blocks_per_folio()