Searched refs: folio_size (Results 1 – 25 of 125), sorted by relevance
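Note: every hit below goes through the same helper. folio_size() returns the number of bytes a folio covers; callers use it to bound copies and zeroing, to advance address cursors by whole folios, and to test whether an operation spans a full folio or only part of one. As a reference point, here is a minimal sketch of the helper, roughly as it appears in include/linux/mm.h (verify against your own tree):

	/*
	 * Sketch of the helper every result below references; roughly the
	 * include/linux/mm.h definition, reproduced here for orientation.
	 */
	static inline size_t folio_size(const struct folio *folio)
	{
		/* A folio is a power-of-two run of pages, so its byte size is
		 * PAGE_SIZE shifted left by the folio order. */
		return PAGE_SIZE << folio_order(folio);
	}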

/linux/mm/damon/
paddr.c
79 *folio_sz = folio_size(folio); in damon_pa_young()
187 *sz_filter_passed += folio_size(folio) / addr_unit; in damon_pa_pageout()
198 addr += folio_size(folio); in damon_pa_pageout()
228 *sz_filter_passed += folio_size(folio) / addr_unit; in damon_pa_mark_accessed_or_deactivate()
236 addr += folio_size(folio); in damon_pa_mark_accessed_or_deactivate()
278 *sz_filter_passed += folio_size(folio) / addr_unit; in damon_pa_migrate()
284 addr += folio_size(folio); in damon_pa_migrate()
312 *sz_filter_passed += folio_size(folio) / addr_unit; in damon_pa_stat()
313 addr += folio_size(folio); in damon_pa_stat()
/linux/include/linux/
highmem.h
404 VM_BUG_ON(dst_off + len > folio_size(dst_folio)); in memcpy_folio()
405 VM_BUG_ON(src_off + len > folio_size(src_folio)); in memcpy_folio()
479 VM_BUG_ON(offset + len > folio_size(folio)); in memcpy_from_folio()
507 VM_BUG_ON(offset + len > folio_size(folio)); in memcpy_to_folio()
544 size_t len = folio_size(folio) - offset; in folio_zero_tail()
582 VM_BUG_ON(offset + len > folio_size(folio)); in folio_fill_tail()
625 len = min(len, folio_size(folio) - offset); in memcpy_from_file_folio()
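The highmem.h helpers above use folio_size() in two ways: as the hard bound behind the VM_BUG_ON() checks, and as the limit when clamping a requested length to what remains in the folio (folio_zero_tail(), memcpy_from_file_folio()). A caller-side sketch of that clamping pattern, with hypothetical names buf, pos and want (a size_t byte count):

	/* Hypothetical caller: copy up to "want" bytes starting at file
	 * position "pos", never reading past the end of the folio. */
	size_t offset = offset_in_folio(folio, pos);
	size_t len = min(want, folio_size(folio) - offset);
	memcpy_from_folio(buf, folio, offset, len);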
/linux/mm/
page_io.c
399 bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0); in swap_writepage_fs()
400 sio->len += folio_size(folio); in swap_writepage_fs()
418 bio_add_folio_nofail(&bio, folio, folio_size(folio), 0); in swap_writepage_bdev_sync()
438 bio_add_folio_nofail(bio, folio, folio_size(folio), 0); in swap_writepage_bdev_async()
533 folio_zero_range(folio, 0, folio_size(folio)); in swap_read_folio_zeromap()
561 bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0); in swap_read_folio_fs()
562 sio->len += folio_size(folio); in swap_read_folio_fs()
580 bio_add_folio_nofail(&bio, folio, folio_size(folio), 0); in swap_read_folio_bdev_sync()
602 bio_add_folio_nofail(bio, folio, folio_size(folio), 0); in swap_read_folio_bdev_async()
truncate.c
160 folio_invalidate(folio, 0, folio_size(folio)); in truncate_cleanup_folio()
220 size_t size = folio_size(folio); in truncate_inode_partial_folio()
415 same_folio = lend < folio_pos(folio) + folio_size(folio); in truncate_inode_pages_range()
878 folio_size(folio)); in pagecache_isize_extended()
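The truncate.c hits combine folio_pos() and folio_size() to delimit the byte range a folio covers in its file, which decides whether a truncation boundary lands inside the folio (partial handling) or past it (the whole folio can go). An illustrative restatement of the same_folio check above, with lend as the inclusive last byte being truncated:

	/* Illustrative only: the byte range this folio covers in the file. */
	loff_t fstart = folio_pos(folio);		/* first byte */
	loff_t fend = fstart + folio_size(folio) - 1;	/* last byte, inclusive */
	/* The range ends inside this folio, so only part of it is truncated. */
	bool same_folio = lend <= fend;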
/linux/fs/xfs/scrub/
xfile.c
148 len = min_t(ssize_t, count, folio_size(folio) - offset); in xfile_load()
209 len = min_t(ssize_t, count, folio_size(folio) - offset); in xfile_store()
279 if (len > folio_size(folio) - offset_in_folio(folio, pos)) { in xfile_get_folio()
308 trace_xfile_put_folio(xf, folio_pos(folio), folio_size(folio)); in xfile_put_folio()
/linux/fs/
mpage.c
156 const unsigned blocks_per_folio = folio_size(folio) >> blkbits; in do_mpage_readpage()
269 folio_zero_segment(folio, first_hole << blkbits, folio_size(folio)); in do_mpage_readpage()
461 const unsigned blocks_per_folio = folio_size(folio) >> blkbits; in mpage_write_folio()
576 length = folio_size(folio); in mpage_write_folio()
587 folio_zero_segment(folio, length, folio_size(folio)); in mpage_write_folio()
611 wbc_account_cgroup_owner(wbc, folio, folio_size(folio)); in mpage_write_folio()
buffer.c
933 offset = folio_size(folio); in folio_alloc_buffers()
1582 BUG_ON(offset >= folio_size(folio)); in folio_set_bh()
1642 BUG_ON(stop > folio_size(folio) || stop < length); in block_invalidate_folio()
1673 if (length == folio_size(folio)) in block_invalidate_folio()
2123 BUG_ON(to > folio_size(folio)); in __block_write_begin_int()
2357 to = min_t(unsigned, folio_size(folio) - from, count); in block_is_partially_uptodate()
2359 if (from < blocksize && to > folio_size(folio) - blocksize) in block_is_partially_uptodate()
2634 end = folio_size(folio); in block_page_mkwrite()
2735 if (folio_pos(folio) + folio_size(folio) <= i_size) in block_write_full_folio()
2752 folio_size(folio)); in block_write_full_folio()
/linux/fs/iomap/
buffered-io.c
232 size_t plen = min_t(loff_t, folio_size(folio) - poff, length); in iomap_adjust_read_range()
319 iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset); in iomap_read_inline_data()
462 .len = folio_size(folio), in iomap_read_folio()
593 count = min(folio_size(folio) - from, count); in iomap_is_partially_uptodate()
633 folio_size(folio)); in iomap_release_folio()
656 if (offset == 0 && len == folio_size(folio)) { in iomap_invalidate_folio()
667 size_t len = folio_size(folio); in iomap_dirty_folio()
710 pos + len >= folio_pos(folio) + folio_size(folio)) in __iomap_write_begin()
788 size_t fsize = folio_size(folio); in iomap_trim_folio_range()
1101 folio_pos(folio) + folio_size(folio) - 1); in iomap_write_delalloc_ifs_punch()
[all …]
/linux/fs/gfs2/
aops.c
85 i_size < folio_pos(folio) + folio_size(folio)) in gfs2_write_jdata_folio()
87 folio_size(folio)); in gfs2_write_jdata_folio()
115 gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio)); in __gfs2_jdata_write_folio()
205 size += folio_size(fbatch->folios[i]); in gfs2_write_jdata_batch()
471 chunk = min(size - copied, folio_size(folio) - offset); in gfs2_internal_read()
614 int partial_page = (offset || length < folio_size(folio)); in gfs2_invalidate_folio()
/linux/fs/btrfs/
accessors.c
60 const int part = eb->folio_size - oif; \
90 const int part = eb->folio_size - oif; \
extent_io.c
560 btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), folio_size(folio)); in begin_folio_read()
708 eb->folio_size = PAGE_SIZE; in alloc_eb_folio_array()
797 ASSERT(pg_offset + size <= folio_size(folio)); in submit_extent_folio()
1002 const u64 end = start + folio_size(folio) - 1; in btrfs_do_readpage()
1020 folio_size(folio) - zero_offset); in btrfs_do_readpage()
1325 const u64 end = start + folio_size(folio) - 1; in btrfs_read_folio()
1356 ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio)); in set_delalloc_bitmap()
1374 ASSERT(start >= folio_start && start < folio_start + folio_size(folio)); in find_next_delalloc_bitmap()
1410 const u64 page_end = page_start + folio_size(folio) - 1; in writepage_delalloc()
1701 start + len <= folio_start + folio_size(folio)); in extent_writepage_io()
[all …]
extent_io.h
90 u32 folio_size; member
132 ASSERT(eb->folio_size); in offset_in_eb_folio()
133 return start & (eb->folio_size - 1); in offset_in_eb_folio()
/linux/fs/ext4/
readpage.c
252 blocks_per_folio = folio_size(folio) >> blkbits; in ext4_mpage_readpages()
298 folio_size(folio)); in ext4_mpage_readpages()
332 folio_size(folio)); in ext4_mpage_readpages()
/linux/fs/netfs/
buffered_read.c
408 size_t flen = folio_size(folio); in netfs_read_gaps()
510 folio_pos(folio), folio_size(folio), in netfs_read_folio()
563 size_t plen = folio_size(folio); in netfs_skip_folio_read()
665 folio_pos(folio), folio_size(folio), in netfs_write_begin()
723 size_t flen = folio_size(folio); in netfs_prefetch_for_write()
read_pgpriv2.c
22 size_t fsize = folio_size(folio), flen = fsize; in netfs_pgpriv2_copy_folio()
195 fsize = folio_size(folio); in netfs_pgpriv2_unlock_copied_folios()
/linux/fs/ubifs/
file.c
119 folio_zero_range(folio, 0, folio_size(folio)); in do_readpage()
242 if (pos == folio_pos(folio) && len >= folio_size(folio)) in write_begin_slow()
434 if (pos == folio_pos(folio) && len >= folio_size(folio)) { in ubifs_write_begin()
555 if (len == folio_size(folio)) in ubifs_write_end()
608 folio_zero_range(folio, 0, folio_size(folio)); in populate_page()
988 int err, len = folio_size(folio); in ubifs_writepage()
1030 folio_zero_segment(folio, len, folio_size(folio)); in ubifs_writepage()
1291 if (offset || length < folio_size(folio)) in ubifs_invalidate_folio()
/linux/fs/nfs/
read.c
57 folio_zero_segment(folio, 0, folio_size(folio)); in nfs_return_empty_folio()
292 size_t fsize = folio_size(folio); in nfs_read_add_folio()
371 size_t len = folio_size(folio); in nfs_read_folio()
file.c
304 size_t end = folio_size(folio); in nfs_truncate_last_folio()
443 size_t fsize = folio_size(folio); in nfs_write_end()
491 if (offset != 0 || length < folio_size(folio)) in nfs_invalidate_folio()
565 folio_size(folio), ret); in nfs_launder_folio()
/linux/arch/nios2/mm/
cacheflush.c
172 __flush_dcache(start, start + folio_size(folio)); in __flush_dcache_folio()
196 flush_icache_range(start, start + folio_size(folio)); in flush_dcache_folio()
/linux/fs/hfs/
btree.c
86 folio_zero_range(folio, 0, folio_size(folio)); in hfs_btree_open()
91 size = folio_size(folio); in hfs_btree_open()
102 len = min_t(size_t, folio_size(folio), sb->s_blocksize); in hfs_btree_open()
/linux/arch/arm64/mm/
flush.c
59 folio_size(folio)); in __sync_icache_dcache()
/linux/arch/microblaze/include/asm/
cacheflush.h
81 flush_dcache_range(addr, addr + folio_size(folio)); in flush_dcache_folio()
/linux/arch/sh/mm/
cache.c
154 folio_size(folio)); in __update_cache()
174 folio_size(folio)); in __flush_anon_page()
/linux/fs/vboxsf/
file.c
281 u32 nwrite = folio_size(folio); in vboxsf_writepages()
332 if (!folio_test_uptodate(folio) && nwritten == folio_size(folio)) in vboxsf_write_end()
/linux/lib/
buildid.c
63 file_off < r->folio_off + folio_size(r->folio)) in freader_get_folio()
128 folio_sz = folio_size(r->folio); in freader_fetch()
