Lines matching full:mapping
Each hit below gives the source line number, the matching code, and the enclosing function; hits tagged "argument" use mapping as a function parameter, and hits tagged "local" declare it as a local variable.
120 static struct folio *netfs_grab_folio_for_write(struct address_space *mapping, in netfs_grab_folio_for_write() argument
126 if (mapping_large_folio_support(mapping)) in netfs_grab_folio_for_write()
129 return __filemap_get_folio(mapping, index, fgp_flags, in netfs_grab_folio_for_write()
130 mapping_gfp_mask(mapping)); in netfs_grab_folio_for_write()
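The hits above are the page-cache lookup used by the buffered write path: given the target mapping and write position, ask the page cache for a folio, allowing a large folio when the mapping supports it. A minimal kernel-context sketch of that pattern follows; the FGP flags are not visible in the hits, so FGP_WRITEBEGIN and the fgf_set_order() size hint are illustrative assumptions, and the helper name is hypothetical.

#include <linux/pagemap.h>

/* Sketch: look up or create a folio in 'mapping' covering a write of 'part'
 * bytes at 'pos'. FGP_WRITEBEGIN / fgf_set_order() are assumed flags, not
 * taken from the listing above. */
static struct folio *grab_folio_for_write_sketch(struct address_space *mapping,
						 loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

	/* Only ask for a multi-page folio if the mapping allows large folios. */
	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}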
152 struct address_space *mapping = inode->i_mapping; in netfs_perform_write() local
176 ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count); in netfs_perform_write()
182 wbc_attach_fdatawrite_inode(&wbc, mapping->host); in netfs_perform_write()
202 ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags); in netfs_perform_write()
224 folio = netfs_grab_folio_for_write(mapping, pos, part); in netfs_perform_write()
273 ret = filemap_write_and_wait_range(mapping, from, to); in netfs_perform_write()
279 if (mapping_writably_mapped(mapping)) in netfs_perform_write()
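Taken together, the netfs_perform_write() hits outline the buffered-write loop: flush and wait on the affected range, attach a writeback context to the inode, throttle against the dirty-page limits, grab a folio, and cope with the mapping also being writably mmap()'d. A simplified sketch of that flow; the copy-in and dirtying step is elided, and helper names other than the mm/pagemap calls are hypothetical.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/cacheflush.h>
#include <linux/writeback.h>
#include <linux/minmax.h>
#include <linux/uio.h>

/* Sketch of a buffered-write loop over an address_space. The real
 * netfs_perform_write() carries much more state (write groups, streaming
 * writes, conditional flushing); only the mapping-related steps visible in
 * the hits above are shown. */
static ssize_t perform_write_sketch(struct inode *inode, loff_t pos,
				    struct iov_iter *iter)
{
	struct address_space *mapping = inode->i_mapping;
	ssize_t written = 0;
	int ret;

	/* Flush anything already dirty over the range about to be modified. */
	ret = filemap_write_and_wait_range(mapping, pos, pos + iov_iter_count(iter));
	if (ret < 0)
		return ret;

	while (iov_iter_count(iter)) {
		struct folio *folio;
		size_t part = min_t(size_t, iov_iter_count(iter),
				    PAGE_SIZE - offset_in_page(pos));

		/* Throttle the writer if too much memory is already dirty. */
		ret = balance_dirty_pages_ratelimited_flags(mapping, 0);
		if (ret < 0)
			break;

		folio = grab_folio_for_write_sketch(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		/* If the file is also writably mmap()'d, flush the CPU cache
		 * before copying in new data. */
		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		/* ... copy 'part' bytes from 'iter' into the folio and mark
		 * it dirty (elided) ... */
		iov_iter_advance(iter, part);

		folio_unlock(folio);
		folio_put(folio);
		pos += part;
		written += part;
	}

	return written ?: ret;
}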
563 static void netfs_kill_pages(struct address_space *mapping, in netfs_kill_pages() argument
575 folio = filemap_get_folio(mapping, index); in netfs_kill_pages()
589 generic_error_remove_folio(mapping, folio); in netfs_kill_pages()
601 static void netfs_redirty_pages(struct address_space *mapping, in netfs_redirty_pages() argument
613 folio = filemap_get_folio(mapping, index); in netfs_redirty_pages()
621 filemap_dirty_folio(mapping, folio); in netfs_redirty_pages()
628 balance_dirty_pages_ratelimited(mapping); in netfs_redirty_pages()
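The two error-path helpers walk a byte range of the mapping folio by folio: netfs_kill_pages() removes folios from the cache after an unrecoverable failure (generic_error_remove_folio()), while netfs_redirty_pages() marks them dirty again so a later writeback pass retries them, then lets dirty throttling account for the lack of progress. A minimal sketch of the redirty side, with locking and writeback-state handling simplified:

#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Sketch: put every folio in [start, start + len) back on the dirty list
 * after a failed write-back attempt. */
static void redirty_range_sketch(struct address_space *mapping,
				 loff_t start, size_t len)
{
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE;

	while (index <= last) {
		struct folio *folio = filemap_get_folio(mapping, index);

		if (IS_ERR(folio)) {		/* hole in the range; skip */
			index++;
			continue;
		}

		/* Re-dirty the folio against this mapping so a later
		 * writeback pass picks it up again. */
		filemap_dirty_folio(mapping, folio);
		index = folio_next_index(folio);
		folio_put(folio);
	}

	/* Pages were dirtied without forward progress; let the dirty-page
	 * throttling machinery know. */
	balance_dirty_pages_ratelimited(mapping);
}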
638 struct address_space *mapping = wreq->mapping; in netfs_pages_written_back() local
645 XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE); in netfs_pages_written_back()
720 struct address_space *mapping = wreq->mapping; in netfs_cleanup_buffered_write() local
741 netfs_redirty_pages(mapping, wreq->start, wreq->len); in netfs_cleanup_buffered_write()
751 netfs_kill_pages(mapping, wreq->start, wreq->len); in netfs_cleanup_buffered_write()
756 mapping_set_error(mapping, wreq->error); in netfs_cleanup_buffered_write()
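On completion the request's folios are found by walking the mapping's xarray (mapping->i_pages) directly from the request's start offset, and their writeback state is ended; the cleanup hits show that a failed request is either redirtied or killed as above and the error is recorded on the mapping with mapping_set_error(). A sketch of the completion-side xarray walk (group handling and cache bookkeeping omitted):

#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Sketch: end writeback on every folio of a completed write spanning
 * [start, start + len) in 'mapping'. */
static void end_writeback_range_sketch(struct address_space *mapping,
				       loff_t start, size_t len)
{
	pgoff_t last = (start + len - 1) / PAGE_SIZE;
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	rcu_read_lock();
	xas_for_each(&xas, folio, last) {
		/* Skip transient xarray retry entries. */
		if (xas_retry(&xas, folio))
			continue;
		folio_end_writeback(folio);
	}
	rcu_read_unlock();
}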
768 static void netfs_extend_writeback(struct address_space *mapping, in netfs_extend_writeback() argument
891 static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping, in netfs_write_back_from_locked_folio() argument
901 struct netfs_inode *ctx = netfs_inode(mapping->host); in netfs_write_back_from_locked_folio()
910 wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio), in netfs_write_back_from_locked_folio()
951 netfs_extend_writeback(mapping, group, xas, &count, start, in netfs_write_back_from_locked_folio()
974 iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start, in netfs_write_back_from_locked_folio()
984 fscache_clear_page_bits(mapping, start, len, caching); in netfs_write_back_from_locked_folio()
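After the request has been set up and extended over the contiguous run of dirty folios, the data is not copied into a separate buffer: the request's iterator is pointed straight at the page cache with iov_iter_xarray() over mapping->i_pages, and on failure the fscache page bits for the span are cleared. A short sketch of building such a zero-copy source iterator:

#include <linux/pagemap.h>
#include <linux/uio.h>

/* Sketch: describe [start, start + len) of the page cache as the source of a
 * write, without copying the data. Folios in the range must be held stable
 * (e.g. under writeback) while the iterator is in use. */
static void make_writeback_iter_sketch(struct address_space *mapping,
				       loff_t start, size_t len,
				       struct iov_iter *iter)
{
	iov_iter_xarray(iter, ITER_SOURCE, &mapping->i_pages, start, len);
}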
997 static ssize_t netfs_writepages_begin(struct address_space *mapping, in netfs_writepages_begin() argument
1057 * the page may be truncated or invalidated (changing page->mapping to in netfs_writepages_begin()
1059 * mapping in netfs_writepages_begin()
1071 if (folio->mapping != mapping || in netfs_writepages_begin()
1100 ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas, in netfs_writepages_begin()
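The writepages hits include the standard truncation guard that the in-source comment at lines 1057-1059 describes: once a candidate folio has been locked, folio->mapping must be rechecked against the mapping being flushed, because truncation or invalidation can clear it between lookup and lock. A sketch of that lock-and-recheck pattern:

#include <linux/pagemap.h>

/* Sketch: lock a folio for writeback and confirm it still belongs to the
 * mapping being flushed and still needs writing. */
static bool lock_folio_for_writeback_sketch(struct address_space *mapping,
					    struct folio *folio)
{
	folio_lock(folio);

	/* Truncation or invalidation may have detached the folio from the
	 * mapping (folio->mapping becomes NULL) while it was unlocked. */
	if (folio->mapping != mapping || !folio_test_dirty(folio)) {
		folio_unlock(folio);
		return false;
	}
	return true;
}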
1112 static int netfs_writepages_region(struct address_space *mapping, in netfs_writepages_region() argument
1120 XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE); in netfs_writepages_region()
1123 ret = netfs_writepages_begin(mapping, wbc, group, &xas, in netfs_writepages_region()
1135 int netfs_writepages(struct address_space *mapping, in netfs_writepages() argument
1149 if (wbc->range_cyclic && mapping->writeback_index) { in netfs_writepages()
1150 start = mapping->writeback_index * PAGE_SIZE; in netfs_writepages()
1151 ret = netfs_writepages_region(mapping, wbc, group, in netfs_writepages()
1157 mapping->writeback_index = start / PAGE_SIZE; in netfs_writepages()
1162 end = mapping->writeback_index * PAGE_SIZE; in netfs_writepages()
1163 mapping->writeback_index = 0; in netfs_writepages()
1164 ret = netfs_writepages_region(mapping, wbc, group, &start, end); in netfs_writepages()
1166 mapping->writeback_index = start / PAGE_SIZE; in netfs_writepages()
1169 ret = netfs_writepages_region(mapping, wbc, group, in netfs_writepages()
1172 mapping->writeback_index = start / PAGE_SIZE; in netfs_writepages()
1175 ret = netfs_writepages_region(mapping, wbc, group, in netfs_writepages()
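netfs_writepages() is the usual range_cyclic dance around mapping->writeback_index: resume from where the previous pass stopped, and if that reaches the end of the file with budget left, wrap around and cover the region before the resume point, updating writeback_index for the next pass. A sketch of that control flow; write_region_sketch() is a hypothetical stand-in for netfs_writepages_region(), and the branch details are inferred from the hits rather than copied verbatim.

#include <linux/limits.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical stand-in for the per-range worker: write back everything dirty
 * in [*_start, end), advancing *_start past what was examined. Body elided. */
static int write_region_sketch(struct address_space *mapping,
			       struct writeback_control *wbc,
			       unsigned long long *_start,
			       unsigned long long end)
{
	/* ... walk and write back the dirty folios here ... */
	*_start = end;
	return 0;
}

static int writepages_cyclic_sketch(struct address_space *mapping,
				    struct writeback_control *wbc)
{
	unsigned long long start, end;
	int ret;

	if (wbc->range_cyclic && mapping->writeback_index) {
		/* Resume from where the previous cyclic pass left off. */
		start = mapping->writeback_index * PAGE_SIZE;
		ret = write_region_sketch(mapping, wbc, &start, ULLONG_MAX);
		if (ret < 0)
			return ret;
		if (wbc->nr_to_write <= 0) {
			mapping->writeback_index = start / PAGE_SIZE;
			return 0;
		}

		/* Wrap around: cover the part of the file before the old
		 * resume point, then record where to pick up next time. */
		start = 0;
		end = mapping->writeback_index * PAGE_SIZE;
		mapping->writeback_index = 0;
		ret = write_region_sketch(mapping, wbc, &start, end);
		if (ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else if (wbc->range_cyclic) {
		/* First cyclic pass over this mapping: start from offset 0. */
		start = 0;
		ret = write_region_sketch(mapping, wbc, &start, ULLONG_MAX);
		if (ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else {
		/* Caller asked for an explicit byte range. */
		start = wbc->range_start;
		ret = write_region_sketch(mapping, wbc, &start, wbc->range_end);
	}
	return ret;
}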
1192 mapping_set_error(wreq->mapping, wreq->error); in netfs_cleanup_launder_folio()
1206 struct address_space *mapping = folio->mapping; in netfs_launder_folio() local
1210 unsigned long long i_size = i_size_read(mapping->host); in netfs_launder_folio()
1224 wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE); in netfs_launder_folio()
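The launder path writes a single dirty folio back synchronously before it is invalidated: the mapping comes from the folio itself, the write length is clamped against the current i_size, a one-off write request is allocated, and any error ends up on the mapping via mapping_set_error() (the cleanup hit at line 1192). A small sketch of the length-clamping step only; the request setup is netfs-internal and not reproduced here.

#include <linux/pagemap.h>

/* Sketch: how much of 'folio' needs to be written out before invalidation,
 * clamped so we never write past the current end of file. */
static size_t launder_len_sketch(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	unsigned long long i_size = i_size_read(mapping->host);
	unsigned long long start = folio_pos(folio);
	size_t len = folio_size(folio);

	if (start >= i_size)
		return 0;			/* wholly beyond EOF */
	if (start + len > i_size)
		len = i_size - start;		/* partial final folio */
	return len;
}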