1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Network filesystem high-level write support.
23 NETFS_MODIFY_AND_CLEAR, /* We can assume there is no data to be downloaded. */
24 NETFS_STREAMING_WRITE, /* Store incomplete data in non-uptodate page. */
51 * write-streaming, in which case we don't want to do a local RMW cycle if we can
76 if (pos >= ctx->zero_point) in netfs_how_to_modify()
82 if (file->f_mode & FMODE_READ) in netfs_how_to_modify()
84 if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags)) in netfs_how_to_modify()
92 if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags)) in netfs_how_to_modify()
93 set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags); in netfs_how_to_modify()
104 if (offset == finfo->dirty_offset + finfo->dirty_len) in netfs_how_to_modify()
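The NETFS_STREAMING_WRITE value and the comment at line 51 describe write-streaming: when a folio is not uptodate and the write does not cover all of it, netfs can skip the local read-modify-write cycle and instead remember which byte span of the folio actually holds dirty data. The sketch below is only an illustration of that bookkeeping; the structure and helper names are stand-ins for the per-folio finfo state (dirty_offset/dirty_len) manipulated in the fragments further down, not the kernel's own definitions.

/* Illustrative sketch, not kernel code: record of the span that holds
 * valid, dirty data in a folio used for write-streaming (the folio itself
 * is not marked uptodate). */
struct streaming_region {
	unsigned int	dirty_offset;	/* start of the modified bytes */
	unsigned int	dirty_len;	/* number of modified bytes */
};

/* A further write can reuse the streaming folio without a local RMW cycle
 * only if it starts exactly where the recorded dirty span ends (compare the
 * "offset == finfo->dirty_offset + finfo->dirty_len" test at line 104);
 * otherwise netfs falls back to flushing the existing dirty data or
 * bringing the folio uptodate first. */
static bool streaming_write_is_contiguous(const struct streaming_region *r,
					  unsigned int offset)
{
	return offset == r->dirty_offset + r->dirty_len;
}

/* After copying @len more bytes onto the end of the span, just extend it. */
static void streaming_write_extend(struct streaming_region *r, unsigned int len)
{
	r->dirty_len += len;
}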
120 static struct folio *netfs_grab_folio_for_write(struct address_space *mapping, in netfs_grab_folio_for_write() argument
126 if (mapping_large_folio_support(mapping)) in netfs_grab_folio_for_write()
129 return __filemap_get_folio(mapping, index, fgp_flags, in netfs_grab_folio_for_write()
130 mapping_gfp_mask(mapping)); in netfs_grab_folio_for_write()
134 * netfs_perform_write - Copy data into the pagecache.
139 * Copy data into pagecache pages attached to the inode specified by @iocb.
144 * netfs-specific grouping such that data from an old group gets flushed before
150 struct file *file = iocb->ki_filp; in netfs_perform_write()
152 struct address_space *mapping = inode->i_mapping; in netfs_perform_write() local
158 .range_start = iocb->ki_pos, in netfs_perform_write()
159 .range_end = iocb->ki_pos + iter->count, in netfs_perform_write()
166 unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC; in netfs_perform_write()
168 loff_t i_size, pos = iocb->ki_pos, from, to; in netfs_perform_write()
172 if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) || in netfs_perform_write()
173 iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) in netfs_perform_write()
176 ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count); in netfs_perform_write()
182 wbc_attach_fdatawrite_inode(&wbc, mapping->host); in netfs_perform_write()
184 wreq = netfs_begin_writethrough(iocb, iter->count); in netfs_perform_write()
192 wreq->iocb = iocb; in netfs_perform_write()
193 wreq->cleanup = netfs_cleanup_buffered_write; in netfs_perform_write()
202 ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags); in netfs_perform_write()
206 offset = pos & (max_chunk - 1); in netfs_perform_write()
207 part = min(max_chunk - offset, iov_iter_count(iter)); in netfs_perform_write()
220 ret = -EFAULT; in netfs_perform_write()
224 folio = netfs_grab_folio_for_write(mapping, pos, part); in netfs_perform_write()
231 offset = pos & (flen - 1); in netfs_perform_write()
232 part = min_t(size_t, flen - offset, part); in netfs_perform_write()
235 ret = written ? -EINTR : -ERESTARTSYS; in netfs_perform_write()
260 zero_user_segment(&folio->page, 0, offset); in netfs_perform_write()
263 ret = -EIO; in netfs_perform_write()
270 to = from + folio_size(folio) - 1; in netfs_perform_write()
273 ret = filemap_write_and_wait_range(mapping, from, to); in netfs_perform_write()
279 if (mapping_writably_mapped(mapping)) in netfs_perform_write()
288 ret = -EFAULT; in netfs_perform_write()
299 zero_user_segment(&folio->page, offset + copied, flen); in netfs_perform_write()
323 ret = -ENOMEM; in netfs_perform_write()
326 finfo->netfs_group = netfs_get_group(netfs_group); in netfs_perform_write()
327 finfo->dirty_offset = offset; in netfs_perform_write()
328 finfo->dirty_len = copied; in netfs_perform_write()
334 finfo->dirty_len += copied; in netfs_perform_write()
335 if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) { in netfs_perform_write()
336 if (finfo->netfs_group) in netfs_perform_write()
337 folio_change_private(folio, finfo->netfs_group); in netfs_perform_write()
347 howto, folio->index); in netfs_perform_write()
348 ret = -EIO; in netfs_perform_write()
358 if (ctx->ops->update_i_size) { in netfs_perform_write()
359 ctx->ops->update_i_size(inode, pos); in netfs_perform_write()
363 fscache_update_cookie(ctx->cache, NULL, &pos); in netfs_perform_write()
380 if (wreq->iter.count == 0) in netfs_perform_write()
400 if (ret == -EIOCBQUEUED) in netfs_perform_write()
404 iocb->ki_pos += written; in netfs_perform_write()
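The kernel-doc at lines 134-144 says netfs_perform_write() copies data into the pagecache and can tag the dirtied folios with a netfs-specific group so that data from an old group gets flushed before new-group data lands on the same folios. The sketch below shows how a filesystem might define and pass such a group; the myfs_* names and the "epoch" wrapper are hypothetical, and it assumes struct netfs_group carries a refcount plus a ->free callback as in current mainline.

#include <linux/netfs.h>
#include <linux/slab.h>

/* Hypothetical filesystem-private write group ("epoch"). */
struct myfs_write_epoch {
	struct netfs_group	group;		/* netfs grouping handle */
	u64			epoch_id;
};

static void myfs_free_epoch(struct netfs_group *ng)
{
	kfree(container_of(ng, struct myfs_write_epoch, group));
}

static struct myfs_write_epoch *myfs_alloc_epoch(u64 id)
{
	struct myfs_write_epoch *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	refcount_set(&e->group.ref, 1);
	e->group.free = myfs_free_epoch;	/* run when the last reference is put */
	e->epoch_id = id;
	return e;
}

/* Copy @from into the pagecache at iocb->ki_pos with the inode lock held,
 * tagging the dirty folios with @e; passing NULL opts out of grouping. */
static ssize_t myfs_grouped_write(struct kiocb *iocb, struct iov_iter *from,
				  struct myfs_write_epoch *e)
{
	return netfs_perform_write(iocb, from, e ? &e->group : NULL);
}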
416 * netfs_buffered_write_iter_locked - write data to a file
418 * @from: iov_iter with data to write
421 * This function does all the work needed for actually writing data to a
430 * This function does *not* take care of syncing data in case of O_SYNC write.
436 * * negative error code if no data has been written at all
441 struct file *file = iocb->ki_filp; in netfs_buffered_write_iter_locked()
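netfs_buffered_write_iter_locked() is documented above as doing the actual pagecache write with the inode lock already held and as leaving O_SYNC/O_DSYNC handling to the caller. The sketch below shows the calling pattern that implies, assuming the helper's third argument is the optional netfs group (NULL here); the myfs_buffered_write_iter wrapper is illustrative, not a copy of netfs_file_write_iter().

#include <linux/fs.h>
#include <linux/netfs.h>

static ssize_t myfs_buffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	inode_unlock(inode);

	/* The locked helper does not sync: handle O_SYNC/O_DSYNC here,
	 * after dropping the inode lock. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}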
459 * netfs_file_write_iter - write data to a file
461 * @from: iov_iter with data to write
467 * * Negative error code if no data has been written at all or
473 struct file *file = iocb->ki_filp; in netfs_file_write_iter()
474 struct inode *inode = file->f_mapping->host; in netfs_file_write_iter()
478 _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode)); in netfs_file_write_iter()
483 if ((iocb->ki_flags & IOCB_DIRECT) || in netfs_file_write_iter()
484 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)) in netfs_file_write_iter()
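netfs_file_write_iter() is the top-level write helper intended to be called from a filesystem's ->write_iter; the test at lines 483-484 shows it diverting IOCB_DIRECT and NETFS_ICTX_UNBUFFERED writes off the buffered path by itself. A filesystem that leans on the netfs library can therefore delegate to it directly, roughly as sketched below. The myfs_* names are placeholders, the matching read-side helper netfs_file_read_iter() is assumed to be available, and real users (9p, afs, ...) add their own checks before delegating.

#include <linux/fs.h>
#include <linux/netfs.h>

static ssize_t myfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	return netfs_file_read_iter(iocb, to);
}

static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	return netfs_file_write_iter(iocb, from);
}

/* Other members (.mmap, which wires up netfs_page_mkwrite as sketched
 * further down, plus .fsync, .open, ...) are filesystem-specific. */
const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= myfs_read_iter,
	.write_iter	= myfs_write_iter,
};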
502 * Notification that a previously read-only page is about to become writable.
507 struct folio *folio = page_folio(vmf->page); in netfs_page_mkwrite()
508 struct file *file = vmf->vma->vm_file; in netfs_page_mkwrite()
513 _enter("%lx", folio->index); in netfs_page_mkwrite()
515 sb_start_pagefault(inode->i_sb); in netfs_page_mkwrite()
531 err = filemap_fdatawait_range(inode->i_mapping, in netfs_page_mkwrite()
538 case -ENOMEM: in netfs_page_mkwrite()
555 sb_end_pagefault(inode->i_sb); in netfs_page_mkwrite()
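netfs_page_mkwrite() above is the fault-path counterpart: it sits behind ->page_mkwrite in a filesystem's vm_operations_struct so that a read-only mapped folio can be made writable and dirtied under netfs control. Below is a hedged wiring sketch, assuming the helper takes the optional netfs group as its second argument; the myfs_* names are placeholders.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/netfs.h>

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);	/* NULL: no netfs write group */
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = generic_file_mmap(file, vma);

	if (ret == 0)
		vma->vm_ops = &myfs_vm_ops;
	return ret;
}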
563 static void netfs_kill_pages(struct address_space *mapping, in netfs_kill_pages() argument
568 pgoff_t last = (start + len - 1) / PAGE_SIZE, next; in netfs_kill_pages()
570 _enter("%llx-%llx", start, start + len - 1); in netfs_kill_pages()
575 folio = filemap_get_folio(mapping, index); in netfs_kill_pages()
589 generic_error_remove_folio(mapping, folio); in netfs_kill_pages()
601 static void netfs_redirty_pages(struct address_space *mapping, in netfs_redirty_pages() argument
606 pgoff_t last = (start + len - 1) / PAGE_SIZE, next; in netfs_redirty_pages()
608 _enter("%llx-%llx", start, start + len - 1); in netfs_redirty_pages()
613 folio = filemap_get_folio(mapping, index); in netfs_redirty_pages()
621 filemap_dirty_folio(mapping, folio); in netfs_redirty_pages()
628 balance_dirty_pages_ratelimited(mapping); in netfs_redirty_pages()
638 struct address_space *mapping = wreq->mapping; in netfs_pages_written_back() local
645 XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE); in netfs_pages_written_back()
647 _enter("%llx-%llx", wreq->start, wreq->start + wreq->len); in netfs_pages_written_back()
651 last = (wreq->start + wreq->len - 1) / PAGE_SIZE; in netfs_pages_written_back()
655 wreq->len, wreq->start, folio->index, last); in netfs_pages_written_back()
662 group = finfo->netfs_group; in netfs_pages_written_back()
705 xas_advance(&xas, folio_next_index(folio) - 1); in netfs_pages_written_back()
720 struct address_space *mapping = wreq->mapping; in netfs_cleanup_buffered_write() local
724 switch (wreq->error) { in netfs_cleanup_buffered_write()
730 pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error); in netfs_cleanup_buffered_write()
732 case -EACCES: in netfs_cleanup_buffered_write()
733 case -EPERM: in netfs_cleanup_buffered_write()
734 case -ENOKEY: in netfs_cleanup_buffered_write()
735 case -EKEYEXPIRED: in netfs_cleanup_buffered_write()
736 case -EKEYREJECTED: in netfs_cleanup_buffered_write()
737 case -EKEYREVOKED: in netfs_cleanup_buffered_write()
738 case -ENETRESET: in netfs_cleanup_buffered_write()
739 case -EDQUOT: in netfs_cleanup_buffered_write()
740 case -ENOSPC: in netfs_cleanup_buffered_write()
741 netfs_redirty_pages(mapping, wreq->start, wreq->len); in netfs_cleanup_buffered_write()
744 case -EROFS: in netfs_cleanup_buffered_write()
745 case -EIO: in netfs_cleanup_buffered_write()
746 case -EREMOTEIO: in netfs_cleanup_buffered_write()
747 case -EFBIG: in netfs_cleanup_buffered_write()
748 case -ENOENT: in netfs_cleanup_buffered_write()
749 case -ENOMEDIUM: in netfs_cleanup_buffered_write()
750 case -ENXIO: in netfs_cleanup_buffered_write()
751 netfs_kill_pages(mapping, wreq->start, wreq->len); in netfs_cleanup_buffered_write()
755 if (wreq->error) in netfs_cleanup_buffered_write()
756 mapping_set_error(mapping, wreq->error); in netfs_cleanup_buffered_write()
757 if (wreq->netfs_ops->done) in netfs_cleanup_buffered_write()
758 wreq->netfs_ops->done(wreq); in netfs_cleanup_buffered_write()
768 static void netfs_extend_writeback(struct address_space *mapping, in netfs_extend_writeback() argument
791 * under the RCU read lock - but we can't clear the dirty flags in netfs_extend_writeback()
802 if (folio->index != index) { in netfs_extend_writeback()
839 if (finfo->netfs_group != group || in netfs_extend_writeback()
840 finfo->dirty_offset > 0) { in netfs_extend_writeback()
846 len = finfo->dirty_len; in netfs_extend_writeback()
851 *_count -= folio_nr_pages(folio); in netfs_extend_writeback()
888 * Synchronously write back the locked page and any subsequent non-locked dirty
891 static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping, in netfs_write_back_from_locked_folio() argument
901 struct netfs_inode *ctx = netfs_inode(mapping->host); in netfs_write_back_from_locked_folio()
902 unsigned long long i_size = i_size_read(&ctx->inode); in netfs_write_back_from_locked_folio()
905 long count = wbc->nr_to_write; in netfs_write_back_from_locked_folio()
908 _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching); in netfs_write_back_from_locked_folio()
910 wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio), in netfs_write_back_from_locked_folio()
922 count -= folio_nr_pages(folio); in netfs_write_back_from_locked_folio()
931 len = wreq->len; in netfs_write_back_from_locked_folio()
934 start += finfo->dirty_offset; in netfs_write_back_from_locked_folio()
935 if (finfo->dirty_offset + finfo->dirty_len != len) { in netfs_write_back_from_locked_folio()
936 len = finfo->dirty_len; in netfs_write_back_from_locked_folio()
939 len = finfo->dirty_len; in netfs_write_back_from_locked_folio()
943 /* Trim the write to the EOF; the extra data is ignored. Also in netfs_write_back_from_locked_folio()
947 max_len = min_t(unsigned long long, max_len, end - start + 1); in netfs_write_back_from_locked_folio()
948 max_len = min_t(unsigned long long, max_len, i_size - start); in netfs_write_back_from_locked_folio()
951 netfs_extend_writeback(mapping, group, xas, &count, start, in netfs_write_back_from_locked_folio()
952 max_len, caching, &len, &wreq->upper_len); in netfs_write_back_from_locked_folio()
956 len = min_t(unsigned long long, len, i_size - start); in netfs_write_back_from_locked_folio()
963 wreq->start = start; in netfs_write_back_from_locked_folio()
964 wreq->len = len; in netfs_write_back_from_locked_folio()
972 wreq->cleanup = netfs_cleanup_buffered_write; in netfs_write_back_from_locked_folio()
974 iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start, in netfs_write_back_from_locked_folio()
975 wreq->upper_len); in netfs_write_back_from_locked_folio()
976 __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); in netfs_write_back_from_locked_folio()
978 if (ret == 0 || ret == -EIOCBQUEUED) in netfs_write_back_from_locked_folio()
979 wbc->nr_to_write -= len / PAGE_SIZE; in netfs_write_back_from_locked_folio()
984 fscache_clear_page_bits(mapping, start, len, caching); in netfs_write_back_from_locked_folio()
997 static ssize_t netfs_writepages_begin(struct address_space *mapping, in netfs_writepages_begin() argument
1039 if (finfo->netfs_group != group) { in netfs_writepages_begin()
1054 _debug("wback %lx", folio->index); in netfs_writepages_begin()
1057 * the page may be truncated or invalidated (changing page->mapping to in netfs_writepages_begin()
1059 * mapping in netfs_writepages_begin()
1062 if (wbc->sync_mode != WB_SYNC_NONE) { in netfs_writepages_begin()
1071 if (folio->mapping != mapping || in netfs_writepages_begin()
1081 if (wbc->sync_mode != WB_SYNC_NONE) { in netfs_writepages_begin()
1090 if (wbc->sync_mode == WB_SYNC_NONE) { in netfs_writepages_begin()
1100 ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas, in netfs_writepages_begin()
1112 static int netfs_writepages_region(struct address_space *mapping, in netfs_writepages_region() argument
1120 XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE); in netfs_writepages_region()
1123 ret = netfs_writepages_begin(mapping, wbc, group, &xas, in netfs_writepages_region()
1125 if (ret > 0 && wbc->nr_to_write > 0) in netfs_writepages_region()
1127 } while (ret > 0 && wbc->nr_to_write > 0); in netfs_writepages_region()
1133 * write some of the pending data back to the server
1135 int netfs_writepages(struct address_space *mapping, in netfs_writepages() argument
1149 if (wbc->range_cyclic && mapping->writeback_index) { in netfs_writepages()
1150 start = mapping->writeback_index * PAGE_SIZE; in netfs_writepages()
1151 ret = netfs_writepages_region(mapping, wbc, group, in netfs_writepages()
1156 if (wbc->nr_to_write <= 0) { in netfs_writepages()
1157 mapping->writeback_index = start / PAGE_SIZE; in netfs_writepages()
1162 end = mapping->writeback_index * PAGE_SIZE; in netfs_writepages()
1163 mapping->writeback_index = 0; in netfs_writepages()
1164 ret = netfs_writepages_region(mapping, wbc, group, &start, end); in netfs_writepages()
1166 mapping->writeback_index = start / PAGE_SIZE; in netfs_writepages()
1167 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { in netfs_writepages()
1169 ret = netfs_writepages_region(mapping, wbc, group, in netfs_writepages()
1171 if (wbc->nr_to_write > 0 && ret == 0) in netfs_writepages()
1172 mapping->writeback_index = start / PAGE_SIZE; in netfs_writepages()
1174 start = wbc->range_start; in netfs_writepages()
1175 ret = netfs_writepages_region(mapping, wbc, group, in netfs_writepages()
1176 &start, wbc->range_end); in netfs_writepages()
1190 if (wreq->error) { in netfs_cleanup_launder_folio()
1191 pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error); in netfs_cleanup_launder_folio()
1192 mapping_set_error(wreq->mapping, wreq->error); in netfs_cleanup_launder_folio()
1197 * netfs_launder_folio - Clean up a dirty folio that's being invalidated
1206 struct address_space *mapping = folio->mapping; in netfs_launder_folio() local
1210 unsigned long long i_size = i_size_read(mapping->host); in netfs_launder_folio()
1216 offset = finfo->dirty_offset; in netfs_launder_folio()
1218 len = finfo->dirty_len; in netfs_launder_folio()
1222 len = min_t(unsigned long long, len, i_size - start); in netfs_launder_folio()
1224 wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE); in netfs_launder_folio()
1235 _debug("launder %llx-%llx", start, start + len - 1); in netfs_launder_folio()
1240 wreq->cleanup = netfs_cleanup_launder_folio; in netfs_launder_folio()
1243 iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len); in netfs_launder_folio()
1244 __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); in netfs_launder_folio()
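netfs_writepages() (line 1135) and netfs_launder_folio() (line 1197) are the writeback-side entry points a filesystem exposes through its address_space_operations. The sketch below shows roughly how a filesystem that delegates its pagecache handling to the netfs library wires them up, alongside the matching read-side and folio-management helpers; the exact member set depends on the filesystem and kernel version, and myfs_aops is a placeholder name.

#include <linux/netfs.h>
#include <linux/pagemap.h>

const struct address_space_operations myfs_aops = {
	.read_folio		= netfs_read_folio,
	.readahead		= netfs_readahead,
	.dirty_folio		= netfs_dirty_folio,
	.release_folio		= netfs_release_folio,
	.invalidate_folio	= netfs_invalidate_folio,
	.launder_folio		= netfs_launder_folio,	/* see netfs_launder_folio() above */
	.writepages		= netfs_writepages,	/* see netfs_writepages() above */
	.direct_IO		= noop_direct_IO,	/* DIO is handled in ->read_iter/->write_iter */
	.migrate_folio		= filemap_migrate_folio,
};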