// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support. */
/* Matched lines from netfs_rreq_unlock_folios(), which unlocks the folios
 * covered by a read request once I/O has completed; elided lines are
 * marked "...". */
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	...
	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
	...
	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
	...
	subreq = list_first_entry(&rreq->subrequests,
	...
	subreq_failed = (subreq->error < 0);
	...
	pg_end = folio_pos(folio) + folio_size(folio) - 1;
	...
	if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
	...
	sreq_end = subreq->start + subreq->len - 1;
	...
	account += subreq->transferred;
	if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
		...
		subreq_failed = (subreq->error < 0);
	...
	if (finfo->netfs_group)
		folio_change_private(folio, finfo->netfs_group);
	...
	if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
		if (folio->index == rreq->no_unlock_folio &&
		    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
	...
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
/* Matched lines from netfs_cache_expand_readahead(), which lets the cache
 * backend widen the readahead window to suit its granularity. */
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	...
	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
/* Matched lines from netfs_rreq_expand(): first the cache, then the netfs
 * may expand the request, and the readahead window is resized to match. */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
	...
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);
	...
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);
/* Matched line from netfs_begin_cache_read(): */
	return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
/**
 * netfs_readahead - Helper to manage a read request
 * ...
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * ...
 */
/* Matched lines from netfs_readahead(): */
	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
	...
	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
	...
	iov_iter_xarray(&rreq->iter, ITER_DEST, &ractl->mapping->i_pages,
			rreq->start, rreq->len);
/**
 * netfs_read_folio - Helper to manage a read_folio request
 * ...
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * ...
 */
/* Matched lines from netfs_read_folio(): */
	struct address_space *mapping = folio->mapping;
	...
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	...
	_enter("%lx", folio->index);
	...
	rreq = netfs_alloc_request(mapping, file,
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
	...
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	...
	unsigned int from = finfo->dirty_offset;
	unsigned int to = from + finfo->dirty_len;
	...
	ret = -ENOMEM;
	...
	rreq->direct_bv = bvec;
	rreq->direct_bv_count = nr_bvec;
	...
	part = min_t(size_t, to - off, PAGE_SIZE);
	...
	bvec_set_folio(&bvec[i++], folio, flen - to, to);
	iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
	...
	iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
			rreq->start, rreq->len);
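/* Hedged sketch: wiring the two read helpers above into a hypothetical
 * filesystem's address_space_operations.  Real users such as afs and 9p
 * plug netfs_read_folio() and netfs_readahead() in directly like this;
 * the other ops a filesystem needs are omitted here.
 */
static const struct address_space_operations example_netfs_aops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
};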
/* In some cases the write will entirely overwrite the data, so
 * netfs_skip_folio_read() can skip the read:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 * (the skip cases are restated as a predicate after this excerpt) */
	if (pos - offset + len <= i_size)
	...
		zero_user_segment(&folio->page, 0, plen);
	...
	if (pos - offset >= i_size)
	...
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
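/* Hedged illustration (names hypothetical, not from the source): the three
 * skip cases above restated as a standalone predicate, where @offset is the
 * write's offset within the folio and @plen is the folio size.  The real
 * netfs_skip_folio_read() additionally zeroes whatever it elects not to read.
 */
static bool example_can_skip_read(loff_t pos, size_t len, size_t offset,
				  size_t plen, loff_t i_size)
{
	/* Case 1: the write covers the whole folio. */
	if (offset == 0 && len >= plen)
		return true;
	/* Case 2: the folio lies entirely at or beyond the EOF. */
	if (pos - offset >= i_size)
		return true;
	/* Case 3: the write starts at the folio and reaches the EOF or beyond. */
	if (offset == 0 && pos + len >= i_size)
		return true;
	return false;
}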
/**
 * netfs_write_begin - Helper to prepare for writing
 * ...
 * @mapping: The mapping to read from
 * ...
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * ...
 * will cause the folio to be re-got and the process to be retried.
 * ...
 */
/* Matched lines from netfs_write_begin(): */
	/* from the parameter list: */ struct file *file, struct address_space *mapping,
	...
	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
	...
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	...
	if (ctx->ops->check_write_begin) {
	...
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
	...
	/* If the page is beyond the EOF, we want to clear it - unless it's ... */
	...
	rreq = netfs_alloc_request(mapping, file,
	...
	rreq->no_unlock_folio = folio->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
	...
	iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
			rreq->start, rreq->len);
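/* Hedged sketch (hypothetical "example" filesystem): a ->write_begin() that
 * simply delegates to netfs_write_begin().  The aops ->write_begin()
 * prototype has varied across kernel versions (struct page vs struct
 * folio); a folio-based form is assumed here.
 */
static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned int len,
			       struct folio **foliop, void **fsdata)
{
	return netfs_write_begin(netfs_inode(mapping->host), file, mapping,
				 pos, len, foliop, fsdata);
}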
/*
 * Preload the data into a page we're proposing to write into.
 */
/* Matched lines from netfs_prefetch_for_write(): */
	struct address_space *mapping = folio->mapping;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	...
	ret = -ENOMEM;
	...
	rreq = netfs_alloc_request(mapping, file, start, flen,
	...
	rreq->no_unlock_folio = folio->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
	...
	iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
			rreq->start, rreq->len);
/**
 * netfs_buffered_read_iter - Filesystem buffered I/O read routine
 * ...
 * @iter: destination for the data read
 * ...
 * This is the ->read_iter() routine for all filesystems that can use the page
 * cache directly.
 * ...
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
 * returned when no data can be read without waiting for I/O requests to
 * complete.
 * ...
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
 * shall be made for the read or for readahead.  When no data can be read,
 * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
 * possibly empty read shall be returned.
 */
/* Matched lines from netfs_buffered_read_iter(): */
	struct inode *inode = file_inode(iocb->ki_filp);
	...
	if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) ||
			 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)))
		return -EINVAL;
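/* Hedged sketch (hypothetical filesystem): a ->read_iter() that takes a
 * filesystem-specific lock and then defers the buffered path to
 * netfs_buffered_read_iter().  example_fs_lock_read()/_unlock_read() are
 * invented placeholders for whatever serialisation the filesystem needs.
 */
static ssize_t example_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	example_fs_lock_read(inode);
	ret = netfs_buffered_read_iter(iocb, iter);
	example_fs_unlock_read(inode);
	return ret;
}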
/**
 * netfs_file_read_iter - Generic filesystem read routine
 * ...
 * @iter: destination for the data read
 * ...
 * This is the ->read_iter() routine for all filesystems that can use the page
 * cache directly.
 * ...
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
 * returned when no data can be read without waiting for I/O requests to
 * complete.
 * ...
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
 * shall be made for the read or for readahead.  When no data can be read,
 * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
 * possibly empty read shall be returned.
 */
/* Matched lines from netfs_file_read_iter(): */
	struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host);
	...
	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
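/* Hedged sketch: where no extra per-read locking is needed,
 * netfs_file_read_iter() can be plugged into file_operations directly
 * (recent 9p does essentially this); other members omitted.
 */
static const struct file_operations example_netfs_file_ops = {
	.read_iter	= netfs_file_read_iter,
};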