// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

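/* Put a subrequest back in flight and hand it to the filesystem to reissue.
 * MADE_PROGRESS is cleared first so that the completion path can see whether
 * this particular attempt advanced the transfer at all.
 */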
static void netfs_reissue_read(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_stat(&netfs_n_rh_retry_read_subreq);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	struct list_head *next;

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&stream->subrequests))
		return;

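	/* Give the filesystem a chance to adjust its state before anything is
	 * reissued; no particular stream is singled out here (hence NULL).
	 */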
	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !rreq->cache_resources.ops) {
		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
				subreq->retry_count++;
				netfs_reset_iter(subreq);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and renegotiate
	 * those too.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false, subreq_superfluous = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
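		/* Resume from the point the previous attempt reached; whatever
		 * was already transferred stays as-is.
		 */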
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

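		/* Fold in any immediately following subreqs that cover
		 * contiguous buffer and are also retryable, stopping at a
		 * boundary or at the first subreq that can't be retried.
		 */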
		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;
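		/* "source" now covers exactly the span we'll carve back up
		 * into (possibly resized) subrequests.
		 */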

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len) {
				subreq_superfluous = true;
				break;
			}
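			/* Repoint this subreq at the remaining span.  start and
			 * len are offset by ->transferred so that the part
			 * already received stays accounted for.
			 */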
			subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start	= start - subreq->transferred;
			subreq->len	= len   + subreq->transferred;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->retry_count++;

			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (rsize) */
			stream->sreq_max_len = subreq->len;
			if (rreq->netfs_ops->prepare_read &&
			    rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

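			/* Carve the next piece off the front of the source
			 * iterator: at most sreq_max_len bytes and, if the
			 * stream is segment-limited, at most sreq_max_segs
			 * segments.
			 */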
			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			}

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_read(rreq, subreq);
			if (subreq == to) {
				subreq_superfluous = false;
				break;
			}
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (!subreq_superfluous)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(rreq);
			if (!subreq) {
				subreq = to;
				goto abandon_after;
			}
			subreq->source		= NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start		= start;
			subreq->len		= len;
			subreq->stream_nr	= stream->stream_nr;
			subreq->retry_count	= 1;

			trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);

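			/* Splice the new subreq in after the current tail of
			 * the rebuilt span and make it the new tail.
			 */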
			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

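			/* Reset the stream limits; ->prepare_read() below may
			 * tighten them before this subreq is sized.
			 */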
			stream->sreq_max_len	= umin(len, rreq->rsize);
			stream->sreq_max_segs	= 0;
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);

			netfs_stat(&netfs_n_rh_download);
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);

			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_read(rreq, subreq);
		} while (len);

	} while (!list_is_head(next, &stream->subrequests));

	return;

	/* If we hit an error, fail all remaining incomplete subrequests */
abandon_after:
	if (list_is_last(&subreq->rreq_link, &stream->subrequests))
		return;
	subreq = list_next_entry(subreq, rreq_link);
abandon:
	list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
		if (!subreq->error &&
		    !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
		    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
			continue;
		subreq->error = -ENOMEM;
		__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}
}

/*
 * Retry reads: wait for the request's outstanding I/O to quiesce, then
 * resubmit whatever can be retried.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	netfs_stat(&netfs_n_rh_retry_read_req);

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
	netfs_wait_for_in_progress_stream(rreq, stream);
	clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
	netfs_retry_read_subrequests(rreq);
}

/*
 * Unlock any pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

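	/* Folios the read collector already dealt with carry mark2; anything
	 * still unmarked was left locked by an abandoned subrequest.
	 */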
	for (p = rreq->buffer.tail; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}