// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"
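
/*
 * Reset a subrequest's state and hand it back to the filesystem to be
 * reissued: the progress marker is cleared, the in-progress flag is set
 * again and a ref is taken to cover the new issuance.
 */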
static void netfs_reissue_read(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	struct list_head *next;

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&stream->subrequests))
		return;

	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !rreq->cache_resources.ops) {
		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
				subreq->retry_count++;
				netfs_reset_iter(subreq);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
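		/* Resume from the point the first retryable subreq got to. */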
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;
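			/* Flip the subreq over to a download and fold the part
			 * already transferred back into the range so that
			 * subreq->start + subreq->transferred addresses the
			 * first byte still to be fetched.
			 */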
			subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start	= start - subreq->transferred;
			subreq->len	= len   + subreq->transferred;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->retry_count++;

			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (rsize) */
			stream->sreq_max_len = subreq->len;
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
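			/* Carve the next part off the front of the source
			 * iterator: the subreq takes a truncated copy and the
			 * source is advanced past it for the next subreq.
			 */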
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			}

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_read(rreq, subreq);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(rreq);
			if (!subreq) {
				subreq = to;
				goto abandon_after;
			}
			subreq->source		= NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start		= start;
			subreq->len		= len;
			subreq->debug_index	= atomic_inc_return(&rreq->subreq_counter);
			subreq->stream_nr	= stream->stream_nr;
			subreq->retry_count	= 1;

			trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

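			/* Chain the new subreq after the current tail of the
			 * span and advance the tail pointer so that boundary
			 * marking and error abandonment hit the last subreq
			 * issued.
			 */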
			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len	= umin(len, rreq->rsize);
			stream->sreq_max_segs	= 0;

			netfs_stat(&netfs_n_rh_download);
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);

			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_read(rreq, subreq);
		} while (len);

	} while (!list_is_head(next, &stream->subrequests));

	return;

	/* If we hit an error, fail all remaining incomplete subrequests */
abandon_after:
	if (list_is_last(&subreq->rreq_link, &stream->subrequests))
		return;
	subreq = list_next_entry(subreq, rreq_link);
abandon:
	list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
		if (!subreq->error &&
		    !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
		    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
			continue;
		subreq->error = -ENOMEM;
		__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}
}

/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
		wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);
	}

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
	netfs_retry_read_subrequests(rreq);
}

/*
 * Unlock any of the pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

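	/* Walk the rolling buffer and unlock any folio whose slot doesn't
	 * carry the second folioq mark (i.e. that the read collector hasn't
	 * already dealt with).
	 */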
	for (p = rreq->buffer.tail; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}