xref: /linux/fs/netfs/read_retry.c (revision d4e338de17cb6532bf805fae00db8b41e914009b)
// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

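/*
 * Reissue a read subrequest.  If the subrequest's I/O iterator is a
 * folio_queue iterator, repoint the subrequest's folio_queue cursor at the
 * iterator's current position first.  The subrequest is then marked
 * in-progress again, an extra ref is taken and the request's outstanding
 * counter is bumped before handing the subrequest back to the filesystem's
 * ->issue_read() method.
 */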
static void netfs_reissue_read(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
{
	struct iov_iter *io_iter = &subreq->io_iter;

	if (iov_iter_is_folioq(io_iter)) {
		subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
		subreq->curr_folioq_slot = io_iter->folioq_slot;
		subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
	}

	atomic_inc(&rreq->nr_outstanding);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream0 = &rreq->io_streams[0];
	LIST_HEAD(sublist);
	LIST_HEAD(queue);

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&rreq->subrequests))
		return;

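	/* If the filesystem provided a hook, let it intervene before any of
	 * the subrequests are retried.
	 */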
	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !rreq->cache_resources.ops) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
				subreq->retry_count++;
				netfs_reset_iter(subreq);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	list_splice_init(&rreq->subrequests, &queue);

	do {
		struct netfs_io_subrequest *from;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part, deferred_next_donated = 0;
		bool boundary = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
		list_move_tail(&from->rreq_link, &sublist);
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->consumed, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

		deferred_next_donated = from->next_donated;
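		/* Extend the span over any immediately following subrequests
		 * that are contiguous, untouched and also flagged for retry,
		 * stopping after a subrequest that marks a boundary.
		 */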
		while ((subreq = list_first_entry_or_null(
				&queue, struct netfs_io_subrequest, rreq_link))) {
			if (subreq->start != start + len ||
			    subreq->transferred > 0 ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			list_move_tail(&subreq->rreq_link, &sublist);
			len += subreq->len;
			deferred_next_donated = subreq->next_donated;
			if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
				break;
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			list_del(&subreq->rreq_link);

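			/* Each slice is reissued as a download, even if the
			 * data was previously being read from the cache; the
			 * stream limits are renegotiated by ->prepare_read()
			 * below.
			 */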
			subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start	= start - subreq->transferred;
			subreq->len	= len   + subreq->transferred;
			stream0->sreq_max_len = subreq->len;

			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->retry_count++;

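			/* Reattach the subrequest to the live list under the
			 * request lock, absorbing any donation that
			 * accumulated whilst the list was detached.
			 */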
			spin_lock_bh(&rreq->lock);
			list_add_tail(&subreq->rreq_link, &rreq->subrequests);
			subreq->prev_donated += rreq->prev_donated;
			rreq->prev_donated = 0;
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			spin_unlock_bh(&rreq->lock);

			BUG_ON(!len);

			/* Renegotiate max_len (rsize) */
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
			}

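			/* Clip this slice to the renegotiated maximum size and
			 * segment count and carve it out of the source
			 * iterator.
			 */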
			part = umin(len, stream0->sreq_max_len);
			if (unlikely(rreq->io_streams[0].sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = deferred_next_donated;
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = 0;
			}

			netfs_reissue_read(rreq, subreq);
			if (!len)
				break;

			/* If we ran out of subrequests, allocate another. */
			if (list_empty(&sublist)) {
				subreq = netfs_alloc_subrequest(rreq);
				if (!subreq)
					goto abandon;
				subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
				subreq->start = start;

				/* We get two refs, but need just one. */
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
				trace_netfs_sreq(subreq, netfs_sreq_trace_split);
				list_add_tail(&subreq->rreq_link, &sublist);
			}
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess.
		 */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
		}

	} while (!list_empty(&queue));

	return;

	/* If we hit ENOMEM during the resplit, or if we came upon a subreq
	 * that has failed or can't be retried, fail all of the remaining
	 * subrequests and hand them back for collection.
	 */
abandon:
	list_splice_init(&sublist, &queue);
	list_for_each_entry(subreq, &queue, rreq_link) {
		if (!subreq->error)
			subreq->error = -ENOMEM;
		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}
	spin_lock_bh(&rreq->lock);
	list_splice_tail_init(&queue, &rreq->subrequests);
	spin_unlock_bh(&rreq->lock);
}

/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

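	/* Bump the outstanding counter so that the request can't be seen as
	 * complete whilst we're still requeueing and reissuing subrequests.
	 */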
	atomic_inc(&rreq->nr_outstanding);

	netfs_retry_read_subrequests(rreq);

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, false);
}

/*
 * Unlock any folios that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

	for (p = rreq->buffer; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}