// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

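/* Reset the state of a subrequest and hand it back to the filesystem for
 * retransmission via its ->issue_read() method.
 */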
static void netfs_reissue_read(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
{
	subreq->error = 0;
	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_stat(&netfs_n_rh_retry_read_subreq);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	struct list_head *next;

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&stream->subrequests))
		return;

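	/* If the filesystem provides a hook, let it make its own arrangements
	 * before we start reissuing anything.
	 */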
	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !rreq->cache_resources.ops) {
		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
				subreq->retry_count++;
				netfs_reset_iter(subreq);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	next = stream->subrequests.next;

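	/* Each pass of this loop takes the next contiguous span of retryable
	 * subrequests, recarves it to the renegotiated I/O size and reissues
	 * the pieces, allocating extra subrequests if the span no longer fits
	 * in the ones it previously occupied.
	 */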
	do {
		struct netfs_io_subrequest *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false, subreq_superfluous = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->transferred, from->len);

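		/* If the first subreq in the span failed permanently, or isn't
		 * marked for retry at all, we give up on it and everything
		 * after it on the list.
		 */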
		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) {
			subreq = from;
			goto abandon;
		}

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}
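		/* The span now runs from @start for @len bytes: for example
		 * (sizes illustrative only), three contiguous 16KiB subreqs
		 * that all need retrying would coalesce into one 48KiB span
		 * to be recut below; a boundary flag or a gap ends the span
		 * early.
		 */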

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

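		/* @source now describes the entire span's worth of buffer;
		 * each reissued subreq below is given a slice of it.
		 */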
		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len) {
				subreq_superfluous = true;
				break;
			}
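			/* Rebase the subreq so that, allowing for the data it
			 * has already transferred, it still starts at @start
			 * and initially covers the rest of the span.
			 */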
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start = start - subreq->transferred;
			subreq->len   = len   + subreq->transferred;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			subreq->retry_count++;

			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (rsize) */
			stream->sreq_max_len = subreq->len;
			if (rreq->netfs_ops->prepare_read &&
			    rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

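			/* Carve out as much of the span as the (possibly
			 * shrunken) maximum I/O size allows and give the
			 * subreq the matching slice of the source buffer.
			 */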
			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
			}

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_read(rreq, subreq);
			if (subreq == to) {
				subreq_superfluous = false;
				break;
			}
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (!subreq_superfluous)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			subreq = NULL;
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
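		/* (Illustratively, if the renegotiated rsize came back much
		 * smaller than before, one old subrequest's range might now
		 * need several new subrequests to cover it.)
		 */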
		do {
			subreq = netfs_alloc_subrequest(rreq);
			if (!subreq) {
				subreq = to;
				goto abandon_after;
			}
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start = start;
			subreq->len = len;
			subreq->stream_nr = stream->stream_nr;
			subreq->retry_count = 1;

			trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len = umin(len, rreq->rsize);
			stream->sreq_max_segs = 0;

			netfs_stat(&netfs_n_rh_download);
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				goto abandon;
			}

			/* Carve out the next slice, honouring the
			 * renegotiated limits as in the loop above.
			 */
			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);

			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_read(rreq, subreq);
		} while (len);

	} while (!list_is_head(next, &stream->subrequests));

	return;

	/* If we hit an error, fail all remaining incomplete subrequests */
abandon_after:
	if (list_is_last(&subreq->rreq_link, &stream->subrequests))
		return;
	subreq = list_next_entry(subreq, rreq_link);
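	/* Mark every remaining failed or still-retryable subreq from here on
	 * as permanently failed.  -ENOMEM stands in as the blanket error
	 * since running out of memory is one of the ways we get here.
	 */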
abandon:
	list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
		    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
			continue;
		subreq->error = -ENOMEM;
		__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}
}

/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	netfs_stat(&netfs_n_rh_retry_read_req);

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
	netfs_wait_for_in_progress_stream(rreq, stream);
	clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
	netfs_retry_read_subrequests(rreq);
}

/*
 * Unlock any folios that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

	for (p = rreq->buffer.tail; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}