// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Perform retries on the subrequests of a stream that need it.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

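	/* Give the filesystem a chance to adjust the upload stream before we
	 * retry anything - it may, for example, need to renegotiate with the
	 * server, and it may mark the stream failed, in which case we give up.
	 */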
	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

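		/* Each subrequest marked for retry gets reissued over the same
		 * region; stop at the first one that failed outright.
		 */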
		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				struct iov_iter source;

				netfs_reset_iter(subreq);
				source = subreq->io_iter;
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

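	/* Otherwise, work through the list span by span: coalesce each run of
	 * contiguous subrequests that want retrying into a single region, then
	 * carve that region back up under the renegotiated limits, reusing the
	 * existing subrequests first and allocating more if the region doesn't
	 * fit.
	 */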
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

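		/* Stop altogether at the first subrequest that failed outright
		 * or that doesn't want retrying.
		 */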
		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;
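		/* Each reissued subrequest below takes its part from the front
		 * of this single source iterator covering the whole span.
		 */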

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;

			subreq->start	= start;
			subreq->len	= len;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			subreq->retry_count++;
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (wsize) */
			stream->sreq_max_len = len;
			stream->prepare_write(subreq);

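			/* ->prepare_write() may have reduced sreq_max_len (and
			 * set sreq_max_segs), so clamp this subrequest to what
			 * was actually granted.
			 */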
			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = part;
			subreq->transferred = 0;
			len -= part;
			start += part;
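			/* If the span runs beyond the last of the original
			 * subrequests, defer any boundary mark so that it can be
			 * reapplied to whichever subrequest ends up at the tail
			 * of the span.
			 */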
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
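			/* The remaining subrequests in the span had their data
			 * subsumed into the ones already reissued, so unlink and
			 * discard them.
			 */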
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source		= to->source;
			subreq->start		= start;
			subreq->debug_index	= atomic_inc_return(&wreq->subreq_counter);
			subreq->stream_nr	= to->stream_nr;
			subreq->retry_count	= 1;

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

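			/* Insert the new subrequest after the current tail of
			 * the span and make it the new tail so that further
			 * allocations go in after it, in order.
			 */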
			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len	= len;
			stream->sreq_max_segs	= INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
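			/* If that covers the last of the span, reinstate any
			 * deferred boundary mark on this final subrequest.
			 */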
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
			if (!len)
				break;

		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}

/*
 * Perform retries on the streams that need it.  If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
 */
void netfs_retry_writes(struct netfs_io_request *wreq)
{
	struct netfs_io_stream *stream;
	int s;

	netfs_stat(&netfs_n_wh_retry_write_req);

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	set_bit(NETFS_RREQ_RETRYING, &wreq->flags);
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->active)
			netfs_wait_for_in_progress_stream(wreq, stream);
	}
	clear_bit(NETFS_RREQ_RETRYING, &wreq->flags);

	// TODO: Enc: Fetch changed partial pages
	// TODO: Enc: Reencrypt content if needed.
	// TODO: Enc: Wind back transferred point.
	// TODO: Enc: Mark cache pages for retry.

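	/* Now go through the streams that the collector flagged as needing a
	 * retry and reissue their subrequests.
	 */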
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
}