// SPDX-License-Identifier: GPL-2.0-or-later
/* Direct I/O support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"

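/*
 * Prepare the iterator for a subrequest of a direct I/O read: clamp the
 * subrequest to the transport's maximum I/O size (and, if the transport
 * limits the number of segments, trim it further), then carve that slice
 * off the front of the request's buffer iterator.
 */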
static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	size_t rsize;

	rsize = umin(subreq->len, rreq->io_streams[0].sreq_max_len);
	subreq->len = rsize;

	if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
		size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
						rreq->io_streams[0].sreq_max_segs);

		if (limit < rsize) {
			subreq->len = limit;
			trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
		}
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	subreq->io_iter	= rreq->buffer.iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(&rreq->buffer.iter, subreq->len);
}

/*
 * Perform a read to a buffer from the server, slicing up the region to be read
 * according to the network rsize.
 */
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	unsigned long long start = rreq->start;
	ssize_t size = rreq->len;
	int ret = 0;

	do {
		struct netfs_io_subrequest *subreq;
		ssize_t slice;

		subreq = netfs_alloc_subrequest(rreq);
		if (!subreq) {
			ret = -ENOMEM;
			break;
		}

		subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
		subreq->start	= start;
		subreq->len	= size;

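		/* Mark the subrequest live before it is added to the stream
		 * and becomes visible to the collection routines.
		 */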
		__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

		spin_lock(&rreq->lock);
		list_add_tail(&subreq->rreq_link, &stream->subrequests);
		if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
			stream->front = subreq;
			if (!stream->active) {
				stream->collected_to = stream->front->start;
				/* Store list pointers before active flag */
				smp_store_release(&stream->active, true);
			}
		}
		trace_netfs_sreq(subreq, netfs_sreq_trace_added);
		spin_unlock(&rreq->lock);

		netfs_stat(&netfs_n_rh_download);
		if (rreq->netfs_ops->prepare_read) {
			ret = rreq->netfs_ops->prepare_read(subreq);
			if (ret < 0) {
				netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
				break;
			}
		}

		netfs_prepare_dio_read_iterator(subreq);
		slice = subreq->len;
		size -= slice;
		start += slice;
		rreq->submitted += slice;
		if (size <= 0) {
			smp_wmb(); /* Write lists before ALL_QUEUED. */
			set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
		}

		rreq->netfs_ops->issue_read(subreq);

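		/* The collector may pause subrequest generation (e.g. whilst
		 * failures are sorted out); wait for it to resume before
		 * issuing more reads.
		 */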
		if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
			netfs_wait_for_paused_read(rreq);
		if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
			break;
		cond_resched();
	} while (size > 0);

	if (unlikely(size > 0)) {
		smp_wmb(); /* Write lists before ALL_QUEUED. */
		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
		netfs_wake_collector(rreq);
	}

	return ret;
}

/*
 * Perform a read to an application buffer, bypassing the pagecache and the
 * local disk cache.
 */
static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
	ssize_t ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		return -EIO;
	}

	// TODO: Use bounce buffer if requested

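	/* Account the DIO as in flight on the inode so that truncation and
	 * suchlike wait for it (paired with inode_dio_end()).
	 */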
	inode_dio_begin(rreq->inode);

	ret = netfs_dispatch_unbuffered_reads(rreq);

	if (!rreq->submitted) {
		netfs_put_request(rreq, netfs_rreq_trace_put_no_submit);
		inode_dio_end(rreq->inode);
		ret = 0;
		goto out;
	}

	if (sync)
		ret = netfs_wait_for_read(rreq);
	else
		ret = -EIOCBQUEUED;
out:
	_leave(" = %zd", ret);
	return ret;
}

/**
 * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer.  No use is made of the pagecache.
 *
 * The caller must hold any appropriate locks.
 *
 * Return: The number of bytes read on success, %-EIOCBQUEUED if the read was
 * queued asynchronously or a negative error code.
 */
ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
{
	struct netfs_io_request *rreq;
	ssize_t ret;
	size_t orig_count = iov_iter_count(iter);
	bool sync = is_sync_kiocb(iocb);

	_enter("");

	if (!orig_count)
		return 0; /* Don't update atime */

	ret = kiocb_write_and_wait(iocb, orig_count);
	if (ret < 0)
		return ret;
	file_accessed(iocb->ki_filp);

	rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
				   iocb->ki_pos, orig_count,
				   iocb->ki_flags & IOCB_DIRECT ?
				   NETFS_DIO_READ : NETFS_UNBUFFERED_READ);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	netfs_stat(&netfs_n_rh_dio_read);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);

	/* If this is an async op, we have to keep track of the destination
	 * buffer for ourselves as the caller's iterator will be trashed when
	 * we return.
	 *
	 * In such a case, extract an iterator to represent as much of the
	 * output buffer as we can manage.  Note that the extraction might not
	 * be able to allocate a sufficiently large bvec array and may shorten
	 * the request.
	 */
	if (user_backed_iter(iter)) {
		ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
		if (ret < 0)
			goto out;
		rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
		rreq->direct_bv_count = ret;
		rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
		rreq->len = iov_iter_count(&rreq->buffer.iter);
	} else {
		rreq->buffer.iter = *iter;
		rreq->len = orig_count;
		rreq->direct_bv_unpin = false;
		iov_iter_advance(iter, orig_count);
	}

	// TODO: Set up bounce buffer if needed

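	/* For async I/O, stash the iocb so that result collection, which is
	 * offloaded to a worker thread, can complete it.
	 */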
	if (!sync) {
		rreq->iocb = iocb;
		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
	}

	ret = netfs_unbuffered_read(rreq, sync);
	if (ret < 0)
		goto out; /* May be -EIOCBQUEUED */
	if (sync) {
		// TODO: Copy from bounce buffer
		iocb->ki_pos += rreq->transferred;
		ret = rreq->transferred;
	}

out:
	netfs_put_request(rreq, netfs_rreq_trace_put_return);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);

/**
 * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer.  No use is made of the pagecache.
 *
 * Return: The number of bytes read on success, %-EIOCBQUEUED if the read was
 * queued asynchronously or a negative error code.
 */
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iter->count)
		return 0; /* Don't update atime */

	ret = netfs_start_io_direct(inode);
	if (ret == 0) {
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
		netfs_end_io_direct(inode);
	}
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter);
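
/*
 * Example usage (illustrative only; "myfs" is a hypothetical filesystem): a
 * network filesystem would typically call netfs_unbuffered_read_iter() from
 * its ->read_iter() op when O_DIRECT is in effect and fall back to the
 * buffered path otherwise, e.g.:
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		if (iocb->ki_flags & IOCB_DIRECT)
 *			return netfs_unbuffered_read_iter(iocb, to);
 *		return netfs_file_read_iter(iocb, to);
 *	}
 */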