// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level buffered write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	if (netfs_group)
		folio_attach_private(folio, netfs_get_group(netfs_group));
}

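/*
 * Set the write grouping on a folio: take a ref on @netfs_group and attach it
 * if the folio currently has no group or just the copy-to-cache mark, or drop
 * the copy-to-cache mark if no group is wanted.
 */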
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	void *priv = folio_get_private(folio);

	if (unlikely(priv != netfs_group)) {
		if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
			folio_attach_private(folio, netfs_get_group(netfs_group));
		else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
			folio_detach_private(folio);
	}
}

/*
 * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

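	/* Hint at a folio order big enough to cover the write from the offset
	 * within the first page; the allocator may still hand back a smaller
	 * folio.
	 */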
	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}

/*
 * Update i_size and estimate the update to i_blocks to reflect the additional
 * data written into the pagecache until we can find out from the server what
 * the values actually are.
 */
static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
				loff_t i_size, loff_t pos, size_t copied)
{
	blkcnt_t add;
	size_t gap;

	if (ctx->ops->update_i_size) {
		ctx->ops->update_i_size(inode, pos);
		return;
	}

	i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
	fscache_update_cookie(ctx->cache, NULL, &pos);
#endif

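	/* Estimate how many 512-byte blocks the write added: only the bytes
	 * beyond the partially-used last sector of the old EOF count, and the
	 * total is capped at the number of sectors needed to reach @pos.
	 */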
	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
	if (copied > gap) {
		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);

		inode->i_blocks = min_t(blkcnt_t,
					DIV_ROUND_UP(pos, SECTOR_SIZE),
					inode->i_blocks + add);
	}
}

/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
 *
 * Copy data into pagecache folios attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty folios are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified.  Dirty folios may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_sync	= true,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	};
	struct netfs_io_request *wreq = NULL;
	struct folio *folio = NULL, *writethrough = NULL;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
	ssize_t written = 0, ret, ret2;
	loff_t i_size, pos = iocb->ki_pos;
	size_t max_chunk = mapping_max_folio_size(mapping);
	bool maybe_trouble = false;

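	/* For O_SYNC/O_DSYNC writes (or a filesystem that asked for
	 * write-through caching), push the data to the server as we go rather
	 * than leaving it for writeback, flushing any conflicting data that's
	 * already in the pagecache first.
	 */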
	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
	    ) {
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);

		ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		if (ret < 0) {
			wbc_detach_inode(&wbc);
			goto out;
		}

		wreq = netfs_begin_writethrough(iocb, iter->count);
		if (IS_ERR(wreq)) {
			wbc_detach_inode(&wbc);
			ret = PTR_ERR(wreq);
			wreq = NULL;
			goto out;
		}
		if (!is_sync_kiocb(iocb))
			wreq->iocb = iocb;
		netfs_stat(&netfs_n_wh_writethrough);
	} else {
		netfs_stat(&netfs_n_wh_buffered_write);
	}

	do {
		struct netfs_folio *finfo;
		struct netfs_group *group;
		unsigned long long fpos;
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */

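		/* Work out how much we can write this time round, assuming a
		 * folio of the maximum size allowed for the mapping at this
		 * file position; this gets clipped to the actual folio later.
		 */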
		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		ret = -EFAULT;
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		flen = folio_size(folio);
		fpos = folio_pos(folio);
		offset = pos - fpos;
		part = min_t(size_t, flen - offset, part);

		/* Wait for writeback to complete.  The writeback engine owns
		 * the info in folio->private and may change it until it
		 * removes the WB mark.
		 */
		if (folio_get_private(folio) &&
		    folio_wait_writeback_killable(folio)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* Decide how we should modify a folio.  We might be attempting
		 * to do write-streaming, in which case we don't want to do a
		 * local RMW cycle if we can avoid it.  If we're doing local
		 * caching or content crypto, we award that priority over
		 * avoiding RMW.  If the file is open readably, then we also
		 * assume that we may want to read what we wrote.
		 */
		finfo = netfs_folio_info(folio);
		group = netfs_folio_group(folio);

		if (unlikely(group != netfs_group) &&
		    group != NETFS_FOLIO_COPY_TO_CACHE)
			goto flush_content;

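		/* If the folio is already uptodate, we can just copy the new
		 * data over whatever is there.
		 */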
		if (folio_test_uptodate(folio)) {
			if (mapping_writably_mapped(mapping))
				flush_dcache_folio(folio);
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			netfs_set_group(folio, netfs_group);
			trace_netfs_folio(folio, netfs_folio_is_uptodate);
			goto copied;
		}

		/* If the page is above the zero-point then we assume that the
		 * server would just return a block of zeros or a short read if
		 * we try to read it.
		 */
		if (fpos >= ctx->zero_point) {
			folio_zero_segment(folio, 0, offset);
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			folio_zero_segment(folio, offset + copied, flen);
			__netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			trace_netfs_folio(folio, netfs_modify_and_clear);
			goto copied;
		}

		/* See if we can write a whole folio in one go. */
		if (!maybe_trouble && offset == 0 && part >= flen) {
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
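			/* A short copy would leave part of the folio with no
			 * valid data, so back off, give the fault-in another
			 * chance to bring the user pages in and retry.
			 */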
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				folio_unlock(folio);
				goto retry;
			}
			__netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			trace_netfs_folio(folio, netfs_whole_folio_modify);
			goto copied;
		}

		/* We don't want to do a streaming write on a file that loses
		 * caching service temporarily because the backing store got
		 * culled, and we don't really want to do a streaming write on
		 * a file that's open for reading as ->read_folio() would then
		 * have to be able to flush it.
		 */
		if ((file->f_mode & FMODE_READ) ||
		    netfs_is_cache_enabled(ctx)) {
			if (finfo) {
				netfs_stat(&netfs_n_wh_wstream_conflict);
				goto flush_content;
			}
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			/* Note that copy-to-cache may have been set. */

			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			netfs_set_group(folio, netfs_group);
			trace_netfs_folio(folio, netfs_just_prefetch);
			goto copied;
		}

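		/* Otherwise do a streaming write: copy the data in without
		 * reading the folio first and record which part of it is
		 * dirty so that writeback need only send those bytes.
		 */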
		if (!finfo) {
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			if (offset == 0 && copied == flen) {
				__netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace_netfs_folio(folio, netfs_streaming_filled_page);
				goto copied;
			}

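			/* Only part of the folio was filled, so attach a
			 * netfs_folio recording the span that's actually
			 * dirty.
			 */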
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			trace_netfs_folio(folio, netfs_streaming_write);
			goto copied;
		}

		/* We can continue a streaming write only if it continues on
		 * from the previous.  If it overlaps, we must flush lest we
		 * suffer a partial copy and disjoint dirty regions.
		 */
		if (offset == finfo->dirty_offset + finfo->dirty_len) {
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace_netfs_folio(folio, netfs_streaming_cont_filled_page);
			} else {
				trace_netfs_folio(folio, netfs_streaming_write_cont);
			}
			goto copied;
		}

		/* Incompatible write; flush the folio and try again. */
	flush_content:
		trace_netfs_folio(folio, netfs_flush_content);
		folio_unlock(folio);
		folio_put(folio);
		ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
		if (ret < 0)
			goto out;	/* Folio already unlocked and put */
		continue;

	copied:
		flush_dcache_folio(folio);

		/* Update the inode size if we moved the EOF marker */
		pos += copied;
		i_size = i_size_read(inode);
		if (pos > i_size)
			netfs_update_i_size(ctx, inode, i_size, pos, copied);
		written += copied;

		if (likely(!wreq)) {
			folio_mark_dirty(folio);
			folio_unlock(folio);
		} else {
			netfs_advance_writethrough(wreq, &wbc, folio, copied,
						   offset + copied == flen,
						   &writethrough);
			/* Folio unlocked */
		}
	retry:
		folio_put(folio);
		folio = NULL;

		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (likely(written)) {
		/* Set indication that ctime and mtime got updated in case
		 * close is deferred.
		 */
		set_bit(NETFS_ICTX_MODIFIED_ATTR, &ctx->flags);
		if (unlikely(ctx->ops->post_modify))
			ctx->ops->post_modify(inode);
	}

	if (unlikely(wreq)) {
		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
		wbc_detach_inode(&wbc);
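		/* -EIOCBQUEUED means the write-through request will complete
		 * asynchronously and the iocb will be completed from there, so
		 * don't return a byte count here.
		 */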
		if (ret2 == -EIOCBQUEUED)
			return ret2;
		if (ret == 0 && ret2 < 0)
			ret = ret2;
	}

	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

copy_failed:
	ret = -EFAULT;
error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);

/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already.  The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);

/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

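	/* O_DIRECT writes and mounts that force unbuffered I/O bypass the
	 * pagecache entirely.
	 */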
	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);
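
/* Illustrative example: a network filesystem would typically plug this in
 * directly as its ->write_iter() handler (the struct name here is made up):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.write_iter	= netfs_file_write_iter,
 *		...
 *	};
 */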

/*
 * Notification that a previously read-only page is about to become writable.
 * The caller indicates the precise page that needs to be written to, but
 * we only track group on a per-folio basis, so we block more often than
 * we might otherwise.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct netfs_group *group;
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file);
	struct netfs_inode *ictx = netfs_inode(inode);
	vm_fault_t ret = VM_FAULT_NOPAGE;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_lock_killable(folio) < 0)
		goto out;
	if (folio->mapping != mapping)
		goto unlock;
	if (folio_wait_writeback_killable(folio) < 0)
		goto unlock;

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS;
		goto unlock;
	}

	group = netfs_folio_group(folio);
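	/* If the folio is already dirty under a different write group, it has
	 * to be flushed to the server before it can be dirtied again under
	 * the new group, lest the two groups' data get mingled; make the
	 * fault retry once writeback has been kicked off.
	 */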
	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
		folio_unlock(folio);
		err = filemap_fdatawrite_range(mapping,
					       folio_pos(folio),
					       folio_pos(folio) + folio_size(folio));
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags);
	if (ictx->ops->post_modify)
		ictx->ops->post_modify(inode);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
unlock:
	folio_unlock(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_page_mkwrite);