Lines Matching +full:wait +full:- +full:on +full:- +full:write
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
25 * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
36 * folio_start_fscache - Start an fscache write on a folio.
40 * second write before the first one finishes is not allowed.
50 * folio_end_fscache - End an fscache write on a folio.
54 * This will wake any sleepers waiting on this folio.
62 * folio_wait_fscache - Wait for an fscache write on this folio to end.
65 * If this folio is currently being written to a local cache, wait for
66 * the write to finish. Another write may start after this one finishes,
75 * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
78 * If this folio is currently being written to a local cache, wait
79 * for the write to finish or for a fatal signal to be received.
80 * Another write may start after this one finishes, unless the caller
84 * - 0 if successful.
85 * - -EINTR if a fatal signal was encountered.
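The four helpers above form a simple start/end/wait protocol around PG_fscache. A minimal sketch of how a network filesystem might use them follows; the myfs_* names are hypothetical, error handling is trimmed, and in real code folio_end_fscache() would be called from the cache write's completion handler rather than inline.

#include <linux/netfs.h>
#include <linux/pagemap.h>

/* Hypothetical: wait (killably) for any in-flight cache write on @folio
 * before allowing it to be modified again.  Returns 0, or -EINTR if a
 * fatal signal arrived first; returns immediately if PG_fscache is clear.
 */
static int myfs_wait_for_cache_write(struct folio *folio)
{
        return folio_wait_fscache_killable(folio);
}

/* Hypothetical: copy a folio to the local cache.  folio_start_fscache()
 * takes a folio reference and sets PG_fscache; folio_end_fscache() clears
 * the flag, wakes any waiters and drops that reference.
 */
static void myfs_copy_folio_to_cache(struct folio *folio)
{
        folio_start_fscache(folio);
        /* ... submit the write to the cache backend here ... */
        folio_end_fscache(folio);       /* really done on I/O completion */
}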
112 /* Marks used on xarray-based buffers */
113 #define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
114 #define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
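The two marks record what needs doing with each entry when the buffer is torn down. Below is a sketch of such a teardown, assuming the xarray holds folio pointers; this is an illustration, not netfslib's actual cleanup code.

static void my_clear_buffer(struct xarray *buffer)
{
        struct folio *folio;
        unsigned long index;

        /* Drop the reference on every entry marked as needing a put. */
        xa_for_each_marked(buffer, index, folio, NETFS_BUF_PUT_MARK)
                folio_put(folio);

        /* Entries marked NETFS_BUF_PAGECACHE_MARK would also need their
         * writeback/dirty flags dealt with; omitted in this sketch. */
        xa_destroy(buffer);
}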
130 * Per-inode context. This wraps the VFS inode.
140 * on the server */
144 #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
145 #define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
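These are bit numbers in the context's flags word and are manipulated with the usual bitops. A hypothetical filesystem opting an inode into write-through caching could do the following (myfs_* is made up; netfs_inode() is the accessor documented further down):

static void myfs_enable_writethrough(struct inode *inode)
{
        struct netfs_inode *ctx = netfs_inode(inode);

        set_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags);
}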
149 * A netfs group - for instance a ceph snap. This is marked on dirty pages and
160 * folio->private
164 unsigned int dirty_offset; /* Write-streaming dirty data offset */
165 unsigned int dirty_len; /* Write-streaming dirty data length */
167 #define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
185 return finfo->netfs_group; in netfs_folio_group()
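Bit 0 of folio->private therefore distinguishes a write-streaming info record (struct netfs_folio) from a plain netfs_group pointer. The header's own netfs_folio_info()/netfs_folio_group() helpers perform this decoding; the sketch below only illustrates the bit trick and is not the upstream code verbatim.

static inline struct netfs_folio *my_folio_info(struct folio *folio)
{
        void *priv = folio_get_private(folio);

        /* NETFS_FOLIO_INFO set => priv points at a struct netfs_folio. */
        if ((unsigned long)priv & NETFS_FOLIO_INFO)
                return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
        return NULL;
}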
190 * Resources required to do operations on a cache.
197 unsigned int inval_counter; /* object->inval_counter at begin_op */
202 * individual read/write from/to a server, a cache, a journal, etc.
205 * the pages it points to can be relied on to exist for the duration.
210 struct list_head rreq_link; /* Link in rreq->subrequests */
219 enum netfs_io_source source; /* Where to read from/write to */
224 #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
226 #define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
232 NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
233 NETFS_WRITEBACK, /* This write was triggered by writepages */
234 NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
235 NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
236 NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
238 NETFS_DIO_WRITE, /* This is a direct I/O write */
257 struct iov_iter iter; /* Unencrypted-side iterator */
258 struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
260 struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
264 unsigned int wsize; /* Maximum write size (0 for none) */
265 unsigned int subreq_counter; /* Next subreq->debug_index */
267 atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
281 #define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
282 #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
283 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
286 #define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */
287 #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
316 /* Write request handling */
346 /* Write data to the cache */
347 int (*write)(struct netfs_cache_resources *cres, member
363 /* Prepare a write operation, working out what part of the write we can
370 /* Prepare an on-demand read operation, shortening it to a cached/uncached
386 /* High-level read API. */
391 /* High-level write API */
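The high-level entry points are meant to be wired directly into a filesystem's address_space_operations. The sketch below follows the pattern existing netfs users employ; the exact set of helpers varies by kernel version, so treat the names as assumptions to be checked against this header. Write-side entry points (e.g. the netfs_perform_write() mentioned above) are hooked up through file_operations in the same spirit.

static const struct address_space_operations myfs_aops = {
        .read_folio             = netfs_read_folio,
        .readahead              = netfs_readahead,
        .dirty_folio            = netfs_dirty_folio,
        .release_folio          = netfs_release_folio,
        .invalidate_folio       = netfs_invalidate_folio,
};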
444 * netfs_inode - Get the netfs inode context from the inode
448 * context struct is expected to directly follow on from the VFS inode struct.
456 * netfs_inode_init - Initialise a netfslib inode context
461 * Initialise the netfs library context struct. This is expected to follow on
468 ctx->ops = ops; in netfs_inode_init()
469 ctx->remote_i_size = i_size_read(&ctx->inode); in netfs_inode_init()
470 ctx->zero_point = LLONG_MAX; in netfs_inode_init()
471 ctx->flags = 0; in netfs_inode_init()
473 ctx->cache = NULL; in netfs_inode_init()
475 /* ->releasepage() drives zero_point */ in netfs_inode_init()
477 ctx->zero_point = ctx->remote_i_size; in netfs_inode_init()
478 mapping_set_release_always(ctx->inode.i_mapping); in netfs_inode_init()
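A typical embedding, following the layout the comments above require: the netfs_inode (which itself wraps the VFS inode) sits at the start of the filesystem's own inode structure, and netfs_inode_init() is called during inode setup. The myfs_* names are hypothetical, and the third, boolean argument controlling zero_point tracking is inferred from the conditional shown above; check the full header for the exact signature.

extern const struct netfs_request_ops myfs_netfs_req_ops; /* hypothetical ops table */

struct myfs_inode {
        struct netfs_inode netfs;       /* must come first: wraps the VFS inode */
        /* ... filesystem-private fields ... */
};

static void myfs_init_netfs(struct inode *inode)
{
        struct myfs_inode *mi = container_of(inode, struct myfs_inode, netfs.inode);

        /* false here skips zero_point tracking (see the conditional above). */
        netfs_inode_init(&mi->netfs, &myfs_netfs_req_ops, false);
}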
483 * netfs_resize_file - Note that a file got resized
494 ctx->remote_i_size = new_i_size; in netfs_resize_file()
495 if (new_i_size < ctx->zero_point) in netfs_resize_file()
496 ctx->zero_point = new_i_size; in netfs_resize_file()
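A sketch of when a filesystem would call this: after a truncate or any other size change the server has applied, so that netfslib can pull remote_i_size and zero_point down along with the file. The myfs_* wrapper is hypothetical and the final boolean (change already applied on the server) is assumed from this version of the helper; verify against the header.

static void myfs_truncate(struct inode *inode, loff_t new_size)
{
        struct netfs_inode *ctx = netfs_inode(inode);

        truncate_setsize(inode, new_size);
        /* The boolean indicates the server has already applied the change. */
        netfs_resize_file(ctx, new_size, true);
}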
500 * netfs_i_cookie - Get the cache cookie from the inode
508 return ctx->cache; in netfs_i_cookie()
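The cookie accessor is typically used when the inode's cache object has to be handed back to fscache, for instance on eviction. A hedged sketch follows; myfs_evict_inode() is hypothetical, while fscache_relinquish_cookie() is the standard fscache call for dropping a cookie (false means the cached data is kept rather than retired).

static void myfs_evict_inode(struct inode *inode)
{
        struct netfs_inode *ctx = netfs_inode(inode);

        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        fscache_relinquish_cookie(netfs_i_cookie(ctx), false);
}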