// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages imply there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and write out the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
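/*
 * Illustrative walk-through of the accounting above (the numbers are
 * made up): dirtying two pages with no snapshot pending leaves
 * i_wrbuffer_ref == 2 and i_wrbuffer_ref_head == 2.  Taking a snapshot
 * moves the head count into the new capsnap (capsnap->dirty == 2,
 * i_wrbuffer_ref_head == 0), while i_wrbuffer_ref stays at 2 until the
 * pages complete writeback and ceph_put_wrbuffer_cap_refs() drops the
 * references.
 */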

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
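/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12, and a
 * hypothetical congestion_kb of 8192): writeback counts as congested once
 * 8192 >> 2 == 2048 pages are in flight, and clears again when the count
 * drops below 2048 - (2048 >> 2) == 1536 pages, i.e. 75% of the on
 * threshold.
 */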

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (folio_test_dirty(folio)) {
		doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
		      ceph_vinop(inode), folio, folio->index);
		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
		return false;
	}

	atomic64_inc(&mdsc->dirty_folios);

	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
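	/* the first dirty page on an inode pins it until writeback drains */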
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
	      "snapc %p seq %lld (%d snaps)\n",
	      ceph_vinop(inode), folio, folio->index,
	      ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	      ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	      snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in folio->private.  Also set
	 * PagePrivate so that we get invalidate_folio callback.
	 */
	VM_WARN_ON_FOLIO(folio->private, folio);
	folio_attach_private(folio, snapc);

	return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0), adjust the
 * dirty folio counters appropriately.  Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	if (offset != 0 || length != folio_size(folio)) {
		doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
		      ceph_vinop(inode), folio->index, offset, length);
		return;
	}

	WARN_ON(!folio_test_locked(folio));
	if (folio_test_private(folio)) {
		doutc(cl, "%llx.%llx idx %lu full dirty page\n",
		      ceph_vinop(inode), folio->index);

		snapc = folio_detach_private(folio);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	netfs_invalidate_folio(folio, offset, length);
}

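/*
 * Worked example for the expansion below (hypothetical layout with a
 * 4 MiB stripe_unit and ample ra_pages): a 1 MiB request starting at
 * file offset 5 MiB first has its end rounded up from 6 MiB to the
 * 8 MiB stripe boundary (capped at i_size), then its start pulled back
 * by blockoff == 1 MiB to the 4 MiB boundary, yielding a stripe-aligned
 * 4 MiB read, provided the total stays within max_len.
 */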
static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_layout *lo = &ci->i_layout;
	unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
	loff_t end = rreq->start + rreq->len, new_end;
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;
	unsigned long max_len;
	u32 blockoff;

	if (priv) {
		/* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
		if (priv->file_ra_disabled)
			max_pages = 0;
		else
			max_pages = priv->file_ra_pages;
	}

	/* Readahead is disabled */
	if (!max_pages)
		return;

	max_len = max_pages << PAGE_SHIFT;

	/*
	 * Try to expand the length forward by rounding it up to the next
	 * block, but do not exceed the file size, unless the original
	 * request already exceeds it.
	 */
	new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
	if (new_end > end && new_end <= rreq->start + max_len)
		rreq->len = new_end - rreq->start;

	/* Try to expand the start downward */
	div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
	if (rreq->len + blockoff <= max_len) {
		rreq->start -= blockoff;
		rreq->len += blockoff;
	}
}

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_io_subrequest *subreq = req->r_priv;
	struct ceph_osd_req_op *op = &req->r_ops[0];
	int err = req->r_result;
	bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, osd_data->length, err);

	doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
	      subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT) {
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		err = 0;
	} else if (err == -EBLOCKLISTED) {
		fsc->blocklisted = true;
	}

	if (err >= 0) {
		if (sparse && err > 0)
			err = ceph_sparse_ext_map_end(op);
		if (err < subreq->len &&
		    subreq->rreq->origin != NETFS_DIO_READ)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		if (IS_ENCRYPTED(inode) && err > 0) {
			err = ceph_fscrypt_decrypt_extents(inode,
					osd_data->pages, subreq->start,
					op->extent.sparse_ext,
					op->extent.sparse_ext_cnt);
			if (err > subreq->len)
				err = subreq->len;
		}
		if (err > 0)
			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	}

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		ceph_put_page_vector(osd_data->pages,
				     calc_pages_for(osd_data->alignment,
					osd_data->length), false);
	}
	if (err > 0) {
		subreq->transferred = err;
		err = 0;
	}
	subreq->error = err;
	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
	netfs_read_subreq_terminated(subreq);
	iput(req->r_inode);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_mds_reply_info_parsed *rinfo;
	struct ceph_mds_reply_info_in *iinfo;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t err = 0;
	size_t len;
	int mode;

	if (rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);

	if (subreq->start >= inode->i_size)
		goto out;

	/* We need to fetch the inline data. */
	mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_ino1 = ci->i_vino;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
	req->r_num_caps = 2;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err < 0)
		goto out;

	rinfo = &req->r_reply_info;
	iinfo = &rinfo->targeti;
	if (iinfo->inline_version == CEPH_INLINE_NONE) {
		/* The data got uninlined */
		ceph_mdsc_put_request(req);
		return false;
	}

	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &subreq->io_iter);
	if (err == 0) {
		err = -EFAULT;
	} else {
		subreq->transferred += err;
		err = 0;
	}

	ceph_mdsc_put_request(req);
out:
	subreq->error = err;
	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
	netfs_read_subreq_terminated(subreq);
	return true;
}

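/*
 * Example of the clamping below (hypothetical layout): with a 4 MiB
 * stripe_unit and stripe_count == 1, a subrequest starting at file
 * offset 5 MiB maps to offset 1 MiB within its RADOS object, so xlen is
 * at most 3 MiB and sreq_max_len becomes min(3 MiB, rsize).  Reads are
 * therefore never issued across an object boundary.
 */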
static int ceph_netfs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	rreq->io_streams[0].sreq_max_len = umin(xlen, fsc->mount_options->rsize);
	return 0;
}

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_osd_request *req = NULL;
	struct ceph_vino vino = ceph_vino(inode);
	int err;
	u64 len;
	bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
	u64 off = subreq->start;
	int extent_cnt;

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
		return;

	// TODO: This rounding here is slightly dodgy. It *should* work, for
	// now, as the cache only deals in blocks that are a multiple of
	// PAGE_SIZE and fscrypt blocks are at most PAGE_SIZE. What needs to
	// happen is for the fscrypt handling to be moved into netfslib and
	// the data in the cache also to be stored encrypted.
	len = subreq->len;
	ceph_fscrypt_adjust_off_and_len(inode, &off, &len);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
			off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
			CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
			ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	if (sparse) {
		extent_cnt = __ceph_sparse_read_ext_count(inode, len);
		err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
		if (err)
			goto out;
	}

	doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
	      ceph_vinop(inode), subreq->start, subreq->len, len);

	/*
	 * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
	 * encrypted inodes. We'd need infrastructure that handles an iov_iter
	 * instead of page arrays, and we don't have that as of yet. Once the
	 * dust settles on the write helpers and encrypt/decrypt routines for
	 * netfs, we should be able to rework this.
	 */
	if (IS_ENCRYPTED(inode)) {
		struct page **pages;
		size_t page_off;

		err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
		if (err < 0) {
			doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
			      ceph_vinop(inode), err);
			goto out;
		}

		/* should always give us a page-aligned read */
		WARN_ON_ONCE(page_off);
		len = err;
		err = 0;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
						 false);
	} else {
		osd_req_op_extent_osd_iter(req, 0, &subreq->io_iter);
	}
	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		err = -EIO;
		goto out;
	}
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	ceph_osdc_start_request(req->r_osdc, req);
out:
	ceph_osdc_put_request(req);
	if (err) {
		subreq->error = err;
		netfs_read_subreq_terminated(subreq);
	}
	doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}

static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = rreq->inode;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	int got = 0, want = CEPH_CAP_FILE_CACHE;
	struct ceph_netfs_request_data *priv;
	int ret = 0;

	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);

	if (rreq->origin != NETFS_READAHEAD)
		return 0;

	priv = kzalloc(sizeof(*priv), GFP_NOFS);
	if (!priv)
		return -ENOMEM;

	if (file) {
		struct ceph_rw_context *rw_ctx;
		struct ceph_file_info *fi = file->private_data;

		priv->file_ra_pages = file->f_ra.ra_pages;
		priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;

		rw_ctx = ceph_find_rw_context(fi);
		if (rw_ctx) {
			rreq->netfs_priv = priv;
			return 0;
		}
	}

	/*
	 * readahead callers do not necessarily hold Fcb caps
	 * (e.g. fadvise, madvise).
	 */
	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
	if (ret < 0) {
		doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
		goto out;
	}

	if (!(got & want)) {
		doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
		ret = -EACCES;
		goto out;
	}
	if (ret == 0) {
		ret = -EACCES;
		goto out;
	}

	priv->caps = got;
	rreq->netfs_priv = priv;
	rreq->io_streams[0].sreq_max_len = fsc->mount_options->rsize;

out:
	if (ret < 0) {
		if (got)
			ceph_put_cap_refs(ceph_inode(inode), got);
		kfree(priv);
	}

	return ret;
}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;

	if (!priv)
		return;

	if (priv->caps)
		ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
	kfree(priv);
	rreq->netfs_priv = NULL;
}

const struct netfs_request_ops ceph_netfs_ops = {
	.init_request		= ceph_init_request,
	.free_request		= ceph_netfs_free_request,
	.prepare_read		= ceph_netfs_prepare_read,
	.issue_read		= ceph_netfs_issue_read,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.check_write_begin	= ceph_netfs_check_write_begin,
};

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, true, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

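/*
 * Per-pass state for ceph_writepages_start(): the i_size/truncate
 * snapshot taken for the snap context being flushed, the folio batch
 * currently under examination, and the page array that will be handed
 * to the OSD write request.
 */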
struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;

	bool head_snapc;
	struct ceph_snap_context *snapc;
	struct ceph_snap_context *last_snapc;

	bool done;
	bool should_loop;
	bool range_whole;
	pgoff_t start_index;
	pgoff_t index;
	pgoff_t end;
	xa_mark_t tag;

	pgoff_t strip_unit_end;
	unsigned int wsize;
	unsigned int nr_folios;
	unsigned int max_pages;
	unsigned int locked_pages;

	int op_idx;
	int num_ops;
	u64 offset;
	u64 len;

	struct folio_batch fbatch;
	unsigned int processed_in_fbatch;

	bool from_pool;
	struct page **pages;
	struct page **data_pages;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
		      capsnap, capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
		      ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);
	u64 ret;

	snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
	if (snapc != ci->i_head_snapc) {
		bool found = false;

		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
		end = ceph_fscrypt_page_offset(page) + thp_size(page);
	ret = end > start ? end - start : 0;
	if (ret && fscrypt_is_bounce_page(page))
		ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
	return ret;
}

/*
 * Write a folio, but leave it locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., folio is no longer dirty).
 */
static int write_folio_nounlock(struct folio *folio,
				struct writeback_control *wbc)
{
	struct page *page = &folio->page;
	struct inode *inode = folio->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = folio_pos(folio);
	int err;
	loff_t len = folio_size(folio);
	loff_t wlen;
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);
	struct page *bounce_page = NULL;

	doutc(cl, "%llx.%llx folio %p idx %lu\n", ceph_vinop(inode), folio,
	      folio->index);

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(&folio->page);
	if (!snapc) {
		doutc(cl, "%llx.%llx folio %p not dirty?\n", ceph_vinop(inode),
		      folio);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		doutc(cl, "%llx.%llx folio %p snapc %p not writeable - noop\n",
		      ceph_vinop(inode), folio, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		folio_redirty_for_writepage(wbc, folio);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
		      ceph_vinop(inode), folio->index, ceph_wbc.i_size);
		folio_invalidate(folio, 0, folio_size(folio));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
	doutc(cl, "%llx.%llx folio %p index %lu on %llu~%llu snapc %p seq %lld\n",
	      ceph_vinop(inode), folio, folio->index, page_off, wlen, snapc,
	      snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = true;

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq,
				    ceph_wbc.truncate_size, true);
	if (IS_ERR(req)) {
		folio_redirty_for_writepage(wbc, folio);
		return PTR_ERR(req);
	}

	if (wlen < len)
		len = wlen;

	folio_start_writeback(folio);
	if (caching)
		ceph_set_page_fscache(&folio->page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	if (IS_ENCRYPTED(inode)) {
		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
						    CEPH_FSCRYPT_BLOCK_SIZE, 0,
						    GFP_NOFS);
		if (IS_ERR(bounce_page)) {
			folio_redirty_for_writepage(wbc, folio);
			folio_end_writeback(folio);
			ceph_osdc_put_request(req);
			return PTR_ERR(bounce_page);
		}
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > folio_size(folio));
	osd_req_op_extent_osd_data_pages(req, 0,
			bounce_page ? &bounce_page : &page, wlen, 0,
			false, false);
	doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
	      ceph_vinop(inode), page_off, len, wlen,
	      IS_ENCRYPTED(inode) ? "" : "not ");

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(osdc, req);
	err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);
	fscrypt_free_bounce_page(bounce_page);
	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;

		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			doutc(cl, "%llx.%llx interrupted page %p\n",
			      ceph_vinop(inode), folio);
			folio_redirty_for_writepage(wbc, folio);
			folio_end_writeback(folio);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		doutc(cl, "%llx.%llx setting mapping error %d %p\n",
		      ceph_vinop(inode), err, folio);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		doutc(cl, "%llx.%llx cleaned page %p\n",
		      ceph_vinop(inode), folio);
		err = 0;  /* vfs expects us to return 0 */
	}
	oldest = folio_detach_private(folio);
	WARN_ON_ONCE(oldest != snapc);
	folio_end_writeback(folio);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = false;

	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	unsigned int len = 0;
	bool remove_page;

	doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
			pr_warn_client(cl,
				"%llx.%llx incorrect op %d req %p index %d tid %llu\n",
				ceph_vinop(inode), req->r_ops[i].op, req, i,
				req->r_tid);
			break;
		}

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			if (fscrypt_is_bounce_page(page)) {
				page = fscrypt_pagecache_page(page);
				fscrypt_free_bounce_page(osd_data->pages[j]);
				osd_data->pages[j] = page;
			}
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				fsc->write_congested = false;

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);

			if (atomic64_dec_return(&mdsc->dirty_folios) <= 0) {
				wake_up_all(&mdsc->flush_end_wq);
				WARN_ON(atomic64_read(&mdsc->dirty_folios) < 0);
			}

			doutc(cl, "unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_folio(inode->i_mapping,
							   page_folio(page));

			unlock_page(page);
		}
		doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
		      ceph_vinop(inode), osd_data->length,
		      rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
	ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static inline
bool is_forced_umount(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited_client(cl,
				"%llx.%llx %lld forced umount\n",
				ceph_vinop(inode), ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return true;
	}

	return false;
}

static inline
unsigned int ceph_define_write_size(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	unsigned int wsize = i_blocksize(inode);

	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	return wsize;
}

static inline
void ceph_folio_batch_init(struct ceph_writeback_ctl *ceph_wbc)
{
	folio_batch_init(&ceph_wbc->fbatch);
	ceph_wbc->processed_in_fbatch = 0;
}

static inline
void ceph_folio_batch_reinit(struct ceph_writeback_ctl *ceph_wbc)
{
	folio_batch_release(&ceph_wbc->fbatch);
	ceph_folio_batch_init(ceph_wbc);
}

static inline
void ceph_init_writeback_ctl(struct address_space *mapping,
			     struct writeback_control *wbc,
			     struct ceph_writeback_ctl *ceph_wbc)
{
	ceph_wbc->snapc = NULL;
	ceph_wbc->last_snapc = NULL;

	ceph_wbc->strip_unit_end = 0;
	ceph_wbc->wsize = ceph_define_write_size(mapping);

	ceph_wbc->nr_folios = 0;
	ceph_wbc->max_pages = 0;
	ceph_wbc->locked_pages = 0;

	ceph_wbc->done = false;
	ceph_wbc->should_loop = false;
	ceph_wbc->range_whole = false;

	ceph_wbc->start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	ceph_wbc->index = ceph_wbc->start_index;
	ceph_wbc->end = -1;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		ceph_wbc->tag = PAGECACHE_TAG_TOWRITE;
	} else {
		ceph_wbc->tag = PAGECACHE_TAG_DIRTY;
	}

	ceph_wbc->op_idx = -1;
	ceph_wbc->num_ops = 0;
	ceph_wbc->offset = 0;
	ceph_wbc->len = 0;
	ceph_wbc->from_pool = false;

	ceph_folio_batch_init(ceph_wbc);

	ceph_wbc->pages = NULL;
	ceph_wbc->data_pages = NULL;
}

static inline
int ceph_define_writeback_range(struct address_space *mapping,
				struct writeback_control *wbc,
				struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;

	/* find oldest snap context with dirty data */
	ceph_wbc->snapc = get_oldest_context(inode, ceph_wbc, NULL);
	if (!ceph_wbc->snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		doutc(cl, " no snap context with dirty data?\n");
		return -ENODATA;
	}

	doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n",
	      ceph_wbc->snapc, ceph_wbc->snapc->seq,
	      ceph_wbc->snapc->num_snaps);

	ceph_wbc->should_loop = false;

	if (ceph_wbc->head_snapc && ceph_wbc->snapc != ceph_wbc->last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			ceph_wbc->index = ceph_wbc->start_index;
			ceph_wbc->end = -1;
			if (ceph_wbc->index > 0)
				ceph_wbc->should_loop = true;
			doutc(cl, " cyclic, start at %lu\n", ceph_wbc->index);
		} else {
			ceph_wbc->index = wbc->range_start >> PAGE_SHIFT;
			ceph_wbc->end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				ceph_wbc->range_whole = true;
			doutc(cl, " not cyclic, %lu to %lu\n",
			      ceph_wbc->index, ceph_wbc->end);
		}
	} else if (!ceph_wbc->head_snapc) {
		/* Do not respect wbc->range_{start,end}.  Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' have been written. */
		if (ceph_wbc->index > 0)
			ceph_wbc->should_loop = true;
		doutc(cl, " non-head snapc, range whole\n");
	}

	ceph_put_snap_context(ceph_wbc->last_snapc);
	ceph_wbc->last_snapc = ceph_wbc->snapc;

	return 0;
}

static inline
bool has_writeback_done(struct ceph_writeback_ctl *ceph_wbc)
{
	return ceph_wbc->done && ceph_wbc->index > ceph_wbc->end;
}

static inline
bool can_next_page_be_processed(struct ceph_writeback_ctl *ceph_wbc,
				unsigned index)
{
	return index < ceph_wbc->nr_folios &&
		ceph_wbc->locked_pages < ceph_wbc->max_pages;
}

static
int ceph_check_page_before_write(struct address_space *mapping,
				 struct writeback_control *wbc,
				 struct ceph_writeback_ctl *ceph_wbc,
				 struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_snap_context *pgsnapc;

	/* only dirty folios, or our accounting breaks */
	if (unlikely(!folio_test_dirty(folio) || folio->mapping != mapping)) {
		doutc(cl, "!dirty or !mapping %p\n", folio);
		return -ENODATA;
	}

	/* only if matching snap context */
	pgsnapc = page_snap_context(&folio->page);
	if (pgsnapc != ceph_wbc->snapc) {
		doutc(cl, "folio snapc %p %lld != oldest %p %lld\n",
		      pgsnapc, pgsnapc->seq,
		      ceph_wbc->snapc, ceph_wbc->snapc->seq);

		if (!ceph_wbc->should_loop && !ceph_wbc->head_snapc &&
		    wbc->sync_mode != WB_SYNC_NONE)
			ceph_wbc->should_loop = true;

		return -ENODATA;
	}

	if (folio_pos(folio) >= ceph_wbc->i_size) {
		doutc(cl, "folio at %lu beyond eof %llu\n",
		      folio->index, ceph_wbc->i_size);

		if ((ceph_wbc->size_stable ||
		     folio_pos(folio) >= i_size_read(inode)) &&
		    folio_clear_dirty_for_io(folio))
			folio_invalidate(folio, 0, folio_size(folio));

		return -ENODATA;
	}

	if (ceph_wbc->strip_unit_end &&
	    (folio->index > ceph_wbc->strip_unit_end)) {
		doutc(cl, "end of strip unit %p\n", folio);
		return -E2BIG;
	}

	return 0;
}

static inline
void __ceph_allocate_page_array(struct ceph_writeback_ctl *ceph_wbc,
				unsigned int max_pages)
{
	ceph_wbc->pages = kmalloc_array(max_pages,
					sizeof(*ceph_wbc->pages),
					GFP_NOFS);
	if (!ceph_wbc->pages) {
		ceph_wbc->from_pool = true;
		ceph_wbc->pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
		BUG_ON(!ceph_wbc->pages);
	}
}

static inline
void ceph_allocate_page_array(struct address_space *mapping,
			      struct ceph_writeback_ctl *ceph_wbc,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objnum;
	u64 objoff;
	u32 xlen;

	/* prepare async write request */
	ceph_wbc->offset = (u64)folio_pos(folio);
	ceph_calc_file_object_mapping(&ci->i_layout,
				      ceph_wbc->offset, ceph_wbc->wsize,
				      &objnum, &objoff, &xlen);

	ceph_wbc->num_ops = 1;
	ceph_wbc->strip_unit_end = folio->index + ((xlen - 1) >> PAGE_SHIFT);

	BUG_ON(ceph_wbc->pages);
	ceph_wbc->max_pages = calc_pages_for(0, (u64)xlen);
	__ceph_allocate_page_array(ceph_wbc, ceph_wbc->max_pages);

	ceph_wbc->len = 0;
}

static inline
bool is_folio_index_contiguous(const struct ceph_writeback_ctl *ceph_wbc,
			       const struct folio *folio)
{
	return folio->index == (ceph_wbc->offset + ceph_wbc->len) >> PAGE_SHIFT;
}

static inline
bool is_num_ops_too_big(struct ceph_writeback_ctl *ceph_wbc)
{
	return ceph_wbc->num_ops >=
		(ceph_wbc->from_pool ? CEPH_OSD_SLAB_OPS : CEPH_OSD_MAX_OPS);
}

static inline
bool is_write_congestion_happened(struct ceph_fs_client *fsc)
{
	return atomic_long_inc_return(&fsc->writeback_count) >
		CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb);
}

static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
		struct writeback_control *wbc,
		struct ceph_writeback_ctl *ceph_wbc, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct page **pages = ceph_wbc->pages;
	unsigned int index = ceph_wbc->locked_pages;
	gfp_t gfp_flags = ceph_wbc->locked_pages ? GFP_NOWAIT : GFP_NOFS;
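	/*
	 * Note on gfp_flags above: the first folio may block in the
	 * allocator (GFP_NOFS), but once we hold locked pages we must not
	 * sleep for reclaim, which could wait on those very pages, hence
	 * GFP_NOWAIT for the rest.
	 */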

	if (IS_ENCRYPTED(inode)) {
		pages[index] = fscrypt_encrypt_pagecache_blocks(folio,
								PAGE_SIZE,
								0,
								gfp_flags);
		if (IS_ERR(pages[index])) {
			int err = PTR_ERR(pages[index]);

			if (err == -EINVAL) {
				pr_err_client(cl, "inode->i_blkbits=%hhu\n",
					      inode->i_blkbits);
			}

			/* better not fail on first page! */
			BUG_ON(ceph_wbc->locked_pages == 0);

			/* save the error before clearing the slot */
			pages[index] = NULL;
			return err;
		}
	} else {
		pages[index] = &folio->page;
	}

	ceph_wbc->locked_pages++;

	return 0;
}

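/*
 * Walk the current folio batch: lock each folio that is still dirty and
 * belongs to the snap context being flushed, and accumulate it into
 * ceph_wbc->pages until the batch is exhausted, the strip unit ends, or
 * the array/op limits are hit.  Folios consumed (or skipped) are NULLed
 * out of the batch so the caller can compact it afterwards.
 */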
static
int ceph_process_folio_batch(struct address_space *mapping,
			     struct writeback_control *wbc,
			     struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct folio *folio = NULL;
	unsigned i;
	int rc = 0;

	for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
		folio = ceph_wbc->fbatch.folios[i];

		if (!folio)
			continue;

		doutc(cl, "? %p idx %lu, folio_test_writeback %#x, "
		      "folio_test_dirty %#x, folio_test_locked %#x\n",
		      folio, folio->index, folio_test_writeback(folio),
		      folio_test_dirty(folio),
		      folio_test_locked(folio));

		if (folio_test_writeback(folio) ||
		    folio_test_private_2(folio) /* [DEPRECATED] */) {
			doutc(cl, "waiting on writeback %p\n", folio);
			folio_wait_writeback(folio);
			folio_wait_private_2(folio); /* [DEPRECATED] */
			continue;
		}

		if (ceph_wbc->locked_pages == 0)
			folio_lock(folio);
		else if (!folio_trylock(folio))
			break;

		rc = ceph_check_page_before_write(mapping, wbc,
						  ceph_wbc, folio);
		if (rc == -ENODATA) {
			rc = 0;
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			continue;
		} else if (rc == -E2BIG) {
			rc = 0;
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			break;
		}

		if (!folio_clear_dirty_for_io(folio)) {
			doutc(cl, "%p !folio_clear_dirty_for_io\n", folio);
			folio_unlock(folio);
			ceph_wbc->fbatch.folios[i] = NULL;
			continue;
		}

		/*
		 * We have something to write.  If this is
		 * the first locked page this time through,
		 * calculate max possible write size and
		 * allocate a page array
		 */
		if (ceph_wbc->locked_pages == 0) {
			ceph_allocate_page_array(mapping, ceph_wbc, folio);
		} else if (!is_folio_index_contiguous(ceph_wbc, folio)) {
			if (is_num_ops_too_big(ceph_wbc)) {
				folio_redirty_for_writepage(wbc, folio);
				folio_unlock(folio);
				break;
			}

			ceph_wbc->num_ops++;
			ceph_wbc->offset = (u64)folio_pos(folio);
			ceph_wbc->len = 0;
		}

		/* note position of first page in fbatch */
		doutc(cl, "%llx.%llx will write folio %p idx %lu\n",
		      ceph_vinop(inode), folio, folio->index);

		fsc->write_congested = is_write_congestion_happened(fsc);

		rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
						    folio);
		if (rc) {
			folio_redirty_for_writepage(wbc, folio);
			folio_unlock(folio);
			break;
		}

		ceph_wbc->fbatch.folios[i] = NULL;
		ceph_wbc->len += folio_size(folio);
	}

	ceph_wbc->processed_in_fbatch = i;

	return rc;
}

static inline
void ceph_shift_unused_folios_left(struct folio_batch *fbatch)
{
	unsigned j, n = 0;

	/* shift unused page to beginning of fbatch */
	for (j = 0; j < folio_batch_count(fbatch); j++) {
		if (!fbatch->folios[j])
			continue;

		if (n < j) {
			fbatch->folios[n] = fbatch->folios[j];
		}

		n++;
	}

	fbatch->nr = n;
}

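/*
 * Build one OSD write request covering the locked pages: contiguous runs
 * of pages become extent ops within a single request (up to r_num_ops),
 * the page array is handed over to the request message, and if locked
 * pages remain once the request fills up, a fresh array is allocated and
 * another request is started.
 */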
static
int ceph_submit_write(struct address_space *mapping,
		      struct writeback_control *wbc,
		      struct ceph_writeback_ctl *ceph_wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	struct ceph_client *cl = fsc->client;
	struct ceph_vino vino = ceph_vino(inode);
	struct ceph_osd_request *req = NULL;
	struct page *page = NULL;
	bool caching = ceph_is_cache_enabled(inode);
	u64 offset;
	u64 len;
	unsigned i;

new_request:
	offset = ceph_fscrypt_page_offset(ceph_wbc->pages[0]);
	len = ceph_wbc->wsize;

	req = ceph_osdc_new_request(&fsc->client->osdc,
				    &ci->i_layout, vino,
				    offset, &len, 0, ceph_wbc->num_ops,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    ceph_wbc->snapc, ceph_wbc->truncate_seq,
				    ceph_wbc->truncate_size, false);
	if (IS_ERR(req)) {
		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0,
					    min(ceph_wbc->num_ops,
						CEPH_OSD_SLAB_OPS),
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE,
					    ceph_wbc->snapc,
					    ceph_wbc->truncate_seq,
					    ceph_wbc->truncate_size,
					    true);
		BUG_ON(IS_ERR(req));
	}

	page = ceph_wbc->pages[ceph_wbc->locked_pages - 1];
	BUG_ON(len < ceph_fscrypt_page_offset(page) + thp_size(page) - offset);

	if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
		for (i = 0; i < folio_batch_count(&ceph_wbc->fbatch); i++) {
			struct folio *folio = ceph_wbc->fbatch.folios[i];

			if (!folio)
				continue;

			page = &folio->page;
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
		}

		for (i = 0; i < ceph_wbc->locked_pages; i++) {
			page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);

			if (!page)
				continue;

			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
		}

		ceph_osdc_put_request(req);
		return -EIO;
	}

	req->r_callback = writepages_finish;
	req->r_inode = inode;

	/* Format the osd request message and submit the write */
	len = 0;
	ceph_wbc->data_pages = ceph_wbc->pages;
	ceph_wbc->op_idx = 0;
	for (i = 0; i < ceph_wbc->locked_pages; i++) {
		u64 cur_offset;

		page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
		cur_offset = page_offset(page);

		/*
		 * Discontinuity in page range? Ceph can handle that by just passing
		 * multiple extents in the write op.
		 */
		if (offset + len != cur_offset) {
			/* If it's full, stop here */
			if (ceph_wbc->op_idx + 1 == req->r_num_ops)
				break;

			/* Kick off an fscache write with what we have so far. */
			ceph_fscache_write_to_cache(inode, offset, len, caching);

			/* Start a new extent */
			osd_req_op_extent_dup_last(req, ceph_wbc->op_idx,
						   cur_offset - offset);

			doutc(cl, "got pages at %llu~%llu\n", offset, len);

			osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
							 ceph_wbc->data_pages,
							 len, 0,
							 ceph_wbc->from_pool,
							 false);
			osd_req_op_extent_update(req, ceph_wbc->op_idx, len);

			len = 0;
			offset = cur_offset;
			ceph_wbc->data_pages = ceph_wbc->pages + i;
			ceph_wbc->op_idx++;
		}

		set_page_writeback(page);

		if (caching)
			ceph_set_page_fscache(page);

		len += thp_size(page);
	}

	ceph_fscache_write_to_cache(inode, offset, len, caching);

	if (ceph_wbc->size_stable) {
		len = min(len, ceph_wbc->i_size - offset);
	} else if (i == ceph_wbc->locked_pages) {
		/* writepages_finish() clears writeback pages
		 * according to the data length, so make sure
		 * data length covers all locked pages */
		u64 min_len = len + 1 - thp_size(page);

		len = get_writepages_data_length(inode,
						 ceph_wbc->pages[i - 1],
						 offset);
		len = max(len, min_len);
	}

	if (IS_ENCRYPTED(inode))
		len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);

	doutc(cl, "got pages at %llu~%llu\n", offset, len);

	if (IS_ENCRYPTED(inode) &&
	    ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK)) {
		pr_warn_client(cl,
			"bad encrypted write offset=%lld len=%llu\n",
			offset, len);
	}

	osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
					 ceph_wbc->data_pages, len,
					 0, ceph_wbc->from_pool, false);
	osd_req_op_extent_update(req, ceph_wbc->op_idx, len);

	BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops);

	ceph_wbc->from_pool = false;
	if (i < ceph_wbc->locked_pages) {
		BUG_ON(ceph_wbc->num_ops <= req->r_num_ops);
		ceph_wbc->num_ops -= req->r_num_ops;
		ceph_wbc->locked_pages -= i;

		/* allocate new pages array for next request */
		ceph_wbc->data_pages = ceph_wbc->pages;
		__ceph_allocate_page_array(ceph_wbc, ceph_wbc->locked_pages);
		memcpy(ceph_wbc->pages, ceph_wbc->data_pages + i,
		       ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
		memset(ceph_wbc->data_pages + i, 0,
		       ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
	} else {
		BUG_ON(ceph_wbc->num_ops != req->r_num_ops);
		/* request message now owns the pages array */
		ceph_wbc->pages = NULL;
	}

	req->r_mtime = inode_get_mtime(inode);
	ceph_osdc_start_request(&fsc->client->osdc, req);
	req = NULL;

	wbc->nr_to_write -= i;
	if (ceph_wbc->pages)
		goto new_request;

	return 0;
}

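/*
 * Writes must hit the OSDs in snap order, so before looping back to
 * flush pages that belong to the next snap context (WB_SYNC_ALL only),
 * wait for every page still under writeback that is tagged with the
 * snapc we just submitted.
 */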
1584 static
ceph_wait_until_current_writes_complete(struct address_space * mapping,struct writeback_control * wbc,struct ceph_writeback_ctl * ceph_wbc)1585 void ceph_wait_until_current_writes_complete(struct address_space *mapping,
1586 struct writeback_control *wbc,
1587 struct ceph_writeback_ctl *ceph_wbc)
1588 {
1589 struct page *page;
1590 unsigned i, nr;
1591
1592 if (wbc->sync_mode != WB_SYNC_NONE &&
1593 ceph_wbc->start_index == 0 && /* all dirty pages were checked */
1594 !ceph_wbc->head_snapc) {
1595 ceph_wbc->index = 0;
1596
1597 while ((ceph_wbc->index <= ceph_wbc->end) &&
1598 (nr = filemap_get_folios_tag(mapping,
1599 &ceph_wbc->index,
1600 (pgoff_t)-1,
1601 PAGECACHE_TAG_WRITEBACK,
1602 &ceph_wbc->fbatch))) {
1603 for (i = 0; i < nr; i++) {
1604 page = &ceph_wbc->fbatch.folios[i]->page;
1605 if (page_snap_context(page) != ceph_wbc->snapc)
1606 continue;
1607 wait_on_page_writeback(page);
1608 }
1609
1610 folio_batch_release(&ceph_wbc->fbatch);
1611 cond_resched();
1612 }
1613 }
1614 }
1615
1616 /*
1617 * initiate async writeback
1618 */
ceph_writepages_start(struct address_space * mapping,struct writeback_control * wbc)1619 static int ceph_writepages_start(struct address_space *mapping,
1620 struct writeback_control *wbc)
1621 {
1622 struct inode *inode = mapping->host;
1623 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
1624 struct ceph_client *cl = fsc->client;
1625 struct ceph_writeback_ctl ceph_wbc;
1626 int rc = 0;
1627
1628 if (wbc->sync_mode == WB_SYNC_NONE && fsc->write_congested)
1629 return 0;
1630
1631 doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
1632 wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
1633 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
1634
1635 if (is_forced_umount(mapping)) {
1636 /* we're in a forced umount, don't write! */
1637 return -EIO;
1638 }
1639
1640 ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);
1641
1642 if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
1643 rc = -EIO;
1644 goto out;
1645 }
1646
1647 retry:
1648 rc = ceph_define_writeback_range(mapping, wbc, &ceph_wbc);
1649 if (rc == -ENODATA) {
1650 /* hmm, why does writepages get called when there
1651 is no dirty data? */
1652 rc = 0;
1653 goto dec_osd_stopping_blocker;
1654 }
1655
1656 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1657 tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);
1658
1659 while (!has_writeback_done(&ceph_wbc)) {
1660 ceph_wbc.locked_pages = 0;
1661 ceph_wbc.max_pages = ceph_wbc.wsize >> PAGE_SHIFT;
1662
1663 get_more_pages:
1664 ceph_folio_batch_reinit(&ceph_wbc);
1665
1666 ceph_wbc.nr_folios = filemap_get_folios_tag(mapping,
1667 &ceph_wbc.index,
1668 ceph_wbc.end,
1669 ceph_wbc.tag,
1670 &ceph_wbc.fbatch);
1671 doutc(cl, "pagevec_lookup_range_tag for tag %#x got %d\n",
1672 ceph_wbc.tag, ceph_wbc.nr_folios);
1673
1674 if (!ceph_wbc.nr_folios && !ceph_wbc.locked_pages)
1675 break;
1676
1677 process_folio_batch:
1678 rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
1679 if (rc)
1680 goto release_folios;
1681
1682 /* did we get anything? */
1683 if (!ceph_wbc.locked_pages)
1684 goto release_folios;
1685
1686 if (ceph_wbc.processed_in_fbatch) {
1687 ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
1688
1689 if (folio_batch_count(&ceph_wbc.fbatch) == 0 &&
1690 ceph_wbc.locked_pages < ceph_wbc.max_pages) {
1691 doutc(cl, "reached end fbatch, trying for more\n");
1692 goto get_more_pages;
1693 }
1694 }
1695
1696 rc = ceph_submit_write(mapping, wbc, &ceph_wbc);
1697 if (rc)
1698 goto release_folios;
1699
1700 ceph_wbc.locked_pages = 0;
1701 ceph_wbc.strip_unit_end = 0;
1702
1703 if (folio_batch_count(&ceph_wbc.fbatch) > 0) {
1704 ceph_wbc.nr_folios =
1705 folio_batch_count(&ceph_wbc.fbatch);
1706 goto process_folio_batch;
1707 }
1708
1709 /*
1710 * We stop writing back only if we are not doing
1711 * integrity sync. In case of integrity sync we have to
1712 * keep going until we have written all the pages
1713 * we tagged for writeback prior to entering this loop.
1714 */
1715 if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
1716 ceph_wbc.done = true;
1717
1718 release_folios:
1719 doutc(cl, "folio_batch release on %d folios (%p)\n",
1720 (int)ceph_wbc.fbatch.nr,
1721 ceph_wbc.fbatch.nr ? ceph_wbc.fbatch.folios[0] : NULL);
1722 folio_batch_release(&ceph_wbc.fbatch);
1723 }
1724
1725 if (ceph_wbc.should_loop && !ceph_wbc.done) {
1726 /* more to do; loop back to beginning of file */
1727 doutc(cl, "looping back to beginning of file\n");
1728 /* OK even when start_index == 0 */
1729 ceph_wbc.end = ceph_wbc.start_index - 1;
1730
1731 /* to write dirty pages associated with next snapc,
1732 * we need to wait until current writes complete */
1733 ceph_wait_until_current_writes_complete(mapping, wbc, &ceph_wbc);
1734
1735 ceph_wbc.start_index = 0;
1736 ceph_wbc.index = 0;
1737 goto retry;
1738 }
1739
1740 if (wbc->range_cyclic || (ceph_wbc.range_whole && wbc->nr_to_write > 0))
1741 mapping->writeback_index = ceph_wbc.index;
1742
1743 dec_osd_stopping_blocker:
1744 ceph_dec_osd_stopping_blocker(fsc->mdsc);
1745
1746 out:
1747 ceph_put_snap_context(ceph_wbc.last_snapc);
1748 doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
1749 rc);
1750
1751 return rc;
1752 }
1753
1754 /*
1755 * See if a given @snapc is either writeable, or already written.
1756 */
context_is_writeable_or_written(struct inode * inode,struct ceph_snap_context * snapc)1757 static int context_is_writeable_or_written(struct inode *inode,
1758 struct ceph_snap_context *snapc)
1759 {
1760 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
1761 int ret = !oldest || snapc->seq <= oldest->seq;
1762
1763 ceph_put_snap_context(oldest);
1764 return ret;
1765 }
1766
1767 /**
1768 * ceph_find_incompatible - find an incompatible context and return it
1769 * @folio: folio being dirtied
1770 *
1771 * We are only allowed to write into/dirty a folio if the folio is
1772 * clean, or already dirty within the same snap context. Returns a
1773 * conflicting context if there is one, NULL if there isn't, or a
1774 * negative error code on other errors.
1775 *
1776 * Must be called with folio lock held.
1777 */
1778 static struct ceph_snap_context *
1779 ceph_find_incompatible(struct folio *folio)
1780 {
1781 struct inode *inode = folio->mapping->host;
1782 struct ceph_client *cl = ceph_inode_to_client(inode);
1783 struct ceph_inode_info *ci = ceph_inode(inode);
1784
1785 if (ceph_inode_is_shutdown(inode)) {
1786 doutc(cl, " %llx.%llx folio %p is shutdown\n",
1787 ceph_vinop(inode), folio);
1788 return ERR_PTR(-ESTALE);
1789 }
1790
1791 for (;;) {
1792 struct ceph_snap_context *snapc, *oldest;
1793
1794 folio_wait_writeback(folio);
1795
1796 snapc = page_snap_context(&folio->page);
1797 if (!snapc || snapc == ci->i_head_snapc)
1798 break;
1799
1800 /*
1801 * this folio is already dirty in another (older) snap
1802 * context! is it writeable now?
1803 */
1804 oldest = get_oldest_context(inode, NULL, NULL);
1805 if (snapc->seq > oldest->seq) {
1806 /* not writeable -- return it for the caller to deal with */
1807 ceph_put_snap_context(oldest);
1808 doutc(cl, " %llx.%llx folio %p snapc %p not current or oldest\n",
1809 ceph_vinop(inode), folio, snapc);
1810 return ceph_get_snap_context(snapc);
1811 }
1812 ceph_put_snap_context(oldest);
1813
1814 /* yay, writeable, do it now (without dropping folio lock) */
1815 doutc(cl, " %llx.%llx folio %p snapc %p not current, but oldest\n",
1816 ceph_vinop(inode), folio, snapc);
1817 if (folio_clear_dirty_for_io(folio)) {
1818 int r = write_folio_nounlock(folio, NULL);
1819 if (r < 0)
1820 return ERR_PTR(r);
1821 }
1822 }
1823 return NULL;
1824 }
1825
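/*
 * netfs write_begin hook: if the folio is dirty in an incompatible
 * (older) snap context, drop the folio, queue writeback, and wait for
 * that context to become writeable or written; returning -EAGAIN tells
 * netfs to retry the write_begin.
 */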
1826 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
1827 struct folio **foliop, void **_fsdata)
1828 {
1829 struct inode *inode = file_inode(file);
1830 struct ceph_inode_info *ci = ceph_inode(inode);
1831 struct ceph_snap_context *snapc;
1832
1833 snapc = ceph_find_incompatible(*foliop);
1834 if (snapc) {
1835 int r;
1836
1837 folio_unlock(*foliop);
1838 folio_put(*foliop);
1839 *foliop = NULL;
1840 if (IS_ERR(snapc))
1841 return PTR_ERR(snapc);
1842
1843 ceph_queue_writeback(inode);
1844 r = wait_event_killable(ci->i_cap_wq,
1845 context_is_writeable_or_written(inode, snapc));
1846 ceph_put_snap_context(snapc);
1847 return r == 0 ? -EAGAIN : r;
1848 }
1849 return 0;
1850 }
1851
1852 /*
1853 * We are only allowed to write into/dirty the page if the page is
1854 * clean, or already dirty within the same snap context.
1855 */
1856 static int ceph_write_begin(struct file *file, struct address_space *mapping,
1857 loff_t pos, unsigned len,
1858 struct folio **foliop, void **fsdata)
1859 {
1860 struct inode *inode = file_inode(file);
1861 struct ceph_inode_info *ci = ceph_inode(inode);
1862 int r;
1863
1864 r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL);
1865 if (r < 0)
1866 return r;
1867
1868 folio_wait_private_2(*foliop); /* [DEPRECATED] */
1869 WARN_ON_ONCE(!folio_test_locked(*foliop));
1870 return 0;
1871 }
1872
1873 /*
1874  * We don't do anything here that simple_write_end doesn't do,
1875  * except adjust the dirty page accounting.
1876 */
1877 static int ceph_write_end(struct file *file, struct address_space *mapping,
1878 loff_t pos, unsigned len, unsigned copied,
1879 struct folio *folio, void *fsdata)
1880 {
1881 struct inode *inode = file_inode(file);
1882 struct ceph_client *cl = ceph_inode_to_client(inode);
1883 bool check_cap = false;
1884
1885 doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
1886 file, folio, (int)pos, (int)copied, (int)len);
1887
1888 if (!folio_test_uptodate(folio)) {
1889 /* just return that nothing was copied on a short copy */
1890 if (copied < len) {
1891 copied = 0;
1892 goto out;
1893 }
1894 folio_mark_uptodate(folio);
1895 }
1896
1897 /* did file size increase? */
1898 if (pos+copied > i_size_read(inode))
1899 check_cap = ceph_inode_set_size(inode, pos+copied);
1900
1901 folio_mark_dirty(folio);
1902
1903 out:
1904 folio_unlock(folio);
1905 folio_put(folio);
1906
1907 if (check_cap)
1908 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);
1909
1910 return copied;
1911 }
1912
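/*
 * Address space operations for CephFS regular files: reads are
 * serviced by netfs, writeback by ceph_writepages_start().
 */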
1913 const struct address_space_operations ceph_aops = {
1914 .read_folio = netfs_read_folio,
1915 .readahead = netfs_readahead,
1916 .writepages = ceph_writepages_start,
1917 .write_begin = ceph_write_begin,
1918 .write_end = ceph_write_end,
1919 .dirty_folio = ceph_dirty_folio,
1920 .invalidate_folio = ceph_invalidate_folio,
1921 .release_folio = netfs_release_folio,
1922 .direct_IO = noop_direct_IO,
1923 .migrate_folio = filemap_migrate_folio,
1924 };
1925
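/*
 * Block all signals except SIGKILL for the duration of a page fault, so
 * that a pending signal cannot interrupt a cap or OSD round trip halfway
 * through; ceph_restore_sigs() reinstates the saved mask afterwards.
 */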
1926 static void ceph_block_sigs(sigset_t *oldset)
1927 {
1928 sigset_t mask;
1929 siginitsetinv(&mask, sigmask(SIGKILL));
1930 sigprocmask(SIG_BLOCK, &mask, oldset);
1931 }
1932
1933 static void ceph_restore_sigs(sigset_t *oldset)
1934 {
1935 sigprocmask(SIG_SETMASK, oldset, NULL);
1936 }
1937
1938 /*
1939 * vm ops
1940 */
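/*
 * Read fault: take CEPH_CAP_FILE_RD cap references, then service the
 * fault through filemap_fault(). If the inode still holds inline data
 * and the granted caps don't let us go through the page cache, fetch
 * the inline data from the MDS into page 0 of the mapping instead.
 */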
1941 static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
1942 {
1943 struct vm_area_struct *vma = vmf->vma;
1944 struct inode *inode = file_inode(vma->vm_file);
1945 struct ceph_inode_info *ci = ceph_inode(inode);
1946 struct ceph_client *cl = ceph_inode_to_client(inode);
1947 struct ceph_file_info *fi = vma->vm_file->private_data;
1948 loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
1949 int want, got, err;
1950 sigset_t oldset;
1951 vm_fault_t ret = VM_FAULT_SIGBUS;
1952
1953 if (ceph_inode_is_shutdown(inode))
1954 return ret;
1955
1956 ceph_block_sigs(&oldset);
1957
1958 doutc(cl, "%llx.%llx %llu trying to get caps\n",
1959 ceph_vinop(inode), off);
1960 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1961 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1962 else
1963 want = CEPH_CAP_FILE_CACHE;
1964
1965 got = 0;
1966 err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
1967 if (err < 0)
1968 goto out_restore;
1969
1970 doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
1971 off, ceph_cap_string(got));
1972
1973 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
1974 !ceph_has_inline_data(ci)) {
1975 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1976 ceph_add_rw_context(fi, &rw_ctx);
1977 ret = filemap_fault(vmf);
1978 ceph_del_rw_context(fi, &rw_ctx);
1979 doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
1980 ceph_vinop(inode), off, ceph_cap_string(got), ret);
1981 } else
1982 err = -EAGAIN;
1983
1984 ceph_put_cap_refs(ci, got);
1985
1986 if (err != -EAGAIN)
1987 goto out_restore;
1988
1989 /* read inline data */
1990 if (off >= PAGE_SIZE) {
1991 /* does not support inline data > PAGE_SIZE */
1992 ret = VM_FAULT_SIGBUS;
1993 } else {
1994 struct address_space *mapping = inode->i_mapping;
1995 struct page *page;
1996
1997 filemap_invalidate_lock_shared(mapping);
1998 page = find_or_create_page(mapping, 0,
1999 mapping_gfp_constraint(mapping, ~__GFP_FS));
2000 if (!page) {
2001 ret = VM_FAULT_OOM;
2002 goto out_inline;
2003 }
2004 err = __ceph_do_getattr(inode, page,
2005 CEPH_STAT_CAP_INLINE_DATA, true);
2006 if (err < 0 || off >= i_size_read(inode)) {
2007 unlock_page(page);
2008 put_page(page);
2009 ret = vmf_error(err);
2010 goto out_inline;
2011 }
2012 if (err < PAGE_SIZE)
2013 zero_user_segment(page, err, PAGE_SIZE);
2014 else
2015 flush_dcache_page(page);
2016 SetPageUptodate(page);
2017 vmf->page = page;
2018 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
2019 out_inline:
2020 filemap_invalidate_unlock_shared(mapping);
2021 doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
2022 ceph_vinop(inode), off, ret);
2023 }
2024 out_restore:
2025 ceph_restore_sigs(&oldset);
2026 if (err < 0)
2027 ret = vmf_error(err);
2028
2029 return ret;
2030 }
2031
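/*
 * Write fault: take CEPH_CAP_FILE_WR/BUFFER cap references covering the
 * folio, then dirty it under the folio lock. If the folio is already
 * dirty in an older, not-yet-writeable snap context, kick off writeback
 * and wait for that context to become writeable before retrying.
 */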
2032 static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
2033 {
2034 struct vm_area_struct *vma = vmf->vma;
2035 struct inode *inode = file_inode(vma->vm_file);
2036 struct ceph_client *cl = ceph_inode_to_client(inode);
2037 struct ceph_inode_info *ci = ceph_inode(inode);
2038 struct ceph_file_info *fi = vma->vm_file->private_data;
2039 struct ceph_cap_flush *prealloc_cf;
2040 struct folio *folio = page_folio(vmf->page);
2041 loff_t off = folio_pos(folio);
2042 loff_t size = i_size_read(inode);
2043 size_t len;
2044 int want, got, err;
2045 sigset_t oldset;
2046 vm_fault_t ret = VM_FAULT_SIGBUS;
2047
2048 if (ceph_inode_is_shutdown(inode))
2049 return ret;
2050
2051 prealloc_cf = ceph_alloc_cap_flush();
2052 if (!prealloc_cf)
2053 return VM_FAULT_OOM;
2054
2055 sb_start_pagefault(inode->i_sb);
2056 ceph_block_sigs(&oldset);
2057
2058 if (off + folio_size(folio) <= size)
2059 len = folio_size(folio);
2060 else
2061 len = offset_in_folio(folio, size);
2062
2063 doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
2064 ceph_vinop(inode), off, len, size);
2065 if (fi->fmode & CEPH_FILE_MODE_LAZY)
2066 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2067 else
2068 want = CEPH_CAP_FILE_BUFFER;
2069
2070 got = 0;
2071 err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
2072 if (err < 0)
2073 goto out_free;
2074
2075 doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
2076 off, len, ceph_cap_string(got));
2077
2078 /* Update time before taking folio lock */
2079 file_update_time(vma->vm_file);
2080 inode_inc_iversion_raw(inode);
2081
2082 do {
2083 struct ceph_snap_context *snapc;
2084
2085 folio_lock(folio);
2086
2087 if (folio_mkwrite_check_truncate(folio, inode) < 0) {
2088 folio_unlock(folio);
2089 ret = VM_FAULT_NOPAGE;
2090 break;
2091 }
2092
2093 snapc = ceph_find_incompatible(folio);
2094 if (!snapc) {
2095 /* success. we'll keep the folio locked. */
2096 folio_mark_dirty(folio);
2097 ret = VM_FAULT_LOCKED;
2098 break;
2099 }
2100
2101 folio_unlock(folio);
2102
2103 if (IS_ERR(snapc)) {
2104 ret = VM_FAULT_SIGBUS;
2105 break;
2106 }
2107
2108 ceph_queue_writeback(inode);
2109 err = wait_event_killable(ci->i_cap_wq,
2110 context_is_writeable_or_written(inode, snapc));
2111 ceph_put_snap_context(snapc);
2112 } while (err == 0);
2113
2114 if (ret == VM_FAULT_LOCKED) {
2115 int dirty;
2116 spin_lock(&ci->i_ceph_lock);
2117 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2118 &prealloc_cf);
2119 spin_unlock(&ci->i_ceph_lock);
2120 if (dirty)
2121 __mark_inode_dirty(inode, dirty);
2122 }
2123
2124 doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
2125 ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
2126 ceph_put_cap_refs_async(ci, got);
2127 out_free:
2128 ceph_restore_sigs(&oldset);
2129 sb_end_pagefault(inode->i_sb);
2130 ceph_free_cap_flush(prealloc_cf);
2131 if (err < 0)
2132 ret = vmf_error(err);
2133 return ret;
2134 }
2135
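/*
 * Populate page 0 of the mapping with inline data returned by the MDS,
 * zero-filling the tail of the page. If @locked_page is NULL, the page
 * is looked up or created here, marked uptodate, and released before
 * returning.
 */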
2136 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
2137 char *data, size_t len)
2138 {
2139 struct ceph_client *cl = ceph_inode_to_client(inode);
2140 struct address_space *mapping = inode->i_mapping;
2141 struct page *page;
2142
2143 if (locked_page) {
2144 page = locked_page;
2145 } else {
2146 if (i_size_read(inode) == 0)
2147 return;
2148 page = find_or_create_page(mapping, 0,
2149 mapping_gfp_constraint(mapping,
2150 ~__GFP_FS));
2151 if (!page)
2152 return;
2153 if (PageUptodate(page)) {
2154 unlock_page(page);
2155 put_page(page);
2156 return;
2157 }
2158 }
2159
2160 doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
2161 ceph_vinop(inode), len, locked_page);
2162
2163 if (len > 0) {
2164 void *kaddr = kmap_atomic(page);
2165 memcpy(kaddr, data, len);
2166 kunmap_atomic(kaddr);
2167 }
2168
2169 if (page != locked_page) {
2170 if (len < PAGE_SIZE)
2171 zero_user_segment(page, len, PAGE_SIZE);
2172 else
2173 flush_dcache_page(page);
2174
2175 SetPageUptodate(page);
2176 unlock_page(page);
2177 put_page(page);
2178 }
2179 }
2180
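/*
 * Migrate inline data out to the first data-pool object: read page 0,
 * CREATE the object, then WRITE the data with a CMPXATTR guard on the
 * "inline_version" xattr so racing uninline attempts settle cleanly
 * (-ECANCELED is treated as success). Finally set i_inline_version to
 * CEPH_INLINE_NONE and dirty the Fw caps.
 */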
2181 int ceph_uninline_data(struct file *file)
2182 {
2183 struct inode *inode = file_inode(file);
2184 struct ceph_inode_info *ci = ceph_inode(inode);
2185 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2186 struct ceph_client *cl = fsc->client;
2187 struct ceph_osd_request *req = NULL;
2188 struct ceph_cap_flush *prealloc_cf = NULL;
2189 struct folio *folio = NULL;
2190 u64 inline_version = CEPH_INLINE_NONE;
2191 struct page *pages[1];
2192 int err = 0;
2193 u64 len;
2194
2195 spin_lock(&ci->i_ceph_lock);
2196 inline_version = ci->i_inline_version;
2197 spin_unlock(&ci->i_ceph_lock);
2198
2199 doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
2200 inline_version);
2201
2202 if (ceph_inode_is_shutdown(inode)) {
2203 err = -EIO;
2204 goto out;
2205 }
2206
2207 if (inline_version == CEPH_INLINE_NONE)
2208 return 0;
2209
2210 prealloc_cf = ceph_alloc_cap_flush();
2211 if (!prealloc_cf)
2212 return -ENOMEM;
2213
2214 if (inline_version == 1) /* initial version, no data */
2215 goto out_uninline;
2216
2217 folio = read_mapping_folio(inode->i_mapping, 0, file);
2218 if (IS_ERR(folio)) {
2219 err = PTR_ERR(folio);
2220 goto out;
2221 }
2222
2223 folio_lock(folio);
2224
2225 len = i_size_read(inode);
2226 if (len > folio_size(folio))
2227 len = folio_size(folio);
2228
2229 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2230 ceph_vino(inode), 0, &len, 0, 1,
2231 CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
2232 NULL, 0, 0, false);
2233 if (IS_ERR(req)) {
2234 err = PTR_ERR(req);
2235 goto out_unlock;
2236 }
2237
2238 req->r_mtime = inode_get_mtime(inode);
2239 ceph_osdc_start_request(&fsc->client->osdc, req);
2240 err = ceph_osdc_wait_request(&fsc->client->osdc, req);
2241 ceph_osdc_put_request(req);
2242 if (err < 0)
2243 goto out_unlock;
2244
2245 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2246 ceph_vino(inode), 0, &len, 1, 3,
2247 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
2248 NULL, ci->i_truncate_seq,
2249 ci->i_truncate_size, false);
2250 if (IS_ERR(req)) {
2251 err = PTR_ERR(req);
2252 goto out_unlock;
2253 }
2254
2255 pages[0] = folio_page(folio, 0);
2256 osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);
2257
2258 {
2259 __le64 xattr_buf = cpu_to_le64(inline_version);
2260 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
2261 "inline_version", &xattr_buf,
2262 sizeof(xattr_buf),
2263 CEPH_OSD_CMPXATTR_OP_GT,
2264 CEPH_OSD_CMPXATTR_MODE_U64);
2265 if (err)
2266 goto out_put_req;
2267 }
2268
2269 {
2270 char xattr_buf[32];
2271 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
2272 "%llu", inline_version);
2273 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
2274 "inline_version",
2275 xattr_buf, xattr_len, 0, 0);
2276 if (err)
2277 goto out_put_req;
2278 }
2279
2280 req->r_mtime = inode_get_mtime(inode);
2281 ceph_osdc_start_request(&fsc->client->osdc, req);
2282 err = ceph_osdc_wait_request(&fsc->client->osdc, req);
2283
2284 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
2285 req->r_end_latency, len, err);
2286
2287 out_uninline:
2288 if (!err) {
2289 int dirty;
2290
2291 		/* Set to CEPH_INLINE_NONE and dirty the caps */
2292 down_read(&fsc->mdsc->snap_rwsem);
2293 spin_lock(&ci->i_ceph_lock);
2294 ci->i_inline_version = CEPH_INLINE_NONE;
2295 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2296 spin_unlock(&ci->i_ceph_lock);
2297 up_read(&fsc->mdsc->snap_rwsem);
2298 if (dirty)
2299 __mark_inode_dirty(inode, dirty);
2300 }
2301 out_put_req:
2302 ceph_osdc_put_request(req);
2303 if (err == -ECANCELED)
2304 err = 0;
2305 out_unlock:
2306 if (folio) {
2307 folio_unlock(folio);
2308 folio_put(folio);
2309 }
2310 out:
2311 ceph_free_cap_flush(prealloc_cf);
2312 doutc(cl, "%llx.%llx inline_version %llu = %d\n",
2313 ceph_vinop(inode), inline_version, err);
2314 return err;
2315 }
2316
2317 static const struct vm_operations_struct ceph_vmops = {
2318 .fault = ceph_filemap_fault,
2319 .page_mkwrite = ceph_page_mkwrite,
2320 };
2321
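/*
 * Hook up Ceph's vm_ops for an mmap; refuse the mapping (-ENOEXEC) if
 * the address_space has no way to read folios back in.
 */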
2322 int ceph_mmap(struct file *file, struct vm_area_struct *vma)
2323 {
2324 struct address_space *mapping = file->f_mapping;
2325
2326 if (!mapping->a_ops->read_folio)
2327 return -ENOEXEC;
2328 vma->vm_ops = &ceph_vmops;
2329 return 0;
2330 }
2331
2332 enum {
2333 POOL_READ = 1,
2334 POOL_WRITE = 2,
2335 };
2336
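/*
 * Resolve read/write permission on @pool (and @pool_ns) for this
 * client. Fast path: look up a cached ceph_pool_perm in the rbtree.
 * Slow path: probe the pool with a dummy STAT read and an exclusive
 * CREATE write against the inode's first object, fold the results into
 * POOL_READ/POOL_WRITE bits, and cache them for later callers.
 */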
2337 static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
2338 s64 pool, struct ceph_string *pool_ns)
2339 {
2340 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
2341 struct ceph_mds_client *mdsc = fsc->mdsc;
2342 struct ceph_client *cl = fsc->client;
2343 struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
2344 struct rb_node **p, *parent;
2345 struct ceph_pool_perm *perm;
2346 struct page **pages;
2347 size_t pool_ns_len;
2348 int err = 0, err2 = 0, have = 0;
2349
2350 down_read(&mdsc->pool_perm_rwsem);
2351 p = &mdsc->pool_perm_tree.rb_node;
2352 while (*p) {
2353 perm = rb_entry(*p, struct ceph_pool_perm, node);
2354 if (pool < perm->pool)
2355 p = &(*p)->rb_left;
2356 else if (pool > perm->pool)
2357 p = &(*p)->rb_right;
2358 else {
2359 int ret = ceph_compare_string(pool_ns,
2360 perm->pool_ns,
2361 perm->pool_ns_len);
2362 if (ret < 0)
2363 p = &(*p)->rb_left;
2364 else if (ret > 0)
2365 p = &(*p)->rb_right;
2366 else {
2367 have = perm->perm;
2368 break;
2369 }
2370 }
2371 }
2372 up_read(&mdsc->pool_perm_rwsem);
2373 if (*p)
2374 goto out;
2375
2376 if (pool_ns)
2377 doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
2378 (int)pool_ns->len, pool_ns->str);
2379 else
2380 doutc(cl, "pool %lld no perm cached\n", pool);
2381
2382 down_write(&mdsc->pool_perm_rwsem);
2383 p = &mdsc->pool_perm_tree.rb_node;
2384 parent = NULL;
2385 while (*p) {
2386 parent = *p;
2387 perm = rb_entry(parent, struct ceph_pool_perm, node);
2388 if (pool < perm->pool)
2389 p = &(*p)->rb_left;
2390 else if (pool > perm->pool)
2391 p = &(*p)->rb_right;
2392 else {
2393 int ret = ceph_compare_string(pool_ns,
2394 perm->pool_ns,
2395 perm->pool_ns_len);
2396 if (ret < 0)
2397 p = &(*p)->rb_left;
2398 else if (ret > 0)
2399 p = &(*p)->rb_right;
2400 else {
2401 have = perm->perm;
2402 break;
2403 }
2404 }
2405 }
2406 if (*p) {
2407 up_write(&mdsc->pool_perm_rwsem);
2408 goto out;
2409 }
2410
2411 rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
2412 1, false, GFP_NOFS);
2413 if (!rd_req) {
2414 err = -ENOMEM;
2415 goto out_unlock;
2416 }
2417
2418 rd_req->r_flags = CEPH_OSD_FLAG_READ;
2419 osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
2420 rd_req->r_base_oloc.pool = pool;
2421 if (pool_ns)
2422 rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
2423 ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
2424
2425 err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
2426 if (err)
2427 goto out_unlock;
2428
2429 wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
2430 1, false, GFP_NOFS);
2431 if (!wr_req) {
2432 err = -ENOMEM;
2433 goto out_unlock;
2434 }
2435
2436 wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
2437 osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
2438 ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
2439 ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);
2440
2441 err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
2442 if (err)
2443 goto out_unlock;
2444
2445 /* one page should be large enough for STAT data */
2446 pages = ceph_alloc_page_vector(1, GFP_KERNEL);
2447 if (IS_ERR(pages)) {
2448 err = PTR_ERR(pages);
2449 goto out_unlock;
2450 }
2451
2452 osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
2453 0, false, true);
2454 ceph_osdc_start_request(&fsc->client->osdc, rd_req);
2455
2456 wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
2457 ceph_osdc_start_request(&fsc->client->osdc, wr_req);
2458
2459 err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
2460 err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
2461
2462 if (err >= 0 || err == -ENOENT)
2463 have |= POOL_READ;
2464 else if (err != -EPERM) {
2465 if (err == -EBLOCKLISTED)
2466 fsc->blocklisted = true;
2467 goto out_unlock;
2468 }
2469
2470 if (err2 == 0 || err2 == -EEXIST)
2471 have |= POOL_WRITE;
2472 else if (err2 != -EPERM) {
2473 if (err2 == -EBLOCKLISTED)
2474 fsc->blocklisted = true;
2475 err = err2;
2476 goto out_unlock;
2477 }
2478
2479 pool_ns_len = pool_ns ? pool_ns->len : 0;
2480 perm = kmalloc(struct_size(perm, pool_ns, pool_ns_len + 1), GFP_NOFS);
2481 if (!perm) {
2482 err = -ENOMEM;
2483 goto out_unlock;
2484 }
2485
2486 perm->pool = pool;
2487 perm->perm = have;
2488 perm->pool_ns_len = pool_ns_len;
2489 if (pool_ns_len > 0)
2490 memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
2491 perm->pool_ns[pool_ns_len] = 0;
2492
2493 rb_link_node(&perm->node, parent, p);
2494 rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
2495 err = 0;
2496 out_unlock:
2497 up_write(&mdsc->pool_perm_rwsem);
2498
2499 ceph_osdc_put_request(rd_req);
2500 ceph_osdc_put_request(wr_req);
2501 out:
2502 if (!err)
2503 err = have;
2504 if (pool_ns)
2505 doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
2506 (int)pool_ns->len, pool_ns->str, err);
2507 else
2508 doutc(cl, "pool %lld result = %d\n", pool, err);
2509 return err;
2510 }
2511
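/*
 * Verify that the inode's data pool allows the access implied by the
 * @need cap bits, caching the result in i_ceph_flags. Skipped for
 * snapshot inodes and when the nopoolperm mount option is set.
 */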
2512 int ceph_pool_perm_check(struct inode *inode, int need)
2513 {
2514 struct ceph_client *cl = ceph_inode_to_client(inode);
2515 struct ceph_inode_info *ci = ceph_inode(inode);
2516 struct ceph_string *pool_ns;
2517 s64 pool;
2518 int ret, flags;
2519
2520 /* Only need to do this for regular files */
2521 if (!S_ISREG(inode->i_mode))
2522 return 0;
2523
2524 if (ci->i_vino.snap != CEPH_NOSNAP) {
2525 /*
2526 * Pool permission check needs to write to the first object.
2527 * But for snapshot, head of the first object may have already
2528 * been deleted. Skip check to avoid creating orphan object.
2529 */
2530 return 0;
2531 }
2532
2533 if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
2534 NOPOOLPERM))
2535 return 0;
2536
2537 spin_lock(&ci->i_ceph_lock);
2538 flags = ci->i_ceph_flags;
2539 pool = ci->i_layout.pool_id;
2540 spin_unlock(&ci->i_ceph_lock);
2541 check:
2542 if (flags & CEPH_I_POOL_PERM) {
2543 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
2544 doutc(cl, "pool %lld no read perm\n", pool);
2545 return -EPERM;
2546 }
2547 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
2548 doutc(cl, "pool %lld no write perm\n", pool);
2549 return -EPERM;
2550 }
2551 return 0;
2552 }
2553
2554 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
2555 ret = __ceph_pool_perm_get(ci, pool, pool_ns);
2556 ceph_put_string(pool_ns);
2557 if (ret < 0)
2558 return ret;
2559
2560 flags = CEPH_I_POOL_PERM;
2561 if (ret & POOL_READ)
2562 flags |= CEPH_I_POOL_RD;
2563 if (ret & POOL_WRITE)
2564 flags |= CEPH_I_POOL_WR;
2565
2566 spin_lock(&ci->i_ceph_lock);
2567 if (pool == ci->i_layout.pool_id &&
2568 pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
2569 ci->i_ceph_flags |= flags;
2570 } else {
2571 pool = ci->i_layout.pool_id;
2572 flags = ci->i_ceph_flags;
2573 }
2574 spin_unlock(&ci->i_ceph_lock);
2575 goto check;
2576 }
2577
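/* Tear down the cached pool permission tree. */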
2578 void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
2579 {
2580 struct ceph_pool_perm *perm;
2581 struct rb_node *n;
2582
2583 while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
2584 n = rb_first(&mdsc->pool_perm_tree);
2585 perm = rb_entry(n, struct ceph_pool_perm, node);
2586 rb_erase(n, &mdsc->pool_perm_tree);
2587 kfree(perm);
2588 }
2589 }
2590