Lines Matching refs:ci

51 				 struct ceph_inode_info *ci,
441 struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
444 struct rb_node *n = ci->i_caps.rb_node;
458 struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
462 spin_lock(&ci->i_ceph_lock);
463 cap = __get_cap_for_mds(ci, mds);
464 spin_unlock(&ci->i_ceph_lock);
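
Lines 441-464 above outline a classic pair: __get_cap_for_mds() walks the ci->i_caps rbtree with i_ceph_lock already held, while ceph_get_cap_for_mds() is the wrapper that takes and drops the lock around it. A minimal sketch of how the pair plausibly reads, assuming the tree is keyed by cap->mds (the comparison lines contain no 'ci' and so don't appear in the matches):

	struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
	{
		struct ceph_cap *cap;
		struct rb_node *n = ci->i_caps.rb_node;

		/* standard rbtree descent, keyed by the MDS rank */
		while (n) {
			cap = rb_entry(n, struct ceph_cap, ci_node);
			if (mds < cap->mds)
				n = n->rb_left;
			else if (mds > cap->mds)
				n = n->rb_right;
			else
				return cap;
		}
		return NULL;
	}

	struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
	{
		struct ceph_cap *cap;

		spin_lock(&ci->i_ceph_lock);
		cap = __get_cap_for_mds(ci, mds);
		spin_unlock(&ci->i_ceph_lock);
		return cap;
	}
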
471 static void __insert_cap_node(struct ceph_inode_info *ci,
474 struct rb_node **p = &ci->i_caps.rb_node;
490 rb_insert_color(&new->ci_node, &ci->i_caps);
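
__insert_cap_node() (471-490) is the matching insert: line 474 starts the descent from the root and line 490 rebalances with rb_insert_color(). The walk and the rb_link_node() call in between are elided by the match; a hedged reconstruction of the usual kernel rbtree insert idiom, again assuming a cap->mds key:

	static void __insert_cap_node(struct ceph_inode_info *ci,
				      struct ceph_cap *new)
	{
		struct rb_node **p = &ci->i_caps.rb_node;
		struct rb_node *parent = NULL;
		struct ceph_cap *cap;

		while (*p) {
			parent = *p;
			cap = rb_entry(parent, struct ceph_cap, ci_node);
			if (new->mds < cap->mds)
				p = &(*p)->rb_left;
			else if (new->mds > cap->mds)
				p = &(*p)->rb_right;
			else
				BUG();	/* assumes the caller never inserts a duplicate mds */
		}

		rb_link_node(&new->ci_node, parent, p);
		rb_insert_color(&new->ci_node, &ci->i_caps);
	}
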
498 struct ceph_inode_info *ci)
500 struct inode *inode = &ci->netfs.inode;
503 ci->i_hold_caps_max = round_jiffies(jiffies +
506 ceph_vinop(inode), ci->i_hold_caps_max - jiffies);
518 struct ceph_inode_info *ci)
520 struct inode *inode = &ci->netfs.inode;
523 inode, ceph_vinop(inode), ci->i_ceph_flags,
524 ci->i_hold_caps_max);
527 if (!list_empty(&ci->i_cap_delay_list)) {
528 if (ci->i_ceph_flags & CEPH_I_FLUSH)
530 list_del_init(&ci->i_cap_delay_list);
532 __cap_set_timeouts(mdsc, ci);
533 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
545 struct ceph_inode_info *ci)
547 struct inode *inode = &ci->netfs.inode;
551 ci->i_ceph_flags |= CEPH_I_FLUSH;
552 if (!list_empty(&ci->i_cap_delay_list))
553 list_del_init(&ci->i_cap_delay_list);
554 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
564 struct ceph_inode_info *ci)
566 struct inode *inode = &ci->netfs.inode;
569 if (list_empty(&ci->i_cap_delay_list))
572 list_del_init(&ci->i_cap_delay_list);
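
Lines 498-572 are the cap-delay machinery: __cap_set_timeouts() stamps i_hold_caps_max with a rounded deadline, __cap_delay_requeue() puts the inode at the tail of mdsc->cap_delay_list (leaving already-urgent CEPH_I_FLUSH entries where they are), __cap_delay_requeue_front() flags CEPH_I_FLUSH and queues at the head, and __cap_delay_cancel() unlinks the inode. A sketch of the requeue; the mdsc->cap_delay_lock lines are an assumption, since they contain no 'ci':

	static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
					struct ceph_inode_info *ci)
	{
		spin_lock(&mdsc->cap_delay_lock);	/* assumed lock */
		if (!list_empty(&ci->i_cap_delay_list)) {
			/* an entry flagged CEPH_I_FLUSH is already urgent: leave it */
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto out;
			list_del_init(&ci->i_cap_delay_list);
		}
		__cap_set_timeouts(mdsc, ci);
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	out:
		spin_unlock(&mdsc->cap_delay_lock);
	}
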
577 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
580 struct inode *inode = &ci->netfs.inode;
583 unsigned had = __ceph_caps_issued(ci, NULL);
585 lockdep_assert_held(&ci->i_ceph_lock);
591 if (S_ISREG(ci->netfs.inode.i_mode) &&
594 ci->i_rdcache_gen++;
605 atomic_inc(&ci->i_shared_gen);
606 if (S_ISDIR(ci->netfs.inode.i_mode)) {
608 __ceph_dir_clear_complete(ci);
613 if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
615 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
616 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
622 * @ci: inode to be moved
625 void change_auth_cap_ses(struct ceph_inode_info *ci,
628 lockdep_assert_held(&ci->i_ceph_lock);
630 if (list_empty(&ci->i_dirty_item) && list_empty(&ci->i_flushing_item))
634 if (!list_empty(&ci->i_dirty_item))
635 list_move(&ci->i_dirty_item, &session->s_cap_dirty);
636 if (!list_empty(&ci->i_flushing_item))
637 list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
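
change_auth_cap_ses() (625-637) migrates the inode's per-session bookkeeping when the auth cap moves to a different MDS session: the dirty entry joins the new session's s_cap_dirty list and the flushing entry goes to the tail of s_cap_flushing. Filled out as a sketch; the surrounding session->s_mdsc->cap_dirty_lock is an assumption, as the matches show only the list operations:

	void change_auth_cap_ses(struct ceph_inode_info *ci,
				 struct ceph_mds_session *session)
	{
		lockdep_assert_held(&ci->i_ceph_lock);

		if (list_empty(&ci->i_dirty_item) && list_empty(&ci->i_flushing_item))
			return;

		spin_lock(&session->s_mdsc->cap_dirty_lock);	/* assumed lock */
		if (!list_empty(&ci->i_dirty_item))
			list_move(&ci->i_dirty_item, &session->s_cap_dirty);
		if (!list_empty(&ci->i_flushing_item))
			list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		spin_unlock(&session->s_mdsc->cap_dirty_lock);
	}
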
644 * Caller should hold session snap_rwsem (read) and ci->i_ceph_lock
658 struct ceph_inode_info *ci = ceph_inode(inode);
664 lockdep_assert_held(&ci->i_ceph_lock);
672 cap = __get_cap_for_mds(ci, mds);
683 cap->ci = ci;
684 __insert_cap_node(ci, cap);
711 WARN_ON(cap != ci->i_auth_cap);
720 if (!ci->i_snap_realm ||
722 realmino != (u64)-1 && ci->i_snap_realm->ino != realmino)) {
732 __func__, realmino, ci->i_vino.ino,
733 ci->i_snap_realm ? ci->i_snap_realm->ino : 0);
736 __check_cap_issue(ci, cap, issued);
743 actual_wanted = __ceph_caps_wanted(ci);
749 __cap_delay_requeue(mdsc, ci);
753 if (!ci->i_auth_cap ||
754 ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
755 if (ci->i_auth_cap &&
756 ci->i_auth_cap->session != cap->session)
757 change_auth_cap_ses(ci, cap->session);
758 ci->i_auth_cap = cap;
762 WARN_ON(ci->i_auth_cap == cap);
779 wake_up_all(&ci->i_cap_wq);
789 struct inode *inode = &cap->ci->netfs.inode;
812 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
814 struct inode *inode = &ci->netfs.inode;
816 int have = ci->i_snap_caps;
822 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
837 if (ci->i_auth_cap) {
838 cap = ci->i_auth_cap;
847 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
849 int have = ci->i_snap_caps;
853 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
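
Both __ceph_caps_issued() (812) and __ceph_caps_issued_other() (847) fold the per-MDS caps into one bitmask: start from the snapshot caps in i_snap_caps, then OR in each valid cap found by an rb_first()/rb_next() walk. A sketch of the first; the __cap_is_valid() filter, the |= lines, and the auth-cap mask at the end are assumptions (they carry no 'ci'):

	int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
	{
		int have = ci->i_snap_caps;
		struct ceph_cap *cap;
		struct rb_node *p;

		if (implemented)
			*implemented = 0;
		for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
			cap = rb_entry(p, struct ceph_cap, ci_node);
			if (!__cap_is_valid(cap))
				continue;
			have |= cap->issued;
			if (implemented)
				*implemented |= cap->implemented;
		}
		/* lines 837-838 show a fixup against the auth cap; the exact
		 * mask is assumed: drop bits the auth MDS has implemented
		 * but not actually issued */
		if (ci->i_auth_cap) {
			cap = ci->i_auth_cap;
			have &= ~cap->implemented | cap->issued;
		}
		return have;
	}
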
870 struct inode *inode = &cap->ci->netfs.inode;
891 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
893 struct inode *inode = &ci->netfs.inode;
897 int have = ci->i_snap_caps;
906 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
932 for (q = rb_first(&ci->i_caps); q != p;
949 int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
952 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
955 r = __ceph_caps_issued_mask(ci, mask, touch);
966 int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
972 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
981 int __ceph_caps_used(struct ceph_inode_info *ci)
984 if (ci->i_pin_ref)
986 if (ci->i_rd_ref)
988 if (ci->i_rdcache_ref ||
989 (S_ISREG(ci->netfs.inode.i_mode) &&
990 ci->netfs.inode.i_data.nrpages))
992 if (ci->i_wr_ref)
994 if (ci->i_wb_ref || ci->i_wrbuffer_ref)
996 if (ci->i_fx_ref)
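
__ceph_caps_used() (981) maps the in-kernel reference counters onto "used" cap bits. The matches show only the if-conditions; which bit each counter contributes is inferred from its name, so treat the |= lines as assumptions:

	int __ceph_caps_used(struct ceph_inode_info *ci)
	{
		int used = 0;

		if (ci->i_pin_ref)
			used |= CEPH_CAP_PIN;
		if (ci->i_rd_ref)
			used |= CEPH_CAP_FILE_RD;
		if (ci->i_rdcache_ref ||
		    (S_ISREG(ci->netfs.inode.i_mode) &&
		     ci->netfs.inode.i_data.nrpages))	/* cached pages keep CACHE in use */
			used |= CEPH_CAP_FILE_CACHE;
		if (ci->i_wr_ref)
			used |= CEPH_CAP_FILE_WR;
		if (ci->i_wb_ref || ci->i_wrbuffer_ref)
			used |= CEPH_CAP_FILE_BUFFER;
		if (ci->i_fx_ref)
			used |= CEPH_CAP_FILE_EXCL;
		return used;
	}
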
1006 int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
1013 ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
1017 if (S_ISDIR(ci->netfs.inode.i_mode)) {
1021 if (ci->i_nr_by_mode[RD_SHIFT] > 0 ||
1022 time_after(ci->i_last_rd, used_cutoff))
1025 if (ci->i_nr_by_mode[WR_SHIFT] > 0 ||
1026 time_after(ci->i_last_wr, used_cutoff)) {
1032 if (want || ci->i_nr_by_mode[PIN_SHIFT] > 0)
1039 if (ci->i_nr_by_mode[RD_SHIFT] > 0) {
1040 if (ci->i_nr_by_mode[RD_SHIFT] >= FMODE_WAIT_BIAS ||
1041 time_after(ci->i_last_rd, used_cutoff))
1043 } else if (time_after(ci->i_last_rd, idle_cutoff)) {
1047 if (ci->i_nr_by_mode[WR_SHIFT] > 0) {
1048 if (ci->i_nr_by_mode[WR_SHIFT] >= FMODE_WAIT_BIAS ||
1049 time_after(ci->i_last_wr, used_cutoff))
1051 } else if (time_after(ci->i_last_wr, idle_cutoff)) {
1057 ci->i_nr_by_mode[LAZY_SHIFT] > 0)
1067 int __ceph_caps_wanted(struct ceph_inode_info *ci)
1069 int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);
1070 if (S_ISDIR(ci->netfs.inode.i_mode)) {
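
__ceph_caps_wanted() (1067) is simply "wanted by open files" ORed with "currently in use", widened with FILE_EXCL in the directory case at 1070. What triggers the widening is not in the matches; a sketch under the assumption that directories want EXCL while holding dir-op caps and regular files want it while holding dirty buffers:

	int __ceph_caps_wanted(struct ceph_inode_info *ci)
	{
		int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);

		if (S_ISDIR(ci->netfs.inode.i_mode)) {
			if (w & CEPH_CAP_ANY_DIR_OPS)	/* assumed trigger */
				w |= CEPH_CAP_FILE_EXCL;
		} else {
			if (w & CEPH_CAP_FILE_BUFFER)	/* assumed trigger */
				w |= CEPH_CAP_FILE_EXCL;
		}
		return w;
	}
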
1085 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check)
1091 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1095 if (cap == ci->i_auth_cap)
1105 struct ceph_inode_info *ci = ceph_inode(inode);
1108 spin_lock(&ci->i_ceph_lock);
1109 ret = __ceph_is_any_real_caps(ci);
1110 spin_unlock(&ci->i_ceph_lock);
1125 struct ceph_inode_info *ci = cap->ci;
1126 struct inode *inode = &ci->netfs.inode;
1130 /* 'ci' being NULL means the removal has already occurred */
1131 if (!ci) {
1136 lockdep_assert_held(&ci->i_ceph_lock);
1140 mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc;
1143 rb_erase(&cap->ci_node, &ci->i_caps);
1144 if (ci->i_auth_cap == cap)
1145 ci->i_auth_cap = NULL;
1161 cap->ci = NULL;
1178 cap->cap_ino = ci->i_vino.ino;
1185 if (!__ceph_is_any_real_caps(ci)) {
1190 if (ci->i_wr_ref == 0 && ci->i_snap_realm)
1191 ceph_change_snap_realm(&ci->netfs.inode, NULL);
1193 __cap_delay_cancel(mdsc, ci);
1200 struct ceph_inode_info *ci = cap->ci;
1203 /* 'ci' being NULL means the removal has already occurred */
1204 if (!ci) {
1209 lockdep_assert_held(&ci->i_ceph_lock);
1211 fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
1212 WARN_ON_ONCE(ci->i_auth_cap == cap &&
1213 !list_empty(&ci->i_dirty_item) &&
1215 !ceph_inode_is_shutdown(&ci->netfs.inode));
1364 void __ceph_remove_caps(struct ceph_inode_info *ci)
1366 struct inode *inode = &ci->netfs.inode;
1372 spin_lock(&ci->i_ceph_lock);
1373 p = rb_first(&ci->i_caps);
1379 spin_unlock(&ci->i_ceph_lock);
1394 struct ceph_inode_info *ci = cap->ci;
1395 struct inode *inode = &ci->netfs.inode;
1399 lockdep_assert_held(&ci->i_ceph_lock);
1411 ci->i_ceph_flags &= ~CEPH_I_FLUSH;
1426 arg->follows = flushing ? ci->i_head_snapc->seq : 0;
1430 ci->i_reported_size = arg->size;
1431 arg->max_size = ci->i_wanted_max_size;
1432 if (cap == ci->i_auth_cap) {
1434 ci->i_requested_max_size = arg->max_size;
1436 ci->i_requested_max_size = 0;
1440 arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
1441 arg->xattr_version = ci->i_xattrs.version;
1442 arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob);
1451 arg->btime = ci->i_btime;
1462 arg->time_warp_seq = ci->i_time_warp_seq;
1468 arg->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
1470 !list_empty(&ci->i_cap_snaps)) {
1472 list_for_each_entry_reverse(capsnap, &ci->i_cap_snaps, ci_item) {
1484 if (ci->fscrypt_auth_len &&
1485 WARN_ON_ONCE(ci->fscrypt_auth_len > sizeof(struct ceph_fscrypt_auth))) {
1489 arg->fscrypt_auth_len = ci->fscrypt_auth_len;
1490 memcpy(arg->fscrypt_auth, ci->fscrypt_auth,
1491 min_t(size_t, ci->fscrypt_auth_len,
1520 static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
1523 struct inode *inode = &ci->netfs.inode;
1534 spin_lock(&ci->i_ceph_lock);
1535 __cap_delay_requeue(arg->session->s_mdsc, ci);
1536 spin_unlock(&ci->i_ceph_lock);
1545 wake_up_all(&ci->i_cap_wq);
1616 static void __ceph_flush_snaps(struct ceph_inode_info *ci,
1618 __releases(ci->i_ceph_lock)
1619 __acquires(ci->i_ceph_lock)
1621 struct inode *inode = &ci->netfs.inode;
1631 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
1654 if (list_empty(&ci->i_flushing_item)) {
1655 list_add_tail(&ci->i_flushing_item,
1661 &ci->i_cap_flush_list);
1668 ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;
1671 struct ceph_cap *cap = ci->i_auth_cap;
1682 list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) {
1696 spin_unlock(&ci->i_ceph_lock);
1712 spin_lock(&ci->i_ceph_lock);
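
The __releases()/__acquires() pair on __ceph_flush_snaps() (1618-1619) are sparse lock annotations: the function is entered and exits with ci->i_ceph_lock held, but drops the lock internally to send the flush message (the unlock at 1696 and the relock at 1712 above). Reduced to its locking shape, with the message send as a placeholder:

	static void __ceph_flush_snaps(struct ceph_inode_info *ci,
				       struct ceph_mds_session *session)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
	{
		/* ... pick the next capsnap and its flush_tid under the lock ... */
		spin_unlock(&ci->i_ceph_lock);
		/* ... build and send the FLUSHSNAP message unlocked ... */
		spin_lock(&ci->i_ceph_lock);
	}
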
1716 void ceph_flush_snaps(struct ceph_inode_info *ci,
1719 struct inode *inode = &ci->netfs.inode;
1730 spin_lock(&ci->i_ceph_lock);
1731 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
1735 if (!ci->i_auth_cap) {
1740 mds = ci->i_auth_cap->session->s_mds;
1747 spin_unlock(&ci->i_ceph_lock);
1755 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH)
1756 __kick_flushing_caps(mdsc, session, ci, 0);
1758 __ceph_flush_snaps(ci, session);
1760 spin_unlock(&ci->i_ceph_lock);
1768 if (!list_empty(&ci->i_snap_flush_item))
1770 list_del_init(&ci->i_snap_flush_item);
1782 int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
1786 ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc;
1787 struct inode *inode = &ci->netfs.inode;
1789 int was = ci->i_dirty_caps;
1792 lockdep_assert_held(&ci->i_ceph_lock);
1794 if (!ci->i_auth_cap) {
1805 ci->i_dirty_caps |= mask;
1807 struct ceph_mds_session *session = ci->i_auth_cap->session;
1809 WARN_ON_ONCE(ci->i_prealloc_cap_flush);
1810 swap(ci->i_prealloc_cap_flush, *pcf);
1812 if (!ci->i_head_snapc) {
1814 ci->i_head_snapc = ceph_get_snap_context(
1815 ci->i_snap_realm->cached_context);
1818 inode, ceph_vinop(inode), ci->i_head_snapc,
1819 ci->i_auth_cap);
1820 BUG_ON(!list_empty(&ci->i_dirty_item));
1822 list_add(&ci->i_dirty_item, &session->s_cap_dirty);
1824 if (ci->i_flushing_caps == 0) {
1829 WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
1831 BUG_ON(list_empty(&ci->i_dirty_item));
1832 if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1835 __cap_delay_requeue(mdsc, ci);
1887 static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
1893 if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
1914 struct ceph_inode_info *ci = ceph_inode(inode);
1918 lockdep_assert_held(&ci->i_ceph_lock);
1919 BUG_ON(ci->i_dirty_caps == 0);
1920 BUG_ON(list_empty(&ci->i_dirty_item));
1921 BUG_ON(!ci->i_prealloc_cap_flush);
1923 flushing = ci->i_dirty_caps;
1926 ceph_cap_string(ci->i_flushing_caps),
1927 ceph_cap_string(ci->i_flushing_caps | flushing));
1928 ci->i_flushing_caps |= flushing;
1929 ci->i_dirty_caps = 0;
1932 swap(cf, ci->i_prealloc_cap_flush);
1937 list_del_init(&ci->i_dirty_item);
1943 if (list_empty(&ci->i_flushing_item)) {
1944 list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1949 list_add_tail(&cf->i_list, &ci->i_cap_flush_list);
1958 __releases(ci->i_ceph_lock)
1959 __acquires(ci->i_ceph_lock)
1962 struct ceph_inode_info *ci = ceph_inode(inode);
1963 u32 invalidating_gen = ci->i_rdcache_gen;
1965 spin_unlock(&ci->i_ceph_lock);
1968 spin_lock(&ci->i_ceph_lock);
1971 invalidating_gen == ci->i_rdcache_gen) {
1976 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1983 bool __ceph_should_report_size(struct ceph_inode_info *ci)
1985 loff_t size = i_size_read(&ci->netfs.inode);
1987 if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
1989 if (size >= ci->i_max_size)
1992 if (ci->i_max_size > ci->i_reported_size &&
1993 (size << 1) >= ci->i_max_size + ci->i_reported_size)
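
__ceph_should_report_size() (1983-1993) decides when a size change is worth reporting to the MDS: never while a FILE_WR flush is in flight, always once the size reaches i_max_size, and otherwise once half of the last max_size grant has been consumed. The final test uses a shift instead of a division: (size << 1) >= max + reported is exactly size >= (max + reported) / 2, the midpoint between the last reported size and the current limit. Reassembled, with the return values inferred:

	bool __ceph_should_report_size(struct ceph_inode_info *ci)
	{
		loff_t size = i_size_read(&ci->netfs.inode);

		/* a FILE_WR flush is already carrying a size update */
		if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
			return false;
		/* hit the current limit: must report */
		if (size >= ci->i_max_size)
			return true;
		/* half of the last max_size increment has been used */
		if (ci->i_max_size > ci->i_reported_size &&
		    (size << 1) >= ci->i_max_size + ci->i_reported_size)
			return true;
		return false;
	}
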
2009 void ceph_check_caps(struct ceph_inode_info *ci, int flags)
2011 struct inode *inode = &ci->netfs.inode;
2026 spin_lock(&ci->i_ceph_lock);
2027 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
2028 ci->i_ceph_flags |= CEPH_I_ASYNC_CHECK_CAPS;
2031 spin_unlock(&ci->i_ceph_lock);
2035 if (ci->i_ceph_flags & CEPH_I_FLUSH)
2039 file_wanted = __ceph_caps_file_wanted(ci);
2042 used = __ceph_caps_used(ci);
2050 issued = __ceph_caps_issued(ci, &implemented);
2062 __ceph_dir_is_complete(ci)) {
2083 if (ci->i_max_size == 0)
2091 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
2092 ceph_cap_string(ci->i_flushing_caps),
2107 !(ci->i_wb_ref || ci->i_wrbuffer_ref) && /* no dirty pages... */
2117 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2123 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2131 ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
2139 if (ci->i_auth_cap && cap != ci->i_auth_cap)
2140 cap_used &= ~ci->i_auth_cap->issued;
2168 if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
2178 if (cap == ci->i_auth_cap &&
2181 if (ci->i_wanted_max_size > ci->i_max_size &&
2182 ci->i_wanted_max_size > ci->i_requested_max_size) {
2188 if (__ceph_should_report_size(ci)) {
2194 if (cap == ci->i_auth_cap) {
2195 if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
2199 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
2223 if (cap == ci->i_auth_cap &&
2224 (ci->i_ceph_flags &
2226 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH)
2227 __kick_flushing_caps(mdsc, session, ci, 0);
2228 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
2229 __ceph_flush_snaps(ci, session);
2234 if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
2235 flushing = ci->i_dirty_caps;
2254 spin_unlock(&ci->i_ceph_lock);
2255 __send_cap(&arg, ci);
2256 spin_lock(&ci->i_ceph_lock);
2262 if (__ceph_is_any_real_caps(ci) &&
2263 list_empty(&ci->i_cap_delay_list) &&
2266 __cap_delay_requeue(mdsc, ci);
2269 spin_unlock(&ci->i_ceph_lock);
2284 struct ceph_inode_info *ci = ceph_inode(inode);
2288 spin_lock(&ci->i_ceph_lock);
2290 if (ci->i_dirty_caps && ci->i_auth_cap) {
2291 struct ceph_cap *cap = ci->i_auth_cap;
2296 spin_unlock(&ci->i_ceph_lock);
2300 if (ci->i_ceph_flags &
2302 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH)
2303 __kick_flushing_caps(mdsc, session, ci, 0);
2304 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
2305 __ceph_flush_snaps(ci, session);
2309 flushing = ci->i_dirty_caps;
2314 __ceph_caps_used(ci), __ceph_caps_wanted(ci),
2317 spin_unlock(&ci->i_ceph_lock);
2319 __send_cap(&arg, ci);
2321 if (!list_empty(&ci->i_cap_flush_list)) {
2323 list_last_entry(&ci->i_cap_flush_list,
2328 flushing = ci->i_flushing_caps;
2329 spin_unlock(&ci->i_ceph_lock);
2341 struct ceph_inode_info *ci = ceph_inode(inode);
2344 spin_lock(&ci->i_ceph_lock);
2345 if (!list_empty(&ci->i_cap_flush_list)) {
2347 list_first_entry(&ci->i_cap_flush_list,
2352 spin_unlock(&ci->i_ceph_lock);
2363 struct ceph_inode_info *ci = ceph_inode(inode);
2367 spin_lock(&ci->i_unsafe_lock);
2368 if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
2369 req1 = list_last_entry(&ci->i_unsafe_dirops,
2374 if (!list_empty(&ci->i_unsafe_iops)) {
2375 req2 = list_last_entry(&ci->i_unsafe_iops,
2380 spin_unlock(&ci->i_unsafe_lock);
2404 spin_lock(&ci->i_unsafe_lock);
2406 list_for_each_entry(req, &ci->i_unsafe_dirops,
2418 list_for_each_entry(req, &ci->i_unsafe_iops,
2429 spin_unlock(&ci->i_unsafe_lock);
2432 spin_lock(&ci->i_ceph_lock);
2433 if (ci->i_auth_cap) {
2434 s = ci->i_auth_cap->session;
2438 spin_unlock(&ci->i_ceph_lock);
2479 struct ceph_inode_info *ci = ceph_inode(inode);
2507 err = wait_event_interruptible(ci->i_cap_wq,
2531 struct ceph_inode_info *ci = ceph_inode(inode);
2546 err = wait_event_interruptible(ci->i_cap_wq,
2552 spin_lock(&ci->i_ceph_lock);
2553 if (__ceph_caps_dirty(ci))
2554 __cap_delay_requeue_front(mdsc, ci);
2555 spin_unlock(&ci->i_ceph_lock);
2562 struct ceph_inode_info *ci,
2564 __releases(ci->i_ceph_lock)
2565 __acquires(ci->i_ceph_lock)
2567 struct inode *inode = &ci->netfs.inode;
2576 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE)
2579 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
2581 list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
2588 list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
2592 cap = ci->i_auth_cap;
2610 __ceph_caps_used(ci),
2611 __ceph_caps_wanted(ci),
2614 spin_unlock(&ci->i_ceph_lock);
2615 __send_cap(&arg, ci);
2625 spin_unlock(&ci->i_ceph_lock);
2639 spin_lock(&ci->i_ceph_lock);
2647 struct ceph_inode_info *ci;
2657 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2658 struct inode *inode = &ci->netfs.inode;
2660 spin_lock(&ci->i_ceph_lock);
2661 cap = ci->i_auth_cap;
2666 spin_unlock(&ci->i_ceph_lock);
2677 if ((cap->issued & ci->i_flushing_caps) !=
2678 ci->i_flushing_caps) {
2685 __kick_flushing_caps(mdsc, session, ci,
2688 ci->i_ceph_flags |= CEPH_I_KICK_FLUSH;
2691 spin_unlock(&ci->i_ceph_lock);
2699 struct ceph_inode_info *ci;
2711 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2712 struct inode *inode = &ci->netfs.inode;
2714 spin_lock(&ci->i_ceph_lock);
2715 cap = ci->i_auth_cap;
2720 spin_unlock(&ci->i_ceph_lock);
2723 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
2724 __kick_flushing_caps(mdsc, session, ci,
2727 spin_unlock(&ci->i_ceph_lock);
2732 struct ceph_inode_info *ci)
2735 struct ceph_cap *cap = ci->i_auth_cap;
2736 struct inode *inode = &ci->netfs.inode;
2738 lockdep_assert_held(&ci->i_ceph_lock);
2742 ceph_cap_string(ci->i_flushing_caps));
2744 if (!list_empty(&ci->i_cap_flush_list)) {
2747 list_move_tail(&ci->i_flushing_item,
2752 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid);
2761 void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
2764 struct inode *inode = &ci->netfs.inode;
2767 lockdep_assert_held(&ci->i_ceph_lock);
2770 ci->i_pin_ref++;
2772 ci->i_rd_ref++;
2774 ci->i_rdcache_ref++;
2776 ci->i_fx_ref++;
2778 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
2780 ci->i_head_snapc = ceph_get_snap_context(
2781 ci->i_snap_realm->cached_context);
2783 ci->i_wr_ref++;
2786 if (ci->i_wb_ref == 0)
2788 ci->i_wb_ref++;
2790 ceph_vinop(inode), ci->i_wb_ref-1, ci->i_wb_ref);
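
ceph_take_cap_refs() (2761) bumps one counter per cap bit taken. The if-conditions guarding each ++ carry no 'ci' and are assumptions below; what the matches do show is the subtlety at 2778-2781: the first FILE_WR reference pins the head snap context. The ihold() on the first FILE_BUFFER reference is likewise an assumption:

	void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
				bool snap_rwsem_locked)
	{
		lockdep_assert_held(&ci->i_ceph_lock);

		if (got & CEPH_CAP_PIN)
			ci->i_pin_ref++;
		if (got & CEPH_CAP_FILE_RD)
			ci->i_rd_ref++;
		if (got & CEPH_CAP_FILE_CACHE)
			ci->i_rdcache_ref++;
		if (got & CEPH_CAP_FILE_EXCL)
			ci->i_fx_ref++;
		if (got & CEPH_CAP_FILE_WR) {
			if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
				/* first writer pins the head snap context */
				ci->i_head_snapc = ceph_get_snap_context(
					ci->i_snap_realm->cached_context);
			}
			ci->i_wr_ref++;
		}
		if (got & CEPH_CAP_FILE_BUFFER) {
			if (ci->i_wb_ref == 0)
				ihold(&ci->netfs.inode);	/* assumed */
			ci->i_wb_ref++;
		}
	}
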
2816 struct ceph_inode_info *ci = ceph_inode(inode);
2828 spin_lock(&ci->i_ceph_lock);
2831 (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) {
2839 while (ci->i_truncate_pending) {
2840 spin_unlock(&ci->i_ceph_lock);
2846 spin_lock(&ci->i_ceph_lock);
2849 have = __ceph_caps_issued(ci, &implemented);
2852 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2854 inode, ceph_vinop(inode), endoff, ci->i_max_size);
2855 if (endoff > ci->i_requested_max_size)
2856 ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN;
2863 if (__ceph_have_pending_cap_snap(ci)) {
2888 !ci->i_head_snapc &&
2900 spin_unlock(&ci->i_ceph_lock);
2911 ceph_take_cap_refs(ci, *got, true);
2917 if (ci->i_auth_cap &&
2919 struct ceph_mds_session *s = ci->i_auth_cap->session;
2927 ci->i_auth_cap->mds);
2938 mds_wanted = __ceph_caps_mds_wanted(ci, false);
2953 __ceph_touch_fmode(ci, mdsc, flags);
2955 spin_unlock(&ci->i_ceph_lock);
2976 struct ceph_inode_info *ci = ceph_inode(inode);
2981 spin_lock(&ci->i_ceph_lock);
2982 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
2985 ci->i_wanted_max_size = endoff;
2988 if (ci->i_auth_cap &&
2989 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2990 ci->i_wanted_max_size > ci->i_max_size &&
2991 ci->i_wanted_max_size > ci->i_requested_max_size)
2993 spin_unlock(&ci->i_ceph_lock);
2995 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
3042 struct ceph_inode_info *ci = ceph_inode(inode);
3083 ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS);
3084 add_wait_queue(&ci->i_cap_wq, &wait);
3096 remove_wait_queue(&ci->i_cap_wq, &wait);
3097 ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS);
3112 ceph_put_cap_refs(ci, _got);
3135 if (S_ISREG(ci->netfs.inode.i_mode) &&
3136 ceph_has_inline_data(ci) &&
3152 ceph_put_cap_refs(ci, _got);
3185 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
3187 spin_lock(&ci->i_ceph_lock);
3188 ceph_take_cap_refs(ci, caps, false);
3189 spin_unlock(&ci->i_ceph_lock);
3197 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
3200 struct inode *inode = &ci->netfs.inode;
3208 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
3209 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
3232 static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
3235 struct inode *inode = &ci->netfs.inode;
3240 spin_lock(&ci->i_ceph_lock);
3242 --ci->i_pin_ref;
3244 if (--ci->i_rd_ref == 0)
3247 if (--ci->i_rdcache_ref == 0)
3250 if (--ci->i_fx_ref == 0)
3253 if (--ci->i_wb_ref == 0) {
3260 ceph_vinop(inode), ci->i_wb_ref+1, ci->i_wb_ref);
3263 if (--ci->i_wr_ref == 0) {
3268 WARN_ON_ONCE(ci->i_wb_ref);
3272 if (ci->i_wrbuffer_ref_head == 0 &&
3273 ci->i_dirty_caps == 0 &&
3274 ci->i_flushing_caps == 0) {
3275 BUG_ON(!ci->i_head_snapc);
3276 ceph_put_snap_context(ci->i_head_snapc);
3277 ci->i_head_snapc = NULL;
3280 if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
3284 if (check_flushsnaps && __ceph_have_pending_cap_snap(ci)) {
3286 list_last_entry(&ci->i_cap_snaps,
3291 if (ceph_try_drop_cap_snap(ci, capsnap))
3294 else if (__ceph_finish_cap_snap(ci, capsnap))
3298 spin_unlock(&ci->i_ceph_lock);
3306 ceph_check_caps(ci, 0);
3308 ceph_flush_snaps(ci, NULL);
3320 wake_up_all(&ci->i_cap_wq);
3325 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
3327 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_SYNC);
3330 void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had)
3332 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC);
3342 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
3345 struct inode *inode = &ci->netfs.inode;
3353 spin_lock(&ci->i_ceph_lock);
3354 ci->i_wrbuffer_ref -= nr;
3355 if (ci->i_wrbuffer_ref == 0) {
3360 if (ci->i_head_snapc == snapc) {
3361 ci->i_wrbuffer_ref_head -= nr;
3362 if (ci->i_wrbuffer_ref_head == 0 &&
3363 ci->i_wr_ref == 0 &&
3364 ci->i_dirty_caps == 0 &&
3365 ci->i_flushing_caps == 0) {
3366 BUG_ON(!ci->i_head_snapc);
3367 ceph_put_snap_context(ci->i_head_snapc);
3368 ci->i_head_snapc = NULL;
3371 inode, ceph_vinop(inode), ci->i_wrbuffer_ref+nr,
3372 ci->i_wrbuffer_ref_head+nr, ci->i_wrbuffer_ref,
3373 ci->i_wrbuffer_ref_head, last ? " LAST" : "");
3375 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
3387 WARN_ON_ONCE(ci->i_auth_cap);
3395 if (ceph_try_drop_cap_snap(ci, capsnap)) {
3398 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
3405 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
3406 ci->i_wrbuffer_ref, capsnap->dirty_pages,
3412 spin_unlock(&ci->i_ceph_lock);
3415 ceph_check_caps(ci, 0);
3417 ceph_flush_snaps(ci, NULL);
3420 wake_up_all(&ci->i_cap_wq);
3490 __releases(ci->i_ceph_lock)
3494 struct ceph_inode_info *ci = ceph_inode(inode);
3534 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
3538 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
3540 ci->i_rdcache_revoking = ci->i_rdcache_gen;
3557 WARN_ON(cap != ci->i_auth_cap);
3567 __check_cap_issue(ci, cap, newcaps);
3582 ci->i_btime = extra_info->btime;
3588 if (ci->fscrypt_auth_len != extra_info->fscrypt_auth_len ||
3589 memcmp(ci->fscrypt_auth, extra_info->fscrypt_auth,
3590 ci->fscrypt_auth_len))
3593 ci->fscrypt_auth_len,
3610 if (version > ci->i_xattrs.version) {
3613 if (ci->i_xattrs.blob)
3614 ceph_buffer_put(ci->i_xattrs.blob);
3615 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
3616 ci->i_xattrs.version = version;
3634 ci->i_files = extra_info->nfiles;
3635 ci->i_subdirs = extra_info->nsubdirs;
3640 s64 old_pool = ci->i_layout.pool_id;
3643 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout);
3644 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
3645 lockdep_is_held(&ci->i_ceph_lock));
3646 rcu_assign_pointer(ci->i_layout.pool_ns, extra_info->pool_ns);
3648 if (ci->i_layout.pool_id != old_pool ||
3650 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
3661 if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) {
3662 if (max_size != ci->i_max_size) {
3663 doutc(cl, "max_size %lld -> %llu\n", ci->i_max_size,
3665 ci->i_max_size = max_size;
3666 if (max_size >= ci->i_wanted_max_size) {
3667 ci->i_wanted_max_size = 0; /* reset */
3668 ci->i_requested_max_size = 0;
3675 wanted = __ceph_caps_wanted(ci);
3676 used = __ceph_caps_used(ci);
3677 dirty = __ceph_caps_dirty(ci);
3712 } else if (cap == ci->i_auth_cap) {
3730 if (cap == ci->i_auth_cap &&
3731 __ceph_caps_revoking_other(ci, cap, newcaps))
3746 if (cap == ci->i_auth_cap)
3753 extra_info->inline_version >= ci->i_inline_version) {
3754 ci->i_inline_version = extra_info->inline_version;
3755 if (ci->i_inline_version != CEPH_INLINE_NONE &&
3761 if (ci->i_auth_cap == cap) {
3765 if (ci->i_requested_max_size > max_size ||
3768 ci->i_requested_max_size = 0;
3772 ceph_kick_flushing_inode_caps(session, ci);
3776 spin_unlock(&ci->i_ceph_lock);
3797 wake_up_all(&ci->i_cap_wq);
3801 ceph_check_caps(ci, flags | CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
3803 ceph_check_caps(ci, flags | CHECK_CAPS_NOINVAL);
3814 __releases(ci->i_ceph_lock)
3816 struct ceph_inode_info *ci = ceph_inode(inode);
3828 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
3842 wake_ci |= __detach_cap_flush_from_ci(ci, cf);
3858 ceph_cap_string(ci->i_flushing_caps),
3859 ceph_cap_string(ci->i_flushing_caps & ~cleaned));
3864 ci->i_flushing_caps &= ~cleaned;
3871 if (ci->i_flushing_caps == 0) {
3872 if (list_empty(&ci->i_cap_flush_list)) {
3873 list_del_init(&ci->i_flushing_item);
3887 if (ci->i_dirty_caps == 0) {
3890 BUG_ON(!list_empty(&ci->i_dirty_item));
3892 if (ci->i_wr_ref == 0 &&
3893 ci->i_wrbuffer_ref_head == 0) {
3894 BUG_ON(!ci->i_head_snapc);
3895 ceph_put_snap_context(ci->i_head_snapc);
3896 ci->i_head_snapc = NULL;
3899 BUG_ON(list_empty(&ci->i_dirty_item));
3905 spin_unlock(&ci->i_ceph_lock);
3916 wake_up_all(&ci->i_cap_wq);
3926 struct ceph_inode_info *ci = ceph_inode(inode);
3931 lockdep_assert_held(&ci->i_ceph_lock);
3933 doutc(cl, "removing capsnap %p, %p %llx.%llx ci %p\n", capsnap,
3934 inode, ceph_vinop(inode), ci);
3937 ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
3942 if (list_empty(&ci->i_cap_flush_list))
3943 list_del_init(&ci->i_flushing_item);
3954 struct ceph_inode_info *ci = ceph_inode(inode);
3956 lockdep_assert_held(&ci->i_ceph_lock);
3972 struct ceph_inode_info *ci = ceph_inode(inode);
3980 doutc(cl, "%p %llx.%llx ci %p mds%d follows %lld\n", inode,
3981 ceph_vinop(inode), ci, session->s_mds, follows);
3983 spin_lock(&ci->i_ceph_lock);
3984 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
4002 spin_unlock(&ci->i_ceph_lock);
4008 wake_up_all(&ci->i_cap_wq);
4025 struct ceph_inode_info *ci = ceph_inode(inode);
4033 int dirty = __ceph_caps_dirty(ci);
4037 lockdep_assert_held(&ci->i_ceph_lock);
4072 struct ceph_inode_info *ci = ceph_inode(inode);
4092 spin_lock(&ci->i_ceph_lock);
4093 cap = __get_cap_for_mds(ci, mds);
4118 tcap = __get_cap_for_mds(ci, target);
4130 if (cap == ci->i_auth_cap) {
4131 ci->i_auth_cap = tcap;
4132 change_auth_cap_ses(ci, tcap->session);
4139 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
4144 if (!list_empty(&ci->i_cap_flush_list) &&
4145 ci->i_auth_cap == tcap) {
4147 list_move_tail(&ci->i_flushing_item,
4156 spin_unlock(&ci->i_ceph_lock);
4182 spin_unlock(&ci->i_ceph_lock);
4204 struct ceph_inode_info *ci = ceph_inode(inode);
4233 cap = __get_cap_for_mds(ci, mds);
4236 spin_unlock(&ci->i_ceph_lock);
4238 spin_lock(&ci->i_ceph_lock);
4249 __ceph_caps_issued(ci, &issued);
4250 issued |= __ceph_caps_dirty(ci);
4255 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
4333 struct ceph_inode_info *ci;
4471 ci = ceph_inode(inode);
4499 spin_lock(&ci->i_ceph_lock);
4510 spin_lock(&ci->i_ceph_lock);
4516 spin_unlock(&ci->i_ceph_lock);
4532 __ceph_caps_issued(ci, &extra_info.issued);
4533 extra_info.issued |= __ceph_caps_dirty(ci);
4546 spin_unlock(&ci->i_ceph_lock);
4552 spin_unlock(&ci->i_ceph_lock);
4604 * processed in this run. In this case, the ci->i_hold_caps_max will be
4611 struct ceph_inode_info *ci;
4620 ci = list_first_entry(&mdsc->cap_delay_list,
4623 if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
4625 delay = ci->i_hold_caps_max;
4628 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
4629 time_before(jiffies, ci->i_hold_caps_max))
4631 list_del_init(&ci->i_cap_delay_list);
4633 inode = igrab(&ci->netfs.inode);
4638 ceph_check_caps(ci, 0);
4664 struct ceph_inode_info *ci;
4670 ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
4672 inode = &ci->netfs.inode;
4677 ceph_check_caps(ci, CHECK_CAPS_FLUSH);
4712 void __ceph_touch_fmode(struct ceph_inode_info *ci,
4717 ci->i_last_rd = now;
4719 ci->i_last_wr = now;
4722 __ceph_is_any_real_caps(ci) &&
4723 list_empty(&ci->i_cap_delay_list))
4724 __cap_delay_requeue(mdsc, ci);
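
__ceph_touch_fmode() (4712) refreshes the last-use timestamps that __ceph_caps_file_wanted() compares against its cutoffs, then makes sure a delayed cap check is queued. Filled in with the jiffies read and the fmode tests the matches omit (both assumptions):

	void __ceph_touch_fmode(struct ceph_inode_info *ci,
				struct ceph_mds_client *mdsc, int fmode)
	{
		unsigned long now = jiffies;

		if (fmode & CEPH_FILE_MODE_RD)	/* assumed test */
			ci->i_last_rd = now;
		if (fmode & CEPH_FILE_MODE_WR)	/* assumed test */
			ci->i_last_wr = now;
		/* queue a delayed check if we hold caps and aren't queued yet */
		if (fmode &&
		    __ceph_is_any_real_caps(ci) &&
		    list_empty(&ci->i_cap_delay_list))
			__cap_delay_requeue(mdsc, ci);
	}
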
4727 void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
4729 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
4737 spin_lock(&ci->i_ceph_lock);
4744 if (i && ci->i_nr_by_mode[i])
4748 ci->i_nr_by_mode[i] += count;
4753 spin_unlock(&ci->i_ceph_lock);
4761 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count)
4763 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
4771 spin_lock(&ci->i_ceph_lock);
4774 BUG_ON(ci->i_nr_by_mode[i] < count);
4775 ci->i_nr_by_mode[i] -= count;
4783 if (i && ci->i_nr_by_mode[i])
4789 spin_unlock(&ci->i_ceph_lock);
4800 struct ceph_inode_info *ci = ceph_inode(inode);
4803 spin_lock(&ci->i_ceph_lock);
4805 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
4807 if (__ceph_caps_dirty(ci)) {
4814 ci->i_ceph_flags |= CEPH_I_FLUSH;
4815 if (!list_empty(&ci->i_cap_delay_list))
4816 list_del_init(&ci->i_cap_delay_list);
4817 list_add_tail(&ci->i_cap_delay_list,
4828 spin_unlock(&ci->i_ceph_lock);
4843 struct ceph_inode_info *ci = ceph_inode(inode);
4850 spin_lock(&ci->i_ceph_lock);
4851 used = __ceph_caps_used(ci);
4852 dirty = __ceph_caps_dirty(ci);
4861 cap = __get_cap_for_mds(ci, mds);
4877 int wanted = __ceph_caps_wanted(ci);
4889 if (cap == ci->i_auth_cap &&
4891 ci->i_requested_max_size = 0;
4915 spin_unlock(&ci->i_ceph_lock);
4982 struct ceph_inode_info *ci = ceph_inode(inode);
4987 lockdep_assert_held(&ci->i_ceph_lock);
4989 doutc(cl, "removing capsnaps, ci is %p, %p %llx.%llx\n",
4990 ci, inode, ceph_vinop(inode));
4992 while (!list_empty(&ci->i_cap_snaps)) {
4993 capsnap = list_first_entry(&ci->i_cap_snaps,
5000 wake_up_all(&ci->i_cap_wq);
5010 struct ceph_inode_info *ci = ceph_inode(inode);
5015 lockdep_assert_held(&ci->i_ceph_lock);
5017 doutc(cl, "removing cap %p, ci is %p, %p %llx.%llx\n",
5018 cap, ci, inode, ceph_vinop(inode));
5020 is_auth = (cap == ci->i_auth_cap);
5028 if (ci->i_wrbuffer_ref > 0)
5035 while (!list_empty(&ci->i_cap_flush_list)) {
5036 cf = list_first_entry(&ci->i_cap_flush_list,
5044 if (!list_empty(&ci->i_dirty_item)) {
5047 ceph_cap_string(ci->i_dirty_caps),
5049 ci->i_dirty_caps = 0;
5050 list_del_init(&ci->i_dirty_item);
5053 if (!list_empty(&ci->i_flushing_item)) {
5056 ceph_cap_string(ci->i_flushing_caps),
5058 ci->i_flushing_caps = 0;
5059 list_del_init(&ci->i_flushing_item);
5068 if (ci->i_wrbuffer_ref_head == 0 &&
5069 ci->i_wr_ref == 0 &&
5070 ci->i_dirty_caps == 0 &&
5071 ci->i_flushing_caps == 0) {
5072 ceph_put_snap_context(ci->i_head_snapc);
5073 ci->i_head_snapc = NULL;
5077 if (atomic_read(&ci->i_filelock_ref) > 0) {
5079 ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
5085 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
5086 cf = ci->i_prealloc_cap_flush;
5087 ci->i_prealloc_cap_flush = NULL;
5092 if (!list_empty(&ci->i_cap_snaps))