// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

enum evict_behavior {
	EVICT_SHOULD_DELETE,
	EVICT_SHOULD_SKIP_DELETE,
	EVICT_SHOULD_DEFER_DELETE,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

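	/*
	 * Splice the jindex list onto a local list head under the spinlock,
	 * so the journal descriptors can be torn down (and their inodes
	 * iput) without holding sd_jindex_spin.
	 */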
	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	down_write(&sdp->sd_log_flush_lock);
	sdp->sd_jdesc = NULL;
	up_write(&sdp->sd_log_flush_lock);

	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		BUG_ON(jd->jd_log_bio);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

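	/* A journal must be at least 8 MiB (8 << 20) and at most 1 GiB (BIT(30)). */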
	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	int error;

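	/*
	 * Invalidate any cached metadata for our own journal glock so that
	 * journal blocks are re-read from disk rather than from possibly
	 * stale cached copies (e.g. after the journal has been recovered).
	 */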
	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	if (sdp->sd_log_sequence == 0) {
		fs_err(sdp, "unknown status of our own journal jid %d",
		       sdp->sd_lockstruct.ls_jid);
		return -EIO;
	}

	error = gfs2_quota_init(sdp);
	if (!error && gfs2_withdrawing_or_withdrawn(sdp))
		error = -EIO;
	if (!error)
		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

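	/*
	 * Spectator mounts hold no journal and thus have no local statfs
	 * change file, so they only read in the master statfs file; other
	 * mounts also read in their local (per-journal) change file.
	 */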
	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	}

	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	s64 x, y;
	int need_sync = 0;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
			       sizeof(struct gfs2_dinode));
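	/*
	 * With the "statfs_percent" mount option, request a sync of the
	 * local changes into the master statfs file once the accumulated
	 * local free-block delta reaches that percentage of the master free
	 * count.  For example, with statfs_percent=5, a local delta of 5%
	 * of m_sc->sc_free (in either direction) triggers
	 * gfs2_wake_up_statfs().
	 */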
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

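/*
 * Fold the accumulated local statfs changes into the master statfs file and
 * zero the local change file.  Both buffers are added to the current
 * transaction before being modified.
 */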
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh;

	update_statfs(sdp, m_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error, error2;

	/*
	 * Grab all the journal glocks in SH mode.  We are *probably* doing
	 * that to prevent recovery.
	 */

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	gfs2_freeze_unlock(sdp);

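	/*
	 * The shared hold on the freeze glock was just dropped; now take it
	 * in EX mode.  Holding it exclusively prevents any further writes to
	 * the filesystem cluster-wide while the journal heads are checked.
	 */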
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto relock_shared;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (!error)
		goto out;  /* success */

	gfs2_freeze_unlock(sdp);

relock_shared:
	error2 = gfs2_freeze_lock_shared(sdp);
	gfs2_assert_withdraw(sdp, !error2);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

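/*
 * gfs2_dinode_out - serialize an in-core inode into its on-disk (big-endian)
 * dinode representation in @buf.
 */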
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
	str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
	str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
	str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
	str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

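	/*
	 * A full log flush is needed for synchronous writeback and for
	 * journaled-data inodes, whose data goes through the journal rather
	 * than through ordinary writeback.
	 */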
	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and freeze glock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (unlikely(!ip->i_gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 */

void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (!test_bit(SDF_KILL, &sdp->sd_flags))
		gfs2_flush_delete_work(sdp);

	gfs2_destroy_threads(sdp);

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		/* We do two log flushes here. The first one commits dirty inodes
		 * and rgrps to the journal, but queues up revokes to the ail list.
		 * The second flush writes out and removes the revokes.
		 *
		 * The first must be done before the FLUSH_SHUTDOWN code
		 * clears the LIVE flag, otherwise it will not be able to start
		 * a transaction to write its revokes, and the error will cause
		 * a withdraw of the file system. */
		gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
	}
	gfs2_quota_cleanup(sdp);
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb))
		gfs2_make_fs_ro(sdp);
	else {
		if (gfs2_withdrawing_or_withdrawn(sdp))
			gfs2_destroy_threads(sdp);

		gfs2_quota_cleanup(sdp);
	}

	WARN_ON(gfs2_withdrawing(sdp));

	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */

	gfs2_freeze_unlock(sdp);

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		brelse(sdp->sd_sc_bh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/*  Take apart glock structures and buffer lists  */
	gfs2_gl_hash_clear(sdp);
	iput(sdp->sd_inode);
	gfs2_delete_debugfs_file(sdp);

	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = gfs2_freeze_lock_shared(sdp);
	if (error)
		goto fail;
	error = thaw_super(sb, who, freeze_owner);
	if (!error)
		return 0;

fail:
	fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
	gfs2_assert_withdraw(sdp, 0);
	return error;
}

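/*
 * gfs2_freeze_func - freeze work triggered by a remote freeze request
 *
 * Queued from the freeze glock callback (see freeze_go_callback()) when
 * another node wants to freeze the filesystem: freeze the local superblock,
 * drop our shared hold on the freeze glock so the remote node can acquire
 * it exclusively, and thaw again once the freeze glock can be re-acquired
 * in shared mode (i.e. once the remote node has thawed the filesystem).
 */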
void gfs2_freeze_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;
	int error;

	mutex_lock(&sdp->sd_freeze_mutex);
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto freeze_failed;

	error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
	if (error)
		goto freeze_failed;

	gfs2_freeze_unlock(sdp);
	set_bit(SDF_FROZEN, &sdp->sd_flags);

	error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL);
	if (error)
		goto out;

	clear_bit(SDF_FROZEN, &sdp->sd_flags);
	goto out;

freeze_failed:
	fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
}

/**
 * gfs2_freeze_super - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: freeze flags
 * @freeze_owner: owner of the freeze
 *
 */

static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return -EBUSY;
	}

	for (;;) {
		error = freeze_super(sb, who, freeze_owner);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error) {
			set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
			set_bit(SDF_FROZEN, &sdp->sd_flags);
			break;
		}

		error = gfs2_do_thaw(sdp, who, freeze_owner);
		if (error)
			goto out;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

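/*
 * gfs2_freeze_fs - the ->freeze_fs hook, called from freeze_super() after
 * writes have been blocked; it flushes the journal with the FREEZE flag so
 * the frozen on-disk image is consistent.
 */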
static int gfs2_freeze_fs(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
		if (gfs2_withdrawing_or_withdrawn(sdp))
			return -EIO;
	}
	return 0;
}

/**
 * gfs2_thaw_super - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: freeze flags
 * @freeze_owner: owner of the freeze
 *
 */

static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who,
			   const void *freeze_owner)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return -EINVAL;
	}

	atomic_inc(&sb->s_active);
	gfs2_freeze_unlock(sdp);

	error = gfs2_do_thaw(sdp, who, freeze_owner);

	if (!error) {
		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		clear_bit(SDF_FROZEN, &sdp->sd_flags);
	}
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
	return error;
}

void gfs2_thaw_freeze_initiator(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(sdp);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
}

/**
 * statfs_slow_fill - add a resource group's counts to the statfs totals
 * @rgd: the resource group
 * @sc: the sc structure to fill in
 *
 * Returns: 0
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error is returned to the caller; a pending signal aborts the scan
 * with -ERESTARTSYS.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

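	/*
	 * Issue up to "slots" asynchronous shared glock requests at a time,
	 * harvesting completed ones and adding their resource group counts
	 * to the totals, until every resource group has been visited.
	 */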
	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

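	/*
	 * The local statfs changes of other nodes have not been folded in
	 * yet, so the combined counts can be transiently off; clamp them to
	 * sane values before reporting.
	 */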
	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The name of the link
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;
	buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (glock_needs_demote(gl))
			clear_nlink(inode);
	}

	/*
	 * When an inode's link count has dropped to zero while we are under
	 * memory pressure, defer deleting the inode to the delete workqueue.
	 * This avoids calling into DLM under memory pressure, which can
	 * deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_verify_delete(gl, true))
			gfs2_glock_put_async(gl);
		return 0;
	}

	/*
	 * No longer cache inodes when trying to evict them all.
	 */
	if (test_bit(SDF_EVICTING, &sdp->sd_flags))
		return 1;

	return generic_drop_inode(inode);
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;

	spin_lock(&sdp->sd_tune.gt_spin);
	logd_secs = sdp->sd_tune.gt_logd_secs;
	quota_quantum = sdp->sd_tune.gt_quota_quantum;
	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
	statfs_slow = sdp->sd_tune.gt_statfs_slow;
	spin_unlock(&sdp->sd_tune.gt_spin);

	if (is_subdir(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		case GFS2_QUOTA_QUIET:
			state = "quiet";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	if (logd_secs != 30)
		seq_printf(s, ",commit=%d", logd_secs);
	if (statfs_quantum != 30)
		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
	else if (statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	if (quota_quantum != 60)
		seq_printf(s, ",quota_quantum=%d", quota_quantum);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

/**
 * gfs2_glock_put_eventually
 * @gl:	The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_put_async(gl);
	else
		gfs2_glock_put(gl);
}

static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we will immediately get
	 * exclusive access to the iopen glock here.
	 *
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request (see iopen_go_callback()).  If they do not have
	 * the inode open, they are expected to evict the cached inode and
	 * release the lock, allowing us to proceed.
	 *
	 * Otherwise, if they cannot evict the inode, they are expected to poke
	 * the inode glock (note: not the iopen glock).  We will notice that
	 * and stop waiting for the iopen glock immediately.  The other node(s)
	 * are then expected to take care of deleting the inode when they no
	 * longer use it.
	 *
	 * As a last resort, if another node keeps holding the iopen glock
	 * without showing any activity on the inode glock, we will eventually
	 * time out and fail the iopen glock upgrade.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return EVICT_SHOULD_SKIP_DELETE;

	wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		glock_needs_demote(ip->i_gl),
		5 * HZ);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		if (glock_needs_demote(ip->i_gl))
			return EVICT_SHOULD_SKIP_DELETE;
		return EVICT_SHOULD_DEFER_DELETE;
	}
	error = gfs2_glock_holder_ready(gh);
	if (error)
		return EVICT_SHOULD_SKIP_DELETE;
	return EVICT_SHOULD_DELETE;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum evict_behavior evict_should_delete(struct inode *inode,
					       struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags))
		return EVICT_SHOULD_DEFER_DELETE;

	/* Deletes should never happen under memory pressure anymore.  */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return EVICT_SHOULD_DEFER_DELETE;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret))
		return EVICT_SHOULD_SKIP_DELETE;

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return EVICT_SHOULD_SKIP_DELETE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return EVICT_SHOULD_SKIP_DELETE;

	ret = gfs2_instantiate(gh);
	if (ret)
		return EVICT_SHOULD_SKIP_DELETE;

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return EVICT_SHOULD_SKIP_DELETE;

	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
		return gfs2_upgrade_iopen_glock(inode);
	return EVICT_SHOULD_DELETE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip, true);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/*
	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
	 * can get called to recreate it, or even gfs2_inode_lookup() if the
	 * inode was recreated on another node in the meantime.
	 *
	 * However, inserting the new inode into the inode hash table will not
	 * succeed until the old inode is removed, and that only happens after
	 * ->evict_inode() returns.  The new inode is attached to its inode and
	 * iopen glocks after inserting it into the inode hash table, so at
	 * that point we can be sure that both glocks are unused.
	 */

	ret = gfs2_dinode_dealloc(ip);
	if (!ret && ip->i_gl)
		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);

out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

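	/*
	 * Start a transaction that reserves no metadata blocks but leaves
	 * room for revokes; the page truncation below must run inside a
	 * transaction and may generate revokes for journaled buffers.
	 */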
	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	enum evict_behavior behavior;
	int ret;

	gfs2_holder_mark_uninitialized(&gh);
	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
		goto out;

	/*
	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
	 * system files without having an active journal to write to.  In that
	 * case, skip the filesystem evict.
	 */
	if (!sdp->sd_jdesc)
		goto out;

	behavior = evict_should_delete(inode, &gh);
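	/*
	 * If deletion should be deferred and we are not unmounting, hand the
	 * inode over to the delete workqueue via its iopen glock: take an
	 * extra glock reference for the queued work, and drop it again if
	 * the work could not be queued.
	 */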
	if (behavior == EVICT_SHOULD_DEFER_DELETE &&
	    !test_bit(SDF_KILL, &sdp->sd_flags)) {
		struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;

		if (io_gl) {
			gfs2_glock_hold(io_gl);
			if (!gfs2_queue_verify_delete(io_gl, true))
				gfs2_glock_put(io_gl);
			goto out;
		}
		behavior = EVICT_SHOULD_SKIP_DELETE;
	}
	if (behavior == EVICT_SHOULD_DELETE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_deltree(&ip->i_res);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		gfs2_glock_hold(gl);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_put_eventually(ip->i_gl);
		rcu_assign_pointer(ip->i_gl, NULL);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_no_addr = 0;
	ip->i_no_formal_ino = 0;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_diskflags = 0;
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze_super,
	.freeze_fs		= gfs2_freeze_fs,
	.thaw_super		= gfs2_thaw_super,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};