// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <uapi/linux/mount.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

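/*
 * Slave-list accessors: first_slave() returns the first mount receiving
 * propagation from @p, next_slave() the mount after @p on the slave list
 * that @p itself is linked on.  Callers check for an empty list or end of
 * list themselves.
 */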
static inline struct mount *first_slave(struct mount *p)
{
	return hlist_entry(p->mnt_slave_list.first, struct mount, mnt_slave);
}

static inline struct mount *next_slave(struct mount *p)
{
	return hlist_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

static inline bool will_be_unmounted(struct mount *m)
{
	return m->mnt.mnt_flags & MNT_UMOUNT;
}

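/*
 * Find the mount that will keep feeding propagation to @mnt's slaves once
 * @mnt is gone: prefer a surviving peer of @mnt, otherwise walk up the
 * chain of masters until one is found that is not itself being unmounted.
 * May return NULL if no such mount exists.
 */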
static struct mount *propagation_source(struct mount *mnt)
{
	do {
		struct mount *m;
		for (m = next_peer(mnt); m != mnt; m = next_peer(m)) {
			if (!will_be_unmounted(m))
				return m;
		}
		mnt = mnt->mnt_master;
	} while (mnt && will_be_unmounted(mnt));
	return mnt;
}

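/*
 * Hand all slaves of @mnt over to @to: every mount on @mnt's
 * ->mnt_slave_list gets its ->mnt_master switched to @to and, if @to is
 * not NULL, the whole list is spliced into @to's ->mnt_slave_list.  With
 * @to == NULL the slaves simply stop receiving propagation.
 */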
static void transfer_propagation(struct mount *mnt, struct mount *to)
{
	struct hlist_node *p = NULL, *n;
	struct mount *m;

	hlist_for_each_entry_safe(m, n, &mnt->mnt_slave_list, mnt_slave) {
		m->mnt_master = to;
		if (!to)
			hlist_del_init(&m->mnt_slave);
		else
			p = &m->mnt_slave;
	}
	if (p)
		hlist_splice_init(&mnt->mnt_slave_list, p, &to->mnt_slave_list);
}

/*
 * EXCL[namespace_sem]
 */
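/*
 * Switch the propagation type of @mnt according to @type (MS_SHARED,
 * MS_SLAVE, MS_PRIVATE or MS_UNBINDABLE).  Leaving a shared peer group
 * releases the group id if @mnt was its last member, and any slaves of
 * @mnt are handed over to a surviving propagation source.
 *
 * Illustrative only: from userspace these transitions correspond to
 * operations such as
 *
 *	mount --make-shared /mnt	(MS_SHARED)
 *	mount --make-slave /mnt		(MS_SLAVE)
 *	mount --make-private /mnt	(MS_PRIVATE)
 *	mount --make-unbindable /mnt	(MS_UNBINDABLE)
 */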
void change_mnt_propagation(struct mount *mnt, int type)
{
	struct mount *m = mnt->mnt_master;

	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	if (IS_MNT_SHARED(mnt)) {
		m = propagation_source(mnt);
		if (list_empty(&mnt->mnt_share)) {
			mnt_release_group_id(mnt);
		} else {
			list_del_init(&mnt->mnt_share);
			mnt->mnt_group_id = 0;
		}
		CLEAR_MNT_SHARED(mnt);
		transfer_propagation(mnt, m);
	}
	hlist_del_init(&mnt->mnt_slave);
	if (type == MS_SLAVE) {
		mnt->mnt_master = m;
		if (m)
			hlist_add_head(&mnt->mnt_slave, &m->mnt_slave_list);
	} else {
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt_t_flags |= T_UNBINDABLE;
		else
			mnt->mnt_t_flags &= ~T_UNBINDABLE;
	}
}

static struct mount *__propagation_next(struct mount *m,
					 struct mount *origin)
{
	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
		return first_slave(m);

	return __propagation_next(m, origin);
}

static struct mount *skip_propagation_subtree(struct mount *m,
						struct mount *origin)
{
	/*
	 * Advance m past everything that gets propagation from it.
	 */
	struct mount *p = __propagation_next(m, origin);

	while (p && peers(m, p))
		p = __propagation_next(p, origin);

	return p;
}

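/*
 * Step through the propagation graph one peer group at a time.  Unlike
 * propagation_next(), which visits every mount, this returns only the
 * first mount encountered in each group downstream of @origin, so callers
 * such as propagate_mnt() can walk the remaining peers themselves with
 * next_peer().
 */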
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

static bool need_secondary(struct mount *m, struct mountpoint *dest_mp)
{
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return false;
	/* skip if mountpoint isn't visible in m */
	if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
		return false;
	/* skip if m is in the anon_ns */
	if (is_anon_ns(m->mnt_ns))
		return false;
	return true;
}

static struct mount *find_master(struct mount *m,
				struct mount *last_copy,
				struct mount *original)
{
	struct mount *p;

	// ascend until there's a copy for something with the same master
	for (;;) {
		p = m->mnt_master;
		if (!p || IS_MNT_MARKED(p))
			break;
		m = p;
	}
	while (!peers(last_copy, original)) {
		struct mount *parent = last_copy->mnt_parent;
		if (parent->mnt_master == p) {
			if (!peers(parent, m))
				last_copy = last_copy->mnt_master;
			break;
		}
		last_copy = last_copy->mnt_master;
	}
	return last_copy;
}

/**
 * propagate_mnt() - create secondary copies for tree attachment
 * @dest_mnt:    destination mount.
 * @dest_mp:     destination mountpoint.
 * @source_mnt:  source mount.
 * @tree_list:   list of secondaries to be attached.
 *
 * Create secondary copies for attaching a tree with root @source_mnt
 * at mount @dest_mnt with mountpoint @dest_mp.  Link all new mounts
 * into a propagation graph.  Set mountpoints for all secondaries,
 * link their roots into @tree_list via ->mnt_hash.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		  struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n, *copy, *this;
	int err = 0, type;

	if (dest_mnt->mnt_master)
		SET_MNT_MARK(dest_mnt->mnt_master);

	/* iterate over peer groups, depth first */
	for (m = dest_mnt; m && !err; m = next_group(m, dest_mnt)) {
		if (m == dest_mnt) { // have one for dest_mnt itself
			copy = source_mnt;
			type = CL_MAKE_SHARED;
			n = next_peer(m);
			if (n == m)
				continue;
		} else {
			type = CL_SLAVE;
			/* beginning of peer group among the slaves? */
			if (IS_MNT_SHARED(m))
				type |= CL_MAKE_SHARED;
			n = m;
		}
		do {
			if (!need_secondary(n, dest_mp))
				continue;
			if (type & CL_SLAVE) // first in this peer group
				copy = find_master(n, copy, source_mnt);
			this = copy_tree(copy, copy->mnt.mnt_root, type);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				break;
			}
			read_seqlock_excl(&mount_lock);
			mnt_set_mountpoint(n, dest_mp, this);
			read_sequnlock_excl(&mount_lock);
			if (n->mnt_master)
				SET_MNT_MARK(n->mnt_master);
			copy = this;
			hlist_add_head(&this->mnt_hash, tree_list);
			err = count_mounts(n->mnt_ns, this);
			if (err)
				break;
			type = CL_MAKE_SHARED;
		} while ((n = next_peer(n)) != m);
	}

	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	if (dest_mnt->mnt_master)
		CLEAR_MNT_MARK(dest_mnt->mnt_master);
	return err;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/**
 * propagation_would_overmount - check whether propagation from @from
 *                               would overmount @to
 * @from: shared mount
 * @to:   mount to check
 * @mp:   future mountpoint of @to on @from
 *
 * If @from propagates mounts to @to, @from and @to must either be peers
 * or one of the masters in the hierarchy of masters of @to must be a
 * peer of @from.
 *
 * If the root of the @to mount is equal to the future mountpoint @mp of
 * the @to mount on @from then @to will be overmounted by whatever is
 * propagated to it.
 *
 * Context: This function expects namespace_lock() to be held and that
 *          @mp is stable.
 * Return: If @from overmounts @to, true is returned, false if not.
 */
bool propagation_would_overmount(const struct mount *from,
				 const struct mount *to,
				 const struct mountpoint *mp)
{
	if (!IS_MNT_SHARED(from))
		return false;

	if (to->mnt.mnt_root != mp->m_dentry)
		return false;

	for (const struct mount *m = to; m; m = m->mnt_master) {
		if (peers(from, m))
			return true;
	}

	return false;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *parent = mnt->mnt_parent;

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	if (mnt == parent)
		return 0;

	for (struct mount *m = propagation_next(parent, parent); m;
	     m = propagation_next(m, parent)) {
		struct list_head *head;
		struct mount *child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);

		if (!child)
			continue;

		head = &child->mnt_mounts;
		if (!list_empty(head)) {
			/*
			 * a mount that covers child completely wouldn't prevent
			 * it being pulled out; any other would.
			 */
			if (!list_is_singular(head) || !child->overmount)
				continue;
		}
		if (do_refcount_check(child, 1))
			return 1;
	}
	return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

static inline bool is_candidate(struct mount *m)
{
	return m->mnt_t_flags & T_UMOUNT_CANDIDATE;
}

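/*
 * Commit @m to being unmounted: mark it MNT_UMOUNT, detach it from its
 * parent's list of children and from its namespace, and queue it on
 * @to_umount.
 */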
static void umount_one(struct mount *m, struct list_head *to_umount)
{
	m->mnt.mnt_flags |= MNT_UMOUNT;
	list_del_init(&m->mnt_child);
	move_from_ns(m);
	list_add_tail(&m->mnt_list, to_umount);
}

static void remove_from_candidate_list(struct mount *m)
{
	m->mnt_t_flags &= ~(T_MARKED | T_UMOUNT_CANDIDATE);
	list_del_init(&m->mnt_list);
}

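/*
 * Build the initial list of umount candidates: for every mount in @set,
 * find all mounts that would receive the unmount by propagation, tag them
 * T_UMOUNT_CANDIDATE and put the ones not already doomed on @candidates.
 * The mounts in @set themselves keep the tag only for the duration of the
 * scan; it is stripped again before returning.
 */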
static void gather_candidates(struct list_head *set,
			      struct list_head *candidates)
{
	struct mount *m, *p, *q;

	list_for_each_entry(m, set, mnt_list) {
		if (is_candidate(m))
			continue;
		m->mnt_t_flags |= T_UMOUNT_CANDIDATE;
		p = m->mnt_parent;
		q = propagation_next(p, p);
		while (q) {
			struct mount *child = __lookup_mnt(&q->mnt,
							   m->mnt_mountpoint);
			if (child) {
				/*
				 * We might've already run into this one.  That
				 * must've happened on earlier iteration of the
				 * outer loop; in that case we can skip those
				 * parents that get propagation from q - there
				 * will be nothing new on those as well.
				 */
				if (is_candidate(child)) {
					q = skip_propagation_subtree(q, p);
					continue;
				}
				child->mnt_t_flags |= T_UMOUNT_CANDIDATE;
				if (!will_be_unmounted(child))
					list_add(&child->mnt_list, candidates);
			}
			q = propagation_next(q, p);
		}
	}
	list_for_each_entry(m, set, mnt_list)
		m->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
}

/*
 * We know that some child of @m can't be unmounted.  In all places where the
 * chain of descent of @m has a child that is not overmounting the root of its
 * parent, that parent can't be unmounted either.
 */
static void trim_ancestors(struct mount *m)
{
	struct mount *p;

	for (p = m->mnt_parent; is_candidate(p); m = p, p = p->mnt_parent) {
		if (IS_MNT_MARKED(m))	// all candidates beneath are overmounts
			return;
		SET_MNT_MARK(m);
		if (m != p->overmount)
			p->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
	}
}

/*
 * Find and exclude all umount candidates forbidden by @m
 * (see Documentation/filesystems/propagate_umount.txt)
 * If we can immediately tell that @m is OK to unmount (unlocked
 * and all children are already committed to unmounting) commit
 * to unmounting it.
 * Only @m itself might be taken from the candidates list;
 * anything found by trim_ancestors() is marked non-candidate
 * and left on the list.
 */
static void trim_one(struct mount *m, struct list_head *to_umount)
{
	bool remove_this = false, found = false, umount_this = false;
	struct mount *n;

	if (!is_candidate(m)) { // trim_ancestors() left it on list
		remove_from_candidate_list(m);
		return;
	}

	list_for_each_entry(n, &m->mnt_mounts, mnt_child) {
		if (!is_candidate(n)) {
			found = true;
			if (n != m->overmount) {
				remove_this = true;
				break;
			}
		}
	}
	if (found) {
		trim_ancestors(m);
	} else if (!IS_MNT_LOCKED(m) && list_empty(&m->mnt_mounts)) {
		remove_this = true;
		umount_this = true;
	}
	if (remove_this) {
		remove_from_candidate_list(m);
		if (umount_this)
			umount_one(m, to_umount);
	}
}

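/*
 * Resolve a chain of candidates that may contain locked mounts.  Walk up
 * from @m through its candidate ancestors, dropping them all from the
 * candidates list, and commit to unmounting the part of the chain from @m
 * up to the topmost candidate that is not locked; locked candidates above
 * that point must stay attached to their surviving parents, so they are
 * left mounted.  If the first non-candidate ancestor is itself going away,
 * the whole chain can go.
 */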
static void handle_locked(struct mount *m, struct list_head *to_umount)
{
	struct mount *cutoff = m, *p;

	if (!is_candidate(m)) { // trim_ancestors() left it on list
		remove_from_candidate_list(m);
		return;
	}
	for (p = m; is_candidate(p); p = p->mnt_parent) {
		remove_from_candidate_list(p);
		if (!IS_MNT_LOCKED(p))
			cutoff = p->mnt_parent;
	}
	if (will_be_unmounted(p))
		cutoff = p;
	while (m != cutoff) {
		umount_one(m, to_umount);
		m = m->mnt_parent;
	}
}

/*
 * @m is not going away, and it overmounts the top of a stack of mounts
 * that are going away.  We know that all of those are fully overmounted
 * by the one above (@m being the topmost of the chain), so @m can be slid
 * in place where the bottom of the stack is attached.
 *
 * NOTE: here we temporarily violate a constraint - two mounts end up with
 * the same parent and mountpoint; that will be remedied as soon as we
 * return from propagate_umount() - its caller (umount_tree()) will detach
 * the stack from the parent it (and now @m) is attached to.  umount_tree()
 * might choose to keep unmounted pieces stuck to each other, but it always
 * detaches them from the mounts that remain in the tree.
 */
static void reparent(struct mount *m)
{
	struct mount *p = m;
	struct mountpoint *mp;

	do {
		mp = p->mnt_mp;
		p = p->mnt_parent;
	} while (will_be_unmounted(p));

	mnt_change_mountpoint(p, mp, m);
	mnt_notify_add(m);
}

/**
 * propagate_umount - apply propagation rules to the set of mounts for umount()
 * @set: the list of mounts to be unmounted.
 *
 * Collect all mounts that receive propagation from the mount in @set and have
 * no obstacles to being unmounted.  Add these additional mounts to the set.
 *
 * See Documentation/filesystems/propagate_umount.txt if you do anything in
 * this area.
 *
 * Locks held:
 * mount_lock (write_seqlock), namespace_sem (exclusive).
 */
void propagate_umount(struct list_head *set)
{
	struct mount *m, *p;
	LIST_HEAD(to_umount);	// committed to unmounting
	LIST_HEAD(candidates);	// undecided umount candidates

	// collect all candidates
	gather_candidates(set, &candidates);

	// reduce the set until it's non-shifting
	list_for_each_entry_safe(m, p, &candidates, mnt_list)
		trim_one(m, &to_umount);

	// ... and non-revealing
	while (!list_empty(&candidates)) {
		m = list_first_entry(&candidates, struct mount, mnt_list);
		handle_locked(m, &to_umount);
	}

	// now to_umount consists of all acceptable candidates
	// deal with reparenting of remaining overmounts on those
	list_for_each_entry(m, &to_umount, mnt_list) {
		if (m->overmount)
			reparent(m->overmount);
	}

	// and fold them into the set
	list_splice_tail_init(&to_umount, set);
}