// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 * Author : Ram Pai (linuxram@us.ibm.com)
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <uapi/linux/mount.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
        return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

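/* return the first slave mount of @p */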
static inline struct mount *first_slave(struct mount *p)
{
        return hlist_entry(p->mnt_slave_list.first, struct mount, mnt_slave);
}

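/* return the slave that follows @p on its master's ->mnt_slave_list */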
static inline struct mount *next_slave(struct mount *p)
{
        return hlist_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

/* locks: namespace_shared && is_mounted(mnt) */
static struct mount *get_peer_under_root(struct mount *mnt,
                                         struct mnt_namespace *ns,
                                         const struct path *root)
{
        struct mount *m = mnt;

        do {
                /* Check the namespace first for optimization */
                if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
                        return m;

                m = next_peer(m);
        } while (m != mnt);

        return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * locks: namespace_shared
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
        struct mount *m;

        for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
                struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
                if (d)
                        return d->mnt_group_id;
        }

        return 0;
}

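/* is @m already committed to being unmounted? */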
static inline bool will_be_unmounted(struct mount *m)
{
        return m->mnt.mnt_flags & MNT_UMOUNT;
}

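/*
 * Hand all slaves of @mnt over to @to; if @to is NULL, the former
 * slaves simply become private.
 */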
static void transfer_propagation(struct mount *mnt, struct mount *to)
{
        struct hlist_node *p = NULL, *n;
        struct mount *m;

        hlist_for_each_entry_safe(m, n, &mnt->mnt_slave_list, mnt_slave) {
                m->mnt_master = to;
                if (!to)
                        hlist_del_init(&m->mnt_slave);
                else
                        p = &m->mnt_slave;
        }
        if (p)
                hlist_splice_init(&mnt->mnt_slave_list, p, &to->mnt_slave_list);
}

/*
 * EXCL[namespace_sem]
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
        struct mount *m = mnt->mnt_master;

        if (type == MS_SHARED) {
                set_mnt_shared(mnt);
                return;
        }
        if (IS_MNT_SHARED(mnt)) {
                if (list_empty(&mnt->mnt_share)) {
                        mnt_release_group_id(mnt);
                } else {
                        m = next_peer(mnt);
                        list_del_init(&mnt->mnt_share);
                        mnt->mnt_group_id = 0;
                }
                CLEAR_MNT_SHARED(mnt);
                transfer_propagation(mnt, m);
        }
        hlist_del_init(&mnt->mnt_slave);
        if (type == MS_SLAVE) {
                mnt->mnt_master = m;
                if (m)
                        hlist_add_head(&mnt->mnt_slave, &m->mnt_slave_list);
        } else {
                mnt->mnt_master = NULL;
                if (type == MS_UNBINDABLE)
                        mnt->mnt_t_flags |= T_UNBINDABLE;
                else
                        mnt->mnt_t_flags &= ~T_UNBINDABLE;
        }
}

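/*
 * Figure out where propagation from @m should be transferred once @m
 * goes away: follow the chain of would-be recipients (a surviving peer
 * if there is one, otherwise the master), skipping mounts that are
 * themselves about to be unmounted.  Each mount visited is taken out of
 * its peer group and off its master's slave list, gets marked, and has
 * ->mnt_master temporarily pointing at the next hop; set_destinations()
 * collapses those links to the destination returned here (may be NULL).
 */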
static struct mount *trace_transfers(struct mount *m)
{
        while (1) {
                struct mount *next = next_peer(m);

                if (next != m) {
                        list_del_init(&m->mnt_share);
                        m->mnt_group_id = 0;
                        m->mnt_master = next;
                } else {
                        if (IS_MNT_SHARED(m))
                                mnt_release_group_id(m);
                        next = m->mnt_master;
                }
                hlist_del_init(&m->mnt_slave);
                CLEAR_MNT_SHARED(m);
                SET_MNT_MARK(m);

                if (!next || !will_be_unmounted(next))
                        return next;
                if (IS_MNT_MARKED(next))
                        return next->mnt_master;
                m = next;
        }
}

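/*
 * Collapse the temporary ->mnt_master chain left by trace_transfers()
 * so that every mount on it points straight at the final @master.
 */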
static void set_destinations(struct mount *m, struct mount *master)
{
        struct mount *next;

        while ((next = m->mnt_master) != master) {
                m->mnt_master = master;
                m = next;
        }
}

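/*
 * Make all mounts in @set private in one go, handing their slaves over
 * to the closest recipient that is not itself about to be unmounted.
 */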
void bulk_make_private(struct list_head *set)
{
        struct mount *m;

        list_for_each_entry(m, set, mnt_list)
                if (!IS_MNT_MARKED(m))
                        set_destinations(m, trace_transfers(m));

        list_for_each_entry(m, set, mnt_list) {
                transfer_propagation(m, m->mnt_master);
                m->mnt_master = NULL;
                CLEAR_MNT_MARK(m);
        }
}

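/*
 * Step from @m to the next mount receiving propagation, without
 * descending into @m's own slaves: try the next peer or the next slave,
 * climbing back towards @origin's peer group whenever a slave list is
 * exhausted.  Returns NULL once the walk arrives back at @origin.
 */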
static struct mount *__propagation_next(struct mount *m,
                                        struct mount *origin)
{
        while (1) {
                struct mount *master = m->mnt_master;

                if (master == origin->mnt_master) {
                        struct mount *next = next_peer(m);
                        return (next == origin) ? NULL : next;
                } else if (m->mnt_slave.next)
                        return next_slave(m);

                /* back at master */
                m = master;
        }
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
                                      struct mount *origin)
{
        /* are there any slaves of this mount? */
        if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
                return first_slave(m);

        return __propagation_next(m, origin);
}

static struct mount *skip_propagation_subtree(struct mount *m,
                                              struct mount *origin)
{
        /*
         * Advance m past everything that gets propagation from it.
         */
        struct mount *p = __propagation_next(m, origin);

        while (p && peers(m, p))
                p = __propagation_next(p, origin);

        return p;
}

static struct mount *next_group(struct mount *m, struct mount *origin)
{
        while (1) {
                while (1) {
                        struct mount *next;
                        if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
                                return first_slave(m);
                        next = next_peer(m);
                        if (m->mnt_group_id == origin->mnt_group_id) {
                                if (next == origin)
                                        return NULL;
                        } else if (m->mnt_slave.next != &next->mnt_slave)
                                break;
                        m = next;
                }
                /* m is the last peer */
                while (1) {
                        struct mount *master = m->mnt_master;
                        if (m->mnt_slave.next)
                                return next_slave(m);
                        m = next_peer(master);
                        if (master->mnt_group_id == origin->mnt_group_id)
                                break;
                        if (master->mnt_slave.next == &m->mnt_slave)
                                break;
                        m = master;
                }
                if (m == origin)
                        return NULL;
        }
}

static bool need_secondary(struct mount *m, struct mountpoint *dest_mp)
{
        /* skip ones added by this propagate_mnt() */
        if (IS_MNT_NEW(m))
                return false;
        /* skip if mountpoint isn't visible in m */
        if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
                return false;
        /* skip if m is in the anon_ns */
        if (is_anon_ns(m->mnt_ns))
                return false;
        return true;
}

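/*
 * Pick which of the copies created so far the secondary for @m should
 * be cloned from (and thus become a slave of).  @last_copy is the most
 * recently created copy, @original is the copy made for dest_mnt itself.
 */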
static struct mount *find_master(struct mount *m,
                                 struct mount *last_copy,
                                 struct mount *original)
{
        struct mount *p;

        // ascend until there's a copy for something with the same master
        for (;;) {
                p = m->mnt_master;
                if (!p || IS_MNT_MARKED(p))
                        break;
                m = p;
        }
        while (!peers(last_copy, original)) {
                struct mount *parent = last_copy->mnt_parent;
                if (parent->mnt_master == p) {
                        if (!peers(parent, m))
                                last_copy = last_copy->mnt_master;
                        break;
                }
                last_copy = last_copy->mnt_master;
        }
        return last_copy;
}

/**
 * propagate_mnt() - create secondary copies for tree attachment
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list: list of secondaries to be attached.
 *
 * Create secondary copies for attaching a tree with root @source_mnt
 * at mount @dest_mnt with mountpoint @dest_mp. Link all new mounts
 * into a propagation graph. Set mountpoints for all secondaries,
 * link their roots into @tree_list via ->mnt_hash.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
                  struct mount *source_mnt, struct hlist_head *tree_list)
{
        struct mount *m, *n, *copy, *this;
        int err = 0, type;

        if (dest_mnt->mnt_master)
                SET_MNT_MARK(dest_mnt->mnt_master);

        /* iterate over peer groups, depth first */
        for (m = dest_mnt; m && !err; m = next_group(m, dest_mnt)) {
                if (m == dest_mnt) { // have one for dest_mnt itself
                        copy = source_mnt;
                        type = CL_MAKE_SHARED;
                        n = next_peer(m);
                        if (n == m)
                                continue;
                } else {
                        type = CL_SLAVE;
                        /* beginning of peer group among the slaves? */
                        if (IS_MNT_SHARED(m))
                                type |= CL_MAKE_SHARED;
                        n = m;
                }
                do {
                        if (!need_secondary(n, dest_mp))
                                continue;
                        if (type & CL_SLAVE) // first in this peer group
                                copy = find_master(n, copy, source_mnt);
                        this = copy_tree(copy, copy->mnt.mnt_root, type);
                        if (IS_ERR(this)) {
                                err = PTR_ERR(this);
                                break;
                        }
                        scoped_guard(mount_locked_reader)
                                mnt_set_mountpoint(n, dest_mp, this);
                        if (n->mnt_master)
                                SET_MNT_MARK(n->mnt_master);
                        copy = this;
                        hlist_add_head(&this->mnt_hash, tree_list);
                        err = count_mounts(n->mnt_ns, this);
                        if (err)
                                break;
                        type = CL_MAKE_SHARED;
                } while ((n = next_peer(n)) != m);
        }

        hlist_for_each_entry(n, tree_list, mnt_hash) {
                m = n->mnt_parent;
                if (m->mnt_master)
                        CLEAR_MNT_MARK(m->mnt_master);
        }
        if (dest_mnt->mnt_master)
                CLEAR_MNT_MARK(dest_mnt->mnt_master);
        return err;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
        return mnt_get_count(mnt) > count;
}

/**
 * propagation_would_overmount - check whether propagation from @from
 *                               would overmount @to
 * @from: shared mount
 * @to: mount to check
 * @mp: future mountpoint of @to on @from
 *
 * If @from propagates mounts to @to, @from and @to must either be peers
 * or one of the masters in the hierarchy of masters of @to must be a
 * peer of @from.
 *
 * If the root of the @to mount is equal to the future mountpoint @mp of
 * the @to mount on @from then @to will be overmounted by whatever is
 * propagated to it.
 *
 * Context: This function expects namespace_lock() to be held and that
 * @mp is stable.
 * Return: true if propagation from @from would overmount @to, false if not.
 */
bool propagation_would_overmount(const struct mount *from,
                                 const struct mount *to,
                                 const struct mountpoint *mp)
{
        if (!IS_MNT_SHARED(from))
                return false;

        if (to->mnt.mnt_root != mp->m_dentry)
                return false;

        for (const struct mount *m = to; m; m = m->mnt_master) {
                if (peers(from, m))
                        return true;
        }

        return false;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
        struct mount *parent = mnt->mnt_parent;

        /*
         * quickly check if the current mount can be unmounted.
         * If not, we don't have to go checking for all other
         * mounts
         */
        if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
                return 1;

        if (mnt == parent)
                return 0;

        for (struct mount *m = propagation_next(parent, parent); m;
             m = propagation_next(m, parent)) {
                struct list_head *head;
                struct mount *child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);

                if (!child)
                        continue;

                head = &child->mnt_mounts;
                if (!list_empty(head)) {
                        /*
                         * a mount that covers child completely wouldn't prevent
                         * it being pulled out; any other would.
                         */
                        if (!list_is_singular(head) || !child->overmount)
                                continue;
                }
                if (do_refcount_check(child, 1))
                        return 1;
        }
        return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *m, *child;

        BUG_ON(parent == mnt);

        for (m = propagation_next(parent, parent); m;
             m = propagation_next(m, parent)) {
                child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
                if (child)
                        child->mnt.mnt_flags &= ~MNT_LOCKED;
        }
}

static inline bool is_candidate(struct mount *m)
{
        return m->mnt_t_flags & T_UMOUNT_CANDIDATE;
}

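/*
 * Commit @m to being unmounted: mark it MNT_UMOUNT, take it off its
 * parent's list of children and out of its namespace, and put it on
 * @to_umount.
 */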
static void umount_one(struct mount *m, struct list_head *to_umount)
{
        m->mnt.mnt_flags |= MNT_UMOUNT;
        list_del_init(&m->mnt_child);
        move_from_ns(m);
        list_add_tail(&m->mnt_list, to_umount);
}

static void remove_from_candidate_list(struct mount *m)
{
        m->mnt_t_flags &= ~(T_MARKED | T_UMOUNT_CANDIDATE);
        list_del_init(&m->mnt_list);
}

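/*
 * For every mount in @set, find everything that would receive umount
 * propagation from it (the children at the same mountpoint in every
 * mount its parent propagates to), flag them as umount candidates and
 * collect those that are not already going away on @candidates.
 */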
static void gather_candidates(struct list_head *set,
                              struct list_head *candidates)
{
        struct mount *m, *p, *q;

        list_for_each_entry(m, set, mnt_list) {
                if (is_candidate(m))
                        continue;
                m->mnt_t_flags |= T_UMOUNT_CANDIDATE;
                p = m->mnt_parent;
                q = propagation_next(p, p);
                while (q) {
                        struct mount *child = __lookup_mnt(&q->mnt,
                                                           m->mnt_mountpoint);
                        if (child) {
                                /*
                                 * We might've already run into this one. That
                                 * must've happened on earlier iteration of the
                                 * outer loop; in that case we can skip those
                                 * parents that get propagation from q - there
                                 * will be nothing new on those as well.
                                 */
                                if (is_candidate(child)) {
                                        q = skip_propagation_subtree(q, p);
                                        continue;
                                }
                                child->mnt_t_flags |= T_UMOUNT_CANDIDATE;
                                if (!will_be_unmounted(child))
                                        list_add(&child->mnt_list, candidates);
                        }
                        q = propagation_next(q, p);
                }
        }
        list_for_each_entry(m, set, mnt_list)
                m->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
}

/*
 * We know that some child of @m can't be unmounted. In every place where
 * the chain of descent of @m has a child that does not overmount the root
 * of its parent, that parent can't be unmounted either.
 */
static void trim_ancestors(struct mount *m)
{
        struct mount *p;

        for (p = m->mnt_parent; is_candidate(p); m = p, p = p->mnt_parent) {
                if (IS_MNT_MARKED(m)) // all candidates beneath are overmounts
                        return;
                SET_MNT_MARK(m);
                if (m != p->overmount)
                        p->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
        }
}

/*
 * Find and exclude all umount candidates forbidden by @m
 * (see Documentation/filesystems/propagate_umount.txt)
 * If we can immediately tell that @m is OK to unmount (unlocked
 * and all children are already committed to unmounting) commit
 * to unmounting it.
 * Only @m itself might be taken from the candidates list;
 * anything found by trim_ancestors() is marked non-candidate
 * and left on the list.
 */
static void trim_one(struct mount *m, struct list_head *to_umount)
{
        bool remove_this = false, found = false, umount_this = false;
        struct mount *n;

        if (!is_candidate(m)) { // trim_ancestors() left it on list
                remove_from_candidate_list(m);
                return;
        }

        list_for_each_entry(n, &m->mnt_mounts, mnt_child) {
                if (!is_candidate(n)) {
                        found = true;
                        if (n != m->overmount) {
                                remove_this = true;
                                break;
                        }
                }
        }
        if (found) {
                trim_ancestors(m);
        } else if (!IS_MNT_LOCKED(m) && list_empty(&m->mnt_mounts)) {
                remove_this = true;
                umount_this = true;
        }
        if (remove_this) {
                remove_from_candidate_list(m);
                if (umount_this)
                        umount_one(m, to_umount);
        }
}

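/*
 * Deal with locked mounts among the remaining candidates: walk from @m
 * up through its candidate ancestors, dropping them from the list, and
 * commit to unmounting only the lower part of the chain in which every
 * locked mount gets to go away together with its parent.
 */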
static void handle_locked(struct mount *m, struct list_head *to_umount)
{
        struct mount *cutoff = m, *p;

        if (!is_candidate(m)) { // trim_ancestors() left it on list
                remove_from_candidate_list(m);
                return;
        }
        for (p = m; is_candidate(p); p = p->mnt_parent) {
                remove_from_candidate_list(p);
                if (!IS_MNT_LOCKED(p))
                        cutoff = p->mnt_parent;
        }
        if (will_be_unmounted(p))
                cutoff = p;
        while (m != cutoff) {
                umount_one(m, to_umount);
                m = m->mnt_parent;
        }
}

/*
 * @m is not going away, and it overmounts the top of a stack of mounts
 * that are going away. We know that all of those are fully overmounted
 * by the one above (@m being the topmost of the chain), so @m can be slid
 * into place where the bottom of the stack is attached.
 *
 * NOTE: here we temporarily violate a constraint - two mounts end up with
 * the same parent and mountpoint; that will be remedied as soon as we
 * return from propagate_umount() - its caller (umount_tree()) will detach
 * the stack from the parent it (and now @m) is attached to. umount_tree()
 * might choose to keep unmounted pieces stuck to each other, but it always
 * detaches them from the mounts that remain in the tree.
 */
static void reparent(struct mount *m)
{
        struct mount *p = m;
        struct mountpoint *mp;

        do {
                mp = p->mnt_mp;
                p = p->mnt_parent;
        } while (will_be_unmounted(p));

        mnt_change_mountpoint(p, mp, m);
        mnt_notify_add(m);
}

/**
 * propagate_umount - apply propagation rules to the set of mounts for umount()
 * @set: the list of mounts to be unmounted.
 *
 * Collect all mounts that receive propagation from the mounts in @set and
 * have no obstacles to being unmounted. Add these additional mounts to the
 * set.
 *
 * See Documentation/filesystems/propagate_umount.txt if you do anything in
 * this area.
 *
 * Locks held:
 * mount_lock (write_seqlock), namespace_sem (exclusive).
 */
void propagate_umount(struct list_head *set)
{
        struct mount *m, *p;
        LIST_HEAD(to_umount);   // committed to unmounting
        LIST_HEAD(candidates);  // undecided umount candidates

        // collect all candidates
        gather_candidates(set, &candidates);

        // reduce the set until it's non-shifting
        list_for_each_entry_safe(m, p, &candidates, mnt_list)
                trim_one(m, &to_umount);

        // ... and non-revealing
        while (!list_empty(&candidates)) {
                m = list_first_entry(&candidates, struct mount, mnt_list);
                handle_locked(m, &to_umount);
        }

        // now to_umount consists of all acceptable candidates
        // deal with reparenting of surviving overmounts on those
        list_for_each_entry(m, &to_umount, mnt_list) {
                struct mount *over = m->overmount;
                if (over && !will_be_unmounted(over))
                        reparent(over);
        }

        // and fold them into the set
        list_splice_tail_init(&to_umount, set);
}