1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/mount.h>
3 #include <linux/seq_file.h>
4 #include <linux/poll.h>
5 #include <linux/ns_common.h>
6 #include <linux/fs_pin.h>
7
8 extern struct file_system_type nullfs_fs_type;
9 extern struct list_head notify_list;
10
/*
 * A mount namespace: one per-namespace view of the mount tree.
 * All mounts belonging to the namespace live in the @mounts rbtree;
 * the cached first/last node pointers are maintained by move_from_ns()
 * and friends.
 */
struct mnt_namespace {
	struct ns_common	ns;
	struct mount *root;			/* root mount of this namespace */
	struct {
		struct rb_root	mounts;		/* Protected by namespace_sem */
		struct rb_node	*mnt_last_node;	/* last (rightmost) mount in the rbtree */
		struct rb_node	*mnt_first_node; /* first (leftmost) mount in the rbtree */
	};
	struct user_namespace	*user_ns;	/* owning user namespace */
	struct ucounts		*ucounts;
	wait_queue_head_t	poll;		/* NOTE(review): presumably woken on mount-table changes — confirm against users */
	u64			seq_origin;	/* Sequence number of origin mount namespace */
	u64			event;
#ifdef CONFIG_FSNOTIFY
	__u32			n_fsnotify_mask;
	struct fsnotify_mark_connector __rcu *n_fsnotify_marks;
#endif
	unsigned int		nr_mounts;	/* # of mounts in the namespace */
	unsigned int		pending_mounts;
	refcount_t		passive;	/* number references not pinning @mounts */
	bool			is_anon;	/* anonymous namespace (see is_anon_ns()) */
} __randomize_layout;
33
/* Per-CPU mount reference/writer counters (used when CONFIG_SMP). */
struct mnt_pcp {
	int mnt_count;		/* per-CPU contribution to the mount refcount */
	int mnt_writers;	/* per-CPU count of writers on this mount */
};
38
/*
 * A dentry that has something mounted on it.  Shared by all mounts
 * attached to the same dentry (they hang off @m_list).
 */
struct mountpoint {
	struct hlist_node m_hash;	/* link in the mountpoint hash table */
	struct dentry *m_dentry;	/* the dentry being mounted upon */
	struct hlist_head m_list;	/* mounts using this mountpoint (via mnt_mp_list) */
};
44
/*
 * Kernel-internal representation of a mount.  The user-visible part is
 * the embedded vfsmount (@mnt); real_mount() converts back from it.
 */
struct mount {
	struct hlist_node mnt_hash;	/* link in the mount hash table */
	struct mount *mnt_parent;	/* parent mount; points to self for a tree root */
	struct dentry *mnt_mountpoint;	/* dentry in the parent we are mounted on */
	struct vfsmount mnt;		/* embedded public part */
	union {
		struct rb_node mnt_node;	/* node in the ns->mounts rbtree */
		struct rcu_head mnt_rcu;	/* for RCU-delayed freeing */
		struct llist_node mnt_llist;	/* for deferred-cleanup lists */
	};
#ifdef CONFIG_SMP
	struct mnt_pcp __percpu *mnt_pcp;	/* per-CPU count/writer counters */
#else
	int mnt_count;
	int mnt_writers;
#endif
	struct list_head mnt_mounts;	/* list of children, anchored here */
	struct list_head mnt_child;	/* and going through their mnt_child */
	struct mount *mnt_next_for_sb;	/* the next two fields are hlist_node, */
	struct mount * __aligned(1) *mnt_pprev_for_sb;
					/* except that LSB of pprev is stolen */
#define WRITE_HOLD 1		/* ... for use by mnt_hold_writers() */
	const char *mnt_devname;	/* Name of device e.g. /dev/dsk/hda1 */
	struct list_head mnt_list;
	struct list_head mnt_expire;	/* link in fs-specific expiry list */
	struct list_head mnt_share;	/* circular list of shared mounts */
	struct hlist_head mnt_slave_list;/* list of slave mounts */
	struct hlist_node mnt_slave;	/* slave list entry */
	struct mount *mnt_master;	/* slave is on master->mnt_slave_list */
	struct mnt_namespace *mnt_ns;	/* containing namespace */
	struct mountpoint *mnt_mp;	/* where is it mounted */
	union {
		struct hlist_node mnt_mp_list;	/* list mounts with the same mountpoint */
		struct hlist_node mnt_umount;
	};
#ifdef CONFIG_FSNOTIFY
	struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
	__u32 mnt_fsnotify_mask;
	struct list_head to_notify;	/* need to queue notification */
	struct mnt_namespace *prev_ns;	/* previous namespace (NULL if none) */
#endif
	int mnt_t_flags;		/* namespace_sem-protected flags (T_* below) */
	int mnt_id;			/* mount identifier, reused */
	u64 mnt_id_unique;		/* mount ID unique until reboot */
	int mnt_group_id;		/* peer group identifier */
	int mnt_expiry_mark;		/* true if marked for expiry */
	struct hlist_head mnt_pins;
	struct hlist_head mnt_stuck_children;
	struct mount *overmount;	/* mounted on ->mnt_root */
} __randomize_layout;
95
/* Flag bits stored in struct mount::mnt_t_flags (namespace_sem-protected). */
enum {
	T_SHARED = 1,		/* mount is shared */
	T_UNBINDABLE = 2,	/* mount is unbindable */
	T_MARKED = 4,		/* internal mark for propagate_... */
	T_UMOUNT_CANDIDATE = 8,	/* for propagate_umount */

	/*
	 * T_SHARED_MASK is the set of flags that should be cleared when a
	 * mount becomes shared.  Currently, this is only the flag that says a
	 * mount cannot be bind mounted, since this is how we create a mount
	 * that shares events with another mount.  If you add a new T_*
	 * flag, consider how it interacts with shared mounts.
	 */
	T_SHARED_MASK = T_UNBINDABLE,
};
111
112 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
113
real_mount(struct vfsmount * mnt)114 static inline struct mount *real_mount(struct vfsmount *mnt)
115 {
116 return container_of(mnt, struct mount, mnt);
117 }
118
mnt_has_parent(const struct mount * mnt)119 static inline int mnt_has_parent(const struct mount *mnt)
120 {
121 return mnt != mnt->mnt_parent;
122 }
123
is_mounted(struct vfsmount * mnt)124 static inline int is_mounted(struct vfsmount *mnt)
125 {
126 /* neither detached nor internal? */
127 return !IS_ERR_OR_NULL(real_mount(mnt)->mnt_ns);
128 }
129
130 extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
131
132 extern int __legitimize_mnt(struct vfsmount *, unsigned);
133
__path_is_mountpoint(const struct path * path)134 static inline bool __path_is_mountpoint(const struct path *path)
135 {
136 struct mount *m = __lookup_mnt(path->mnt, path->dentry);
137 return m && likely(!(m->mnt.mnt_flags & MNT_SYNC_UMOUNT));
138 }
139
140 extern void __detach_mounts(struct dentry *dentry);
141
/* Detach all mounts on @dentry; cheap d_mountpoint() test avoids the slow path. */
static inline void detach_mounts(struct dentry *dentry)
{
	if (d_mountpoint(dentry))
		__detach_mounts(dentry);
}
148
/* Take a reference on a mount namespace. */
static inline void get_mnt_ns(struct mnt_namespace *ns)
{
	ns_ref_inc(ns);
}
153
/* Global seqlock guarding mount-tree topology changes. */
extern seqlock_t mount_lock;

/* Scoped guards over mount_lock: write side, and exclusive-read side. */
DEFINE_LOCK_GUARD_0(mount_writer, write_seqlock(&mount_lock),
		    write_sequnlock(&mount_lock))
DEFINE_LOCK_GUARD_0(mount_locked_reader, read_seqlock_excl(&mount_lock),
		    read_sequnlock_excl(&mount_lock))
160
/* State for a /proc mounts-style seq_file iteration. */
struct proc_mounts {
	struct mnt_namespace *ns;	/* namespace being listed */
	struct path root;		/* root relative to which paths are shown */
	int (*show)(struct seq_file *, struct vfsmount *);	/* per-entry formatter */
};
166
167 extern const struct seq_operations mounts_op;
168
169 extern bool __is_local_mountpoint(const struct dentry *dentry);
is_local_mountpoint(const struct dentry * dentry)170 static inline bool is_local_mountpoint(const struct dentry *dentry)
171 {
172 if (!d_mountpoint(dentry))
173 return false;
174
175 return __is_local_mountpoint(dentry);
176 }
177
/* Is this an anonymous mount namespace? */
static inline bool is_anon_ns(struct mnt_namespace *ns)
{
	return ns->is_anon;
}
182
anon_ns_root(const struct mount * m)183 static inline bool anon_ns_root(const struct mount *m)
184 {
185 struct mnt_namespace *ns = READ_ONCE(m->mnt_ns);
186
187 return !IS_ERR_OR_NULL(ns) && is_anon_ns(ns) && m == ns->root;
188 }
189
/* Is the mount currently linked into its namespace's rbtree? */
static inline bool mnt_ns_attached(const struct mount *mnt)
{
	return !RB_EMPTY_NODE(&mnt->mnt_node);
}
194
/* Does the namespace contain no mounts at all? */
static inline bool mnt_ns_empty(const struct mnt_namespace *ns)
{
	return RB_EMPTY_ROOT(&ns->mounts);
}
199
/*
 * Unlink @mnt from its namespace's rbtree, fixing up the cached
 * first/last node pointers before the node is erased.
 */
static inline void move_from_ns(struct mount *mnt)
{
	struct mnt_namespace *ns = mnt->mnt_ns;
	WARN_ON(!mnt_ns_attached(mnt));
	/* update cached extremes before rb_erase() invalidates the node */
	if (ns->mnt_last_node == &mnt->mnt_node)
		ns->mnt_last_node = rb_prev(&mnt->mnt_node);
	if (ns->mnt_first_node == &mnt->mnt_node)
		ns->mnt_first_node = rb_next(&mnt->mnt_node);
	rb_erase(&mnt->mnt_node, &ns->mounts);
	/* mark detached so mnt_ns_attached() now returns false */
	RB_CLEAR_NODE(&mnt->mnt_node);
}
211
212 bool has_locked_children(struct mount *mnt, struct dentry *dentry);
213 struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mnt_ns,
214 bool previous);
215
/* Convert a generic ns_common pointer to its enclosing mnt_namespace. */
static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}
220
221 #ifdef CONFIG_FSNOTIFY
mnt_notify_add(struct mount * m)222 static inline void mnt_notify_add(struct mount *m)
223 {
224 /* Optimize the case where there are no watches */
225 if ((m->mnt_ns && m->mnt_ns->n_fsnotify_marks) ||
226 (m->prev_ns && m->prev_ns->n_fsnotify_marks))
227 list_add_tail(&m->to_notify, ¬ify_list);
228 else
229 m->prev_ns = m->mnt_ns;
230 }
231 #else
/* CONFIG_FSNOTIFY disabled: mount notifications are a no-op. */
static inline void mnt_notify_add(struct mount *m)
{
}
235 #endif
236
topmost_overmount(struct mount * m)237 static inline struct mount *topmost_overmount(struct mount *m)
238 {
239 while (m->overmount)
240 m = m->overmount;
241 return m;
242 }
243
244 static inline bool __test_write_hold(struct mount * __aligned(1) *val)
245 {
246 return (unsigned long)val & WRITE_HOLD;
247 }
248
/* Is the WRITE_HOLD bit set in @m's stolen-LSB pprev pointer? */
static inline bool test_write_hold(const struct mount *m)
{
	return __test_write_hold(m->mnt_pprev_for_sb);
}
253
set_write_hold(struct mount * m)254 static inline void set_write_hold(struct mount *m)
255 {
256 m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
257 | WRITE_HOLD);
258 }
259
clear_write_hold(struct mount * m)260 static inline void clear_write_hold(struct mount *m)
261 {
262 m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
263 & ~WRITE_HOLD);
264 }
265
266 struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry);
267