xref: /linux/fs/pidfs.c (revision df00ded23a6b4df888237333b1f86067d24113b2)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/anon_inodes.h>
3 #include <linux/exportfs.h>
4 #include <linux/file.h>
5 #include <linux/fs.h>
6 #include <linux/cgroup.h>
7 #include <linux/magic.h>
8 #include <linux/mount.h>
9 #include <linux/pid.h>
10 #include <linux/pidfs.h>
11 #include <linux/pid_namespace.h>
12 #include <linux/poll.h>
13 #include <linux/proc_fs.h>
14 #include <linux/proc_ns.h>
15 #include <linux/pseudo_fs.h>
16 #include <linux/ptrace.h>
17 #include <linux/seq_file.h>
18 #include <uapi/linux/pidfd.h>
19 #include <linux/ipc_namespace.h>
20 #include <linux/time_namespace.h>
21 #include <linux/utsname.h>
22 #include <net/net_namespace.h>
23 
24 #include "internal.h"
25 #include "mount.h"
26 
27 static struct kmem_cache *pidfs_cachep __ro_after_init;
28 
29 /*
30  * Stashes information that userspace needs to access even after the
31  * process has been reaped.
32  */
33 struct pidfs_exit_info {
34 	__u64 cgroupid;
35 	__s32 exit_code;
36 };
37 
38 struct pidfs_inode {
39 	struct pidfs_exit_info __pei;
40 	struct pidfs_exit_info *exit_info;
41 	struct inode vfs_inode;
42 };
43 
44 static inline struct pidfs_inode *pidfs_i(struct inode *inode)
45 {
46 	return container_of(inode, struct pidfs_inode, vfs_inode);
47 }
48 
49 static struct rb_root pidfs_ino_tree = RB_ROOT;
50 
51 #if BITS_PER_LONG == 32
52 static inline unsigned long pidfs_ino(u64 ino)
53 {
54 	return lower_32_bits(ino);
55 }
56 
57 /* On 32 bit the generation number is the upper 32 bits. */
58 static inline u32 pidfs_gen(u64 ino)
59 {
60 	return upper_32_bits(ino);
61 }
62 
63 #else
64 
65 /* On 64 bit simply return ino. */
66 static inline unsigned long pidfs_ino(u64 ino)
67 {
68 	return ino;
69 }
70 
71 /* On 64 bit the generation number is 0. */
72 static inline u32 pidfs_gen(u64 ino)
73 {
74 	return 0;
75 }
76 #endif
77 
78 static int pidfs_ino_cmp(struct rb_node *a, const struct rb_node *b)
79 {
80 	struct pid *pid_a = rb_entry(a, struct pid, pidfs_node);
81 	struct pid *pid_b = rb_entry(b, struct pid, pidfs_node);
82 	u64 pid_ino_a = pid_a->ino;
83 	u64 pid_ino_b = pid_b->ino;
84 
85 	if (pid_ino_a < pid_ino_b)
86 		return -1;
87 	if (pid_ino_a > pid_ino_b)
88 		return 1;
89 	return 0;
90 }
91 
92 void pidfs_add_pid(struct pid *pid)
93 {
94 	static u64 pidfs_ino_nr = 2;
95 
96 	/*
97 	 * On 64 bit nothing special happens. The 64 bit number assigned
98 	 * to struct pid is the inode number.
99 	 *
100 	 * On 32 bit the 64 bit number assigned to struct pid is split
101 	 * into two 32 bit numbers. The lower 32 bits are used as the
102 	 * inode number and the upper 32 bits are used as the inode
103 	 * generation number.
104 	 *
105 	 * On 32 bit pidfs_ino() will return the lower 32 bits. When
106 	 * pidfs_ino() returns zero a wraparound has happened. When a
107 	 * wraparound happens the 64 bit number will be incremented by 2
108 	 * so inode numbering starts at 2 again.
109 	 *
110 	 * On 64 bit comparing two pidfds is as simple as comparing
111 	 * inode numbers.
112 	 *
113 	 * When a wraparound happens on 32 bit multiple pidfds with the
114 	 * same inode number are likely to exist. (This isn't a problem
115 	 * since before pidfs all pidfds shared the same anonymous
116 	 * inode and thus the same inode number.) Userspace can
117 	 * reconstruct the 64 bit identifier by retrieving both the
118 	 * inode number and the inode generation number, or it can use
119 	 * file handles; see the userspace sketch after this function.
120 	 */
121 	if (pidfs_ino(pidfs_ino_nr) == 0)
122 		pidfs_ino_nr += 2;
123 
124 	pid->ino = pidfs_ino_nr;
125 	pid->stashed = NULL;
126 	pidfs_ino_nr++;
127 
128 	write_seqcount_begin(&pidmap_lock_seq);
129 	rb_find_add_rcu(&pid->pidfs_node, &pidfs_ino_tree, pidfs_ino_cmp);
130 	write_seqcount_end(&pidmap_lock_seq);
131 }
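
/*
 * Example (userspace sketch, not part of the kernel build) of the
 * comparison scheme described above: fstat() yields the inode number
 * and FS_IOC_GETVERSION (handled in pidfd_ioctl() below) yields the
 * generation, so the pair identifies a struct pid even across a 32 bit
 * inode number wraparound. The helper name is purely illustrative.
 *
 *	#include <fcntl.h>
 *	#include <linux/fs.h>		// FS_IOC_GETVERSION
 *	#include <stdbool.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/stat.h>
 *
 *	// True if both pidfds refer to the same struct pid.
 *	static bool same_pidfd_identity(int pidfd1, int pidfd2)
 *	{
 *		struct stat st1, st2;
 *		unsigned int gen1 = 0, gen2 = 0;
 *
 *		if (fstat(pidfd1, &st1) || fstat(pidfd2, &st2))
 *			return false;
 *		if (ioctl(pidfd1, FS_IOC_GETVERSION, &gen1) ||
 *		    ioctl(pidfd2, FS_IOC_GETVERSION, &gen2))
 *			return false;
 *		// On 64 bit both generations are 0 and only st_ino matters.
 *		return st1.st_ino == st2.st_ino && gen1 == gen2;
 *	}
 */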
132 
133 void pidfs_remove_pid(struct pid *pid)
134 {
135 	write_seqcount_begin(&pidmap_lock_seq);
136 	rb_erase(&pid->pidfs_node, &pidfs_ino_tree);
137 	write_seqcount_end(&pidmap_lock_seq);
138 }
139 
140 #ifdef CONFIG_PROC_FS
141 /**
142  * pidfd_show_fdinfo - print information about a pidfd
143  * @m: proc fdinfo file
144  * @f: file referencing a pidfd
145  *
146  * Pid:
147  * This function will print the pid that a given pidfd refers to in the
148  * pid namespace of the procfs instance.
149  * If the pid namespace of the process is not a descendant of the pid
150  * namespace of the procfs instance 0 will be shown as its pid. This is
151  * similar to calling getppid() on a process whose parent is outside of
152  * its pid namespace.
153  *
154  * NSpid:
155  * If pid namespaces are supported then this function will also print
156  * the pid that a given pidfd refers to for all descendant pid namespaces
157  * starting from the current pid namespace of the instance, i.e. the
158  * Pid field and the first entry in the NSpid field will be identical.
159  * If the pid namespace of the process is not a descendant of the pid
160  * namespace of the procfs instance 0 will be shown as its first NSpid
161  * entry and no others will be shown.
162  * Note that this differs from the Pid and NSpid fields in
163  * /proc/<pid>/status where Pid and NSpid are always shown relative to
164  * the pid namespace of the procfs instance. The difference becomes
165  * obvious when sending around a pidfd between pid namespaces from a
166  * different branch of the tree, i.e. where no ancestral relation is
167  * present between the pid namespaces:
168  * - create two new pid namespaces ns1 and ns2 in the initial pid
169  *   namespace (also take care to create new mount namespaces in the
170  *   new pid namespace and mount procfs)
171  * - create a process with a pidfd in ns1
172  * - send pidfd from ns1 to ns2
173  * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
174  *   have exactly one entry, which is 0
175  */
176 static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
177 {
178 	struct pid *pid = pidfd_pid(f);
179 	struct pid_namespace *ns;
180 	pid_t nr = -1;
181 
182 	if (likely(pid_has_task(pid, PIDTYPE_PID))) {
183 		ns = proc_pid_ns(file_inode(m->file)->i_sb);
184 		nr = pid_nr_ns(pid, ns);
185 	}
186 
187 	seq_put_decimal_ll(m, "Pid:\t", nr);
188 
189 #ifdef CONFIG_PID_NS
190 	seq_put_decimal_ll(m, "\nNSpid:\t", nr);
191 	if (nr > 0) {
192 		int i;
193 
194 		/* If nr is non-zero it means that 'pid' is valid and that
195 		 * ns, i.e. the pid namespace associated with the procfs
196 		 * instance, is in the pid namespace hierarchy of pid.
197 		 * Start at one below the already printed level.
198 		 */
199 		for (i = ns->level + 1; i <= pid->level; i++)
200 			seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
201 	}
202 #endif
203 	seq_putc(m, '\n');
204 }
205 #endif
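
/*
 * Example (userspace sketch, not part of the kernel build) of consuming
 * the fdinfo output produced above; the helper name and the minimal
 * parsing are purely illustrative.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	// Print the Pid:/NSpid: lines procfs emits for @pidfd.
 *	static void dump_pidfd_fdinfo(int pidfd)
 *	{
 *		char path[64], line[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return;
 *		while (fgets(line, sizeof(line), f)) {
 *			if (!strncmp(line, "Pid:", 4) ||
 *			    !strncmp(line, "NSpid:", 6))
 *				fputs(line, stdout);
 *		}
 *		fclose(f);
 *	}
 */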
206 
207 /*
208  * Poll support for process exit notification.
209  */
210 static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
211 {
212 	struct pid *pid = pidfd_pid(file);
213 	struct task_struct *task;
214 	__poll_t poll_flags = 0;
215 
216 	poll_wait(file, &pid->wait_pidfd, pts);
217 	/*
218 	 * Don't wake waiters if the thread-group leader exited
219 	 * prematurely. They either get notified when the last subthread
220 	 * exits or not at all if one of the remaining subthreads execs
221 	 * and assumes the struct pid of the old thread-group leader.
222 	 */
223 	guard(rcu)();
224 	task = pid_task(pid, PIDTYPE_PID);
225 	if (!task)
226 		poll_flags = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
227 	else if (task->exit_state && !delay_group_leader(task))
228 		poll_flags = EPOLLIN | EPOLLRDNORM;
229 
230 	return poll_flags;
231 }
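
/*
 * Example (userspace sketch, not part of the kernel build) of the exit
 * notification implemented above: poll() reports the pidfd readable once
 * the process has exited. The helper name is illustrative; pidfd_open(2)
 * is only used here to obtain a pidfd.
 *
 *	#include <poll.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	// Block until the process identified by @pid exits.
 *	static int wait_for_pidfd_exit(pid_t pid)
 *	{
 *		struct pollfd pfd = { .events = POLLIN };
 *		int ret, pidfd;
 *
 *		pidfd = syscall(SYS_pidfd_open, pid, 0);
 *		if (pidfd < 0)
 *			return -1;
 *
 *		pfd.fd = pidfd;
 *		ret = poll(&pfd, 1, -1);
 *		close(pidfd);
 *		return ret == 1 ? 0 : -1;
 *	}
 */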
232 
233 static inline bool pid_in_current_pidns(const struct pid *pid)
234 {
235 	const struct pid_namespace *ns = task_active_pid_ns(current);
236 
237 	if (ns->level <= pid->level)
238 		return pid->numbers[ns->level].ns == ns;
239 
240 	return false;
241 }
242 
243 static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg)
244 {
245 	struct pidfd_info __user *uinfo = (struct pidfd_info __user *)arg;
246 	struct inode *inode = file_inode(file);
247 	struct pid *pid = pidfd_pid(file);
248 	size_t usize = _IOC_SIZE(cmd);
249 	struct pidfd_info kinfo = {};
250 	struct pidfs_exit_info *exit_info;
251 	struct user_namespace *user_ns;
252 	struct task_struct *task;
253 	const struct cred *c;
254 	__u64 mask;
255 
256 	if (!uinfo)
257 		return -EINVAL;
258 	if (usize < PIDFD_INFO_SIZE_VER0)
259 		return -EINVAL; /* First version, no smaller struct possible */
260 
261 	if (copy_from_user(&mask, &uinfo->mask, sizeof(mask)))
262 		return -EFAULT;
263 
264 	/*
265 	 * Restrict information retrieval to tasks within the caller's pid
266 	 * namespace hierarchy.
267 	 */
268 	if (!pid_in_current_pidns(pid))
269 		return -ESRCH;
270 
271 	if (mask & PIDFD_INFO_EXIT) {
272 		exit_info = READ_ONCE(pidfs_i(inode)->exit_info);
273 		if (exit_info) {
274 			kinfo.mask |= PIDFD_INFO_EXIT;
275 #ifdef CONFIG_CGROUPS
276 			kinfo.cgroupid = exit_info->cgroupid;
277 			kinfo.mask |= PIDFD_INFO_CGROUPID;
278 #endif
279 			kinfo.exit_code = exit_info->exit_code;
280 		}
281 	}
282 
283 	task = get_pid_task(pid, PIDTYPE_PID);
284 	if (!task) {
285 		/*
286 		 * If the task has already been reaped, only exit
287 		 * information is available
288 		 * information is available.
289 		if (!(mask & PIDFD_INFO_EXIT))
290 			return -ESRCH;
291 
292 		goto copy_out;
293 	}
294 
295 	c = get_task_cred(task);
296 	if (!c)
297 		return -ESRCH;
298 
299 	/* Unconditionally return identifiers and credentials, the rest only on request */
300 
301 	user_ns = current_user_ns();
302 	kinfo.ruid = from_kuid_munged(user_ns, c->uid);
303 	kinfo.rgid = from_kgid_munged(user_ns, c->gid);
304 	kinfo.euid = from_kuid_munged(user_ns, c->euid);
305 	kinfo.egid = from_kgid_munged(user_ns, c->egid);
306 	kinfo.suid = from_kuid_munged(user_ns, c->suid);
307 	kinfo.sgid = from_kgid_munged(user_ns, c->sgid);
308 	kinfo.fsuid = from_kuid_munged(user_ns, c->fsuid);
309 	kinfo.fsgid = from_kgid_munged(user_ns, c->fsgid);
310 	kinfo.mask |= PIDFD_INFO_CREDS;
311 	put_cred(c);
312 
313 #ifdef CONFIG_CGROUPS
314 	if (!kinfo.cgroupid) {
315 		struct cgroup *cgrp;
316 
317 		rcu_read_lock();
318 		cgrp = task_dfl_cgroup(task);
319 		kinfo.cgroupid = cgroup_id(cgrp);
320 		kinfo.mask |= PIDFD_INFO_CGROUPID;
321 		rcu_read_unlock();
322 	}
323 #endif
324 
325 	/*
326 	 * Copy pid/tgid last, to reduce the chances the information might be
327 	 * stale. Note that it is not possible to ensure it will be valid as the
328 	 * task might exit as soon as the copy_to_user finishes, but that's ok
329 	 * and userspace expects that might happen and can act accordingly, so
330 	 * this is just best-effort. What we can do however is check that all
331 	 * the fields are set correctly, or return ESRCH to avoid providing
332 	 * incomplete information. */
333 
334 	kinfo.ppid = task_ppid_nr_ns(task, NULL);
335 	kinfo.tgid = task_tgid_vnr(task);
336 	kinfo.pid = task_pid_vnr(task);
337 	kinfo.mask |= PIDFD_INFO_PID;
338 
339 	if (kinfo.pid == 0 || kinfo.tgid == 0 || (kinfo.ppid == 0 && kinfo.pid != 1))
340 		return -ESRCH;
341 
342 copy_out:
343 	/*
344 	 * If userspace and the kernel have the same struct size it can just
345 	 * be copied. If userspace provides an older struct, only the bits that
346 	 * userspace knows about will be copied. If userspace provides a new
347 	 * struct, only the bits that the kernel knows about will be copied.
348 	 */
349 	return copy_struct_to_user(uinfo, usize, &kinfo, sizeof(kinfo), NULL);
350 }
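
/*
 * Example (userspace sketch, not part of the kernel build) of the
 * PIDFD_GET_INFO ioctl handled above, assuming <linux/pidfd.h> provides
 * struct pidfd_info and the PIDFD_INFO_* flags; the helper name is
 * illustrative. Requesting PIDFD_INFO_EXIT allows the exit code to be
 * read even after the task has been reaped.
 *
 *	#include <linux/pidfd.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *
 *	static int print_pidfd_info(int pidfd)
 *	{
 *		struct pidfd_info info = {
 *			.mask = PIDFD_INFO_EXIT | PIDFD_INFO_CGROUPID,
 *		};
 *
 *		if (ioctl(pidfd, PIDFD_GET_INFO, &info) < 0)
 *			return -1;
 *
 *		// Credentials and, while the task is alive, pid/tgid/ppid
 *		// are returned unconditionally.
 *		if (info.mask & PIDFD_INFO_PID)
 *			printf("pid %u tgid %u ppid %u\n",
 *			       info.pid, info.tgid, info.ppid);
 *		if (info.mask & PIDFD_INFO_CGROUPID)
 *			printf("cgroupid %llu\n",
 *			       (unsigned long long)info.cgroupid);
 *		if (info.mask & PIDFD_INFO_EXIT)
 *			printf("exit code %d\n", info.exit_code);
 *		return 0;
 *	}
 */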
351 
352 static bool pidfs_ioctl_valid(unsigned int cmd)
353 {
354 	switch (cmd) {
355 	case FS_IOC_GETVERSION:
356 	case PIDFD_GET_CGROUP_NAMESPACE:
357 	case PIDFD_GET_IPC_NAMESPACE:
358 	case PIDFD_GET_MNT_NAMESPACE:
359 	case PIDFD_GET_NET_NAMESPACE:
360 	case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
361 	case PIDFD_GET_TIME_NAMESPACE:
362 	case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
363 	case PIDFD_GET_UTS_NAMESPACE:
364 	case PIDFD_GET_USER_NAMESPACE:
365 	case PIDFD_GET_PID_NAMESPACE:
366 		return true;
367 	}
368 
369 	/* Extensible ioctls require some more careful checks. */
370 	switch (_IOC_NR(cmd)) {
371 	case _IOC_NR(PIDFD_GET_INFO):
372 		/*
373 		 * Try to prevent performing a pidfd ioctl when someone
374 		 * erroneously mistook the file descriptor for a pidfd.
375 		 * This is not perfect but will catch most cases.
376 		 */
377 		return (_IOC_TYPE(cmd) == _IOC_TYPE(PIDFD_GET_INFO));
378 	}
379 
380 	return false;
381 }
382 
383 static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
384 {
385 	struct task_struct *task __free(put_task) = NULL;
386 	struct nsproxy *nsp __free(put_nsproxy) = NULL;
387 	struct ns_common *ns_common = NULL;
388 	struct pid_namespace *pid_ns;
389 
390 	if (!pidfs_ioctl_valid(cmd))
391 		return -ENOIOCTLCMD;
392 
393 	if (cmd == FS_IOC_GETVERSION) {
394 		if (!arg)
395 			return -EINVAL;
396 
397 		__u32 __user *argp = (__u32 __user *)arg;
398 		return put_user(file_inode(file)->i_generation, argp);
399 	}
400 
401 	/* Extensible IOCTL that does not open namespace FDs, so take a shortcut. */
402 	if (_IOC_NR(cmd) == _IOC_NR(PIDFD_GET_INFO))
403 		return pidfd_info(file, cmd, arg);
404 
405 	task = get_pid_task(pidfd_pid(file), PIDTYPE_PID);
406 	if (!task)
407 		return -ESRCH;
408 
409 	if (arg)
410 		return -EINVAL;
411 
412 	scoped_guard(task_lock, task) {
413 		nsp = task->nsproxy;
414 		if (nsp)
415 			get_nsproxy(nsp);
416 	}
417 	if (!nsp)
418 		return -ESRCH; /* just pretend it didn't exist */
419 
420 	/*
421 	 * We're trying to open a file descriptor to the namespace so perform a
422 	 * filesystem cred ptrace check. Also, we mirror nsfs behavior.
423 	 */
424 	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
425 		return -EACCES;
426 
427 	switch (cmd) {
428 	/* Namespaces that hang off nsproxy. */
429 	case PIDFD_GET_CGROUP_NAMESPACE:
430 		if (IS_ENABLED(CONFIG_CGROUPS)) {
431 			get_cgroup_ns(nsp->cgroup_ns);
432 			ns_common = to_ns_common(nsp->cgroup_ns);
433 		}
434 		break;
435 	case PIDFD_GET_IPC_NAMESPACE:
436 		if (IS_ENABLED(CONFIG_IPC_NS)) {
437 			get_ipc_ns(nsp->ipc_ns);
438 			ns_common = to_ns_common(nsp->ipc_ns);
439 		}
440 		break;
441 	case PIDFD_GET_MNT_NAMESPACE:
442 		get_mnt_ns(nsp->mnt_ns);
443 		ns_common = to_ns_common(nsp->mnt_ns);
444 		break;
445 	case PIDFD_GET_NET_NAMESPACE:
446 		if (IS_ENABLED(CONFIG_NET_NS)) {
447 			ns_common = to_ns_common(nsp->net_ns);
448 			get_net_ns(ns_common);
449 		}
450 		break;
451 	case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
452 		if (IS_ENABLED(CONFIG_PID_NS)) {
453 			get_pid_ns(nsp->pid_ns_for_children);
454 			ns_common = to_ns_common(nsp->pid_ns_for_children);
455 		}
456 		break;
457 	case PIDFD_GET_TIME_NAMESPACE:
458 		if (IS_ENABLED(CONFIG_TIME_NS)) {
459 			get_time_ns(nsp->time_ns);
460 			ns_common = to_ns_common(nsp->time_ns);
461 		}
462 		break;
463 	case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
464 		if (IS_ENABLED(CONFIG_TIME_NS)) {
465 			get_time_ns(nsp->time_ns_for_children);
466 			ns_common = to_ns_common(nsp->time_ns_for_children);
467 		}
468 		break;
469 	case PIDFD_GET_UTS_NAMESPACE:
470 		if (IS_ENABLED(CONFIG_UTS_NS)) {
471 			get_uts_ns(nsp->uts_ns);
472 			ns_common = to_ns_common(nsp->uts_ns);
473 		}
474 		break;
475 	/* Namespaces that don't hang off nsproxy. */
476 	case PIDFD_GET_USER_NAMESPACE:
477 		if (IS_ENABLED(CONFIG_USER_NS)) {
478 			rcu_read_lock();
479 			ns_common = to_ns_common(get_user_ns(task_cred_xxx(task, user_ns)));
480 			rcu_read_unlock();
481 		}
482 		break;
483 	case PIDFD_GET_PID_NAMESPACE:
484 		if (IS_ENABLED(CONFIG_PID_NS)) {
485 			rcu_read_lock();
486 			pid_ns = task_active_pid_ns(task);
487 			if (pid_ns)
488 				ns_common = to_ns_common(get_pid_ns(pid_ns));
489 			rcu_read_unlock();
490 		}
491 		break;
492 	default:
493 		return -ENOIOCTLCMD;
494 	}
495 
496 	if (!ns_common)
497 		return -EOPNOTSUPP;
498 
499 	/* open_namespace() unconditionally consumes the reference */
500 	return open_namespace(ns_common);
501 }
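
/*
 * Example (userspace sketch, not part of the kernel build) of the
 * namespace ioctls above: each PIDFD_GET_*_NAMESPACE ioctl returns a
 * namespace file descriptor that can be passed straight to setns(2).
 * PIDFD_GET_NET_NAMESPACE is picked arbitrarily and the helper name is
 * illustrative.
 *
 *	#define _GNU_SOURCE
 *	#include <linux/pidfd.h>	// PIDFD_GET_NET_NAMESPACE
 *	#include <sched.h>		// setns(), CLONE_NEWNET
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	// Join the network namespace of the task behind @pidfd.
 *	static int join_net_namespace(int pidfd)
 *	{
 *		int nsfd, ret;
 *
 *		nsfd = ioctl(pidfd, PIDFD_GET_NET_NAMESPACE, 0);
 *		if (nsfd < 0)
 *			return -1;
 *
 *		ret = setns(nsfd, CLONE_NEWNET);
 *		close(nsfd);
 *		return ret;
 *	}
 */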
502 
503 static const struct file_operations pidfs_file_operations = {
504 	.poll		= pidfd_poll,
505 #ifdef CONFIG_PROC_FS
506 	.show_fdinfo	= pidfd_show_fdinfo,
507 #endif
508 	.unlocked_ioctl	= pidfd_ioctl,
509 	.compat_ioctl   = compat_ptr_ioctl,
510 };
511 
512 struct pid *pidfd_pid(const struct file *file)
513 {
514 	if (file->f_op != &pidfs_file_operations)
515 		return ERR_PTR(-EBADF);
516 	return file_inode(file)->i_private;
517 }
518 
519 /*
520  * We're called from release_task(). We know there's at least one
521  * reference to struct pid being held that won't be released until the
522  * task has been reaped which cannot happen until we're out of
523  * release_task().
524  *
525  * If this struct pid is referred to by a pidfd then
526  * stashed_dentry_get() will return the dentry and inode for that struct
527  * pid. Since we've taken a reference on it there's now an additional
528  * reference from the exit path on it. Which is fine. We're going to put
529  * it again in a second and we know that the pid is kept alive anyway.
530  *
531  * Worst case is that we've filled in the info and immediately free the
532  * dentry and inode afterwards since the pidfd has been closed. Since
533  * pidfs_exit() currently is placed after exit_task_work() we know that
534  * it cannot be us aka the exiting task holding a pidfd to ourselves.
535  */
536 void pidfs_exit(struct task_struct *tsk)
537 {
538 	struct dentry *dentry;
539 
540 	might_sleep();
541 
542 	dentry = stashed_dentry_get(&task_pid(tsk)->stashed);
543 	if (dentry) {
544 		struct inode *inode = d_inode(dentry);
545 		struct pidfs_exit_info *exit_info = &pidfs_i(inode)->__pei;
546 #ifdef CONFIG_CGROUPS
547 		struct cgroup *cgrp;
548 
549 		rcu_read_lock();
550 		cgrp = task_dfl_cgroup(tsk);
551 		exit_info->cgroupid = cgroup_id(cgrp);
552 		rcu_read_unlock();
553 #endif
554 		exit_info->exit_code = tsk->exit_code;
555 
556 		/* Ensure that PIDFD_GET_INFO sees either all or nothing. */
557 		smp_store_release(&pidfs_i(inode)->exit_info, &pidfs_i(inode)->__pei);
558 		dput(dentry);
559 	}
560 }
561 
562 static struct vfsmount *pidfs_mnt __ro_after_init;
563 
564 /*
565  * The vfs falls back to simple_setattr() if i_op->setattr() isn't
566  * implemented. Let's reject it completely until we have a clean
567  * permission concept for pidfds.
568  */
569 static int pidfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
570 			 struct iattr *attr)
571 {
572 	return -EOPNOTSUPP;
573 }
574 
575 
576 /*
577  * User space expects pidfs inodes to have no file type in st_mode.
578  *
579  * In particular, 'lsof' has this legacy logic:
580  *
581  *	type = s->st_mode & S_IFMT;
582  *	switch (type) {
583  *	  ...
584  *	case 0:
585  *		if (!strcmp(p, "anon_inode"))
586  *			Lf->ntype = Ntype = N_ANON_INODE;
587  *
588  * to detect our old anon_inode logic.
589  *
590  * Rather than mess with our internal sane inode data, just fix it
591  * up here in getattr() by masking off the format bits.
592  */
593 static int pidfs_getattr(struct mnt_idmap *idmap, const struct path *path,
594 			 struct kstat *stat, u32 request_mask,
595 			 unsigned int query_flags)
596 {
597 	struct inode *inode = d_inode(path->dentry);
598 
599 	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
600 	stat->mode &= ~S_IFMT;
601 	return 0;
602 }
603 
604 static const struct inode_operations pidfs_inode_operations = {
605 	.getattr = pidfs_getattr,
606 	.setattr = pidfs_setattr,
607 };
608 
609 static void pidfs_evict_inode(struct inode *inode)
610 {
611 	struct pid *pid = inode->i_private;
612 
613 	clear_inode(inode);
614 	put_pid(pid);
615 }
616 
617 static struct inode *pidfs_alloc_inode(struct super_block *sb)
618 {
619 	struct pidfs_inode *pi;
620 
621 	pi = alloc_inode_sb(sb, pidfs_cachep, GFP_KERNEL);
622 	if (!pi)
623 		return NULL;
624 
625 	memset(&pi->__pei, 0, sizeof(pi->__pei));
626 	pi->exit_info = NULL;
627 
628 	return &pi->vfs_inode;
629 }
630 
631 static void pidfs_free_inode(struct inode *inode)
632 {
633 	kmem_cache_free(pidfs_cachep, pidfs_i(inode));
634 }
635 
636 static const struct super_operations pidfs_sops = {
637 	.alloc_inode	= pidfs_alloc_inode,
638 	.drop_inode	= generic_delete_inode,
639 	.evict_inode	= pidfs_evict_inode,
640 	.free_inode	= pidfs_free_inode,
641 	.statfs		= simple_statfs,
642 };
643 
644 /*
645  * 'lsof' has knowledge of our historical anon_inode use, and expects
646  * the pidfs dentry name to start with 'anon_inode'.
647  */
648 static char *pidfs_dname(struct dentry *dentry, char *buffer, int buflen)
649 {
650 	return dynamic_dname(buffer, buflen, "anon_inode:[pidfd]");
651 }
652 
653 const struct dentry_operations pidfs_dentry_operations = {
654 	.d_dname	= pidfs_dname,
655 	.d_prune	= stashed_dentry_prune,
656 };
657 
658 static int pidfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
659 			   struct inode *parent)
660 {
661 	const struct pid *pid = inode->i_private;
662 
663 	if (*max_len < 2) {
664 		*max_len = 2;
665 		return FILEID_INVALID;
666 	}
667 
668 	*max_len = 2;
669 	*(u64 *)fh = pid->ino;
670 	return FILEID_KERNFS;
671 }
672 
673 static int pidfs_ino_find(const void *key, const struct rb_node *node)
674 {
675 	const u64 pid_ino = *(u64 *)key;
676 	const struct pid *pid = rb_entry(node, struct pid, pidfs_node);
677 
678 	if (pid_ino < pid->ino)
679 		return -1;
680 	if (pid_ino > pid->ino)
681 		return 1;
682 	return 0;
683 }
684 
685 /* Find a struct pid based on the inode number. */
686 static struct pid *pidfs_ino_get_pid(u64 ino)
687 {
688 	struct pid *pid;
689 	struct rb_node *node;
690 	unsigned int seq;
691 
692 	guard(rcu)();
693 	do {
694 		seq = read_seqcount_begin(&pidmap_lock_seq);
695 		node = rb_find_rcu(&ino, &pidfs_ino_tree, pidfs_ino_find);
696 		if (node)
697 			break;
698 	} while (read_seqcount_retry(&pidmap_lock_seq, seq));
699 
700 	if (!node)
701 		return NULL;
702 
703 	pid = rb_entry(node, struct pid, pidfs_node);
704 
705 	/* Within our pid namespace hierarchy? */
706 	if (pid_vnr(pid) == 0)
707 		return NULL;
708 
709 	return get_pid(pid);
710 }
711 
712 static struct dentry *pidfs_fh_to_dentry(struct super_block *sb,
713 					 struct fid *fid, int fh_len,
714 					 int fh_type)
715 {
716 	int ret;
717 	u64 pid_ino;
718 	struct path path;
719 	struct pid *pid;
720 
721 	if (fh_len < 2)
722 		return NULL;
723 
724 	switch (fh_type) {
725 	case FILEID_KERNFS:
726 		pid_ino = *(u64 *)fid;
727 		break;
728 	default:
729 		return NULL;
730 	}
731 
732 	pid = pidfs_ino_get_pid(pid_ino);
733 	if (!pid)
734 		return NULL;
735 
736 	ret = path_from_stashed(&pid->stashed, pidfs_mnt, pid, &path);
737 	if (ret < 0)
738 		return ERR_PTR(ret);
739 
740 	mntput(path.mnt);
741 	return path.dentry;
742 }
743 
744 /*
745  * Make sure that we reject any nonsensical flags that users pass via
746  * open_by_handle_at(). Note that PIDFD_THREAD is defined as O_EXCL, and
747  * PIDFD_NONBLOCK as O_NONBLOCK.
748  */
749 #define VALID_FILE_HANDLE_OPEN_FLAGS \
750 	(O_RDONLY | O_WRONLY | O_RDWR | O_NONBLOCK | O_CLOEXEC | O_EXCL)
751 
752 static int pidfs_export_permission(struct handle_to_path_ctx *ctx,
753 				   unsigned int oflags)
754 {
755 	if (oflags & ~(VALID_FILE_HANDLE_OPEN_FLAGS | O_LARGEFILE))
756 		return -EINVAL;
757 
758 	/*
759 	 * pidfs_ino_get_pid() will verify that the struct pid is part
760 	 * of the caller's pid namespace hierarchy. No further
761 	 * permission checks are needed.
762 	 */
763 	return 0;
764 }
765 
766 static inline bool pidfs_pid_valid(struct pid *pid, const struct path *path,
767 				   unsigned int flags)
768 {
769 	enum pid_type type;
770 
771 	if (flags & PIDFD_CLONE)
772 		return true;
773 
774 	/*
775 	 * Make sure that if a pidfd is created PIDFD_INFO_EXIT
776 	 * information will be available. So after an inode for the
777 	 * pidfd has been allocated perform another check that the pid
778 	 * is still alive. If it is, exit information is available even
779 	 * if the task gets reaped before the pidfd is returned to
780 	 * userspace. The only exception is PIDFD_CLONE where no task
781 	 * linkage has been established for @pid yet and the kernel is
782 	 * in the middle of process creation so there's nothing for
783 	 * pidfs to miss.
784 	 */
785 	if (flags & PIDFD_THREAD)
786 		type = PIDTYPE_PID;
787 	else
788 		type = PIDTYPE_TGID;
789 
790 	/*
791 	 * Since pidfs_exit() is called before struct pid's task linkage
792 	 * is removed, the case where the task got reaped but a dentry
793 	 * was already attached to struct pid and exit information was
794 	 * recorded and published can be handled correctly.
795 	 */
796 	if (unlikely(!pid_has_task(pid, type))) {
797 		struct inode *inode = d_inode(path->dentry);
798 		return !!READ_ONCE(pidfs_i(inode)->exit_info);
799 	}
800 
801 	return true;
802 }
803 
804 static struct file *pidfs_export_open(struct path *path, unsigned int oflags)
805 {
806 	if (!pidfs_pid_valid(d_inode(path->dentry)->i_private, path, oflags))
807 		return ERR_PTR(-ESRCH);
808 
809 	/*
810 	 * Clear O_LARGEFILE as open_by_handle_at() forces it and raise
811 	 * O_RDWR as pidfds are always opened read-write.
812 	 */
813 	oflags &= ~O_LARGEFILE;
814 	return dentry_open(path, oflags | O_RDWR, current_cred());
815 }
816 
817 static const struct export_operations pidfs_export_operations = {
818 	.encode_fh	= pidfs_encode_fh,
819 	.fh_to_dentry	= pidfs_fh_to_dentry,
820 	.open		= pidfs_export_open,
821 	.permission	= pidfs_export_permission,
822 };
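
/*
 * Example (userspace sketch, not part of the kernel build) of the export
 * operations above: name_to_handle_at(2) with AT_EMPTY_PATH encodes the
 * 64 bit pid identifier into a file handle and open_by_handle_at(2)
 * turns it back into a pidfd later, with pidfs_export_permission()
 * providing the access check. Helper names are illustrative and error
 * handling is trimmed.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>	// name_to_handle_at(), open_by_handle_at()
 *	#include <stdlib.h>
 *
 *	// Encode @pidfd into a heap-allocated handle; caller must free().
 *	static struct file_handle *pidfd_to_handle(int pidfd)
 *	{
 *		struct file_handle *fh;
 *		int mnt_id;
 *
 *		fh = calloc(1, sizeof(*fh) + MAX_HANDLE_SZ);
 *		if (!fh)
 *			return NULL;
 *		fh->handle_bytes = MAX_HANDLE_SZ;
 *		if (name_to_handle_at(pidfd, "", fh, &mnt_id, AT_EMPTY_PATH) < 0) {
 *			free(fh);
 *			return NULL;
 *		}
 *		return fh;
 *	}
 *
 *	// Reopen a pidfd from a handle; @any_pidfd identifies the pidfs mount.
 *	static int handle_to_pidfd(int any_pidfd, struct file_handle *fh)
 *	{
 *		return open_by_handle_at(any_pidfd, fh, O_CLOEXEC);
 *	}
 */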
823 
824 static int pidfs_init_inode(struct inode *inode, void *data)
825 {
826 	const struct pid *pid = data;
827 
828 	inode->i_private = data;
829 	inode->i_flags |= S_PRIVATE;
830 	inode->i_mode |= S_IRWXU;
831 	inode->i_op = &pidfs_inode_operations;
832 	inode->i_fop = &pidfs_file_operations;
833 	inode->i_ino = pidfs_ino(pid->ino);
834 	inode->i_generation = pidfs_gen(pid->ino);
835 	return 0;
836 }
837 
838 static void pidfs_put_data(void *data)
839 {
840 	struct pid *pid = data;
841 	put_pid(pid);
842 }
843 
844 static const struct stashed_operations pidfs_stashed_ops = {
845 	.init_inode = pidfs_init_inode,
846 	.put_data = pidfs_put_data,
847 };
848 
849 static int pidfs_init_fs_context(struct fs_context *fc)
850 {
851 	struct pseudo_fs_context *ctx;
852 
853 	ctx = init_pseudo(fc, PID_FS_MAGIC);
854 	if (!ctx)
855 		return -ENOMEM;
856 
857 	ctx->ops = &pidfs_sops;
858 	ctx->eops = &pidfs_export_operations;
859 	ctx->dops = &pidfs_dentry_operations;
860 	fc->s_fs_info = (void *)&pidfs_stashed_ops;
861 	return 0;
862 }
863 
864 static struct file_system_type pidfs_type = {
865 	.name			= "pidfs",
866 	.init_fs_context	= pidfs_init_fs_context,
867 	.kill_sb		= kill_anon_super,
868 };
869 
870 struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags)
871 {
872 	struct file *pidfd_file;
873 	struct path path __free(path_put) = {};
874 	int ret;
875 
876 	/*
877 	 * Ensure that PIDFD_CLONE can be passed as a flag without
878 	 * overloading other uapi pidfd flags.
879 	 */
880 	BUILD_BUG_ON(PIDFD_CLONE == PIDFD_THREAD);
881 	BUILD_BUG_ON(PIDFD_CLONE == PIDFD_NONBLOCK);
882 
883 	ret = path_from_stashed(&pid->stashed, pidfs_mnt, get_pid(pid), &path);
884 	if (ret < 0)
885 		return ERR_PTR(ret);
886 
887 	if (!pidfs_pid_valid(pid, &path, flags))
888 		return ERR_PTR(-ESRCH);
889 
890 	flags &= ~PIDFD_CLONE;
891 	pidfd_file = dentry_open(&path, flags, current_cred());
892 	/* Raise PIDFD_THREAD explicitly as do_dentry_open() strips it. */
893 	if (!IS_ERR(pidfd_file))
894 		pidfd_file->f_flags |= (flags & PIDFD_THREAD);
895 
896 	return pidfd_file;
897 }
898 
899 static void pidfs_inode_init_once(void *data)
900 {
901 	struct pidfs_inode *pi = data;
902 
903 	inode_init_once(&pi->vfs_inode);
904 }
905 
906 void __init pidfs_init(void)
907 {
908 	pidfs_cachep = kmem_cache_create("pidfs_cache", sizeof(struct pidfs_inode), 0,
909 					 (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
910 					  SLAB_ACCOUNT | SLAB_PANIC),
911 					 pidfs_inode_init_once);
912 	pidfs_mnt = kern_mount(&pidfs_type);
913 	if (IS_ERR(pidfs_mnt))
914 		panic("Failed to mount pidfs pseudo filesystem");
915 }
916