// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

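/* Statically allocated pid for the initial (swapper, PID 0) task, set up before the allocator runs. */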
struct pid init_struct_pid = {
	.count = REFCOUNT_INIT(1),
	.tasks = {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level = 0,
	.numbers = { {
		.nr = 0,
		.ns = &init_pid_ns,
	}, }
};

static int pid_max_min = RESERVED_PIDS + 1;
static int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.ns = NS_COMMON_INIT(init_pid_ns),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.pid_max = PID_MAX_DEFAULT,
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
	.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

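/* Serializes updates to each namespace's idr and its ->pid_allocated counter. */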
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

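/*
 * Drop a reference on @pid and free it once the last reference is gone.
 * Illustrative pairing with the getters further below:
 *
 *	struct pid *p = get_task_pid(task, PIDTYPE_PID);
 *	...
 *	put_pid(p);
 */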
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		pidfs_free_pid(pid);
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	int i;
	struct pid_namespace *active_ns;

	lockdep_assert_not_held(&tasklist_lock);

	active_ns = pid->numbers[pid->level].ns;
	ns_ref_active_put(active_ns);

	spin_lock(&pidmap_lock);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock(&pidmap_lock);

	pidfs_remove_pid(pid);
	call_rcu(&pid->rcu, delayed_put_pid);
}

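/*
 * Free a batch of pids collected via detach_pid()/__change_pid(); typically
 * called once tasklist_lock has been dropped.
 */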
void free_pids(struct pid **pids)
{
	int tmp;

	/*
	 * This can batch pidmap_lock.
	 */
	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pids[tmp])
			free_pid(pids[tmp]);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *arg_set_tid,
		      size_t arg_set_tid_size)
{
	int set_tid[MAX_PID_NS_LEVEL + 1] = {};
	int pid_max[MAX_PID_NS_LEVEL + 1] = {};
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;
	bool retried_preload;

	/*
	 * arg_set_tid_size contains the size of the arg_set_tid array. Starting at
	 * the most nested currently active PID namespace it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace
	 * up to arg_set_tid_size PID namespaces. It does not have to set the PID
	 * for a process in all nested PID namespaces but arg_set_tid_size must
	 * never be greater than the current ns->level + 1.
	 */
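	/*
	 * Illustrative example (hypothetical values): with ns->level == 2,
	 * arg_set_tid = { 42, 5, 2 } and arg_set_tid_size == 3 requests PID 42
	 * in the most nested namespace, PID 5 in its parent and PID 2 in the
	 * grandparent namespace.
	 */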
	if (arg_set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	/*
	 * Prep before we take locks:
	 *
	 * 1. allocate and fill in pid struct
	 */
	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	get_pid_ns(ns);
	pid->level = ns->level;
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);
	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);
	pidfs_prepare_pid(pid);

	/*
	 * 2. permission check via checkpoint_restore_ns_capable()
	 *
	 * This also records the pid_max seen at each level so that the value
	 * used later is the same one that was checked here.
	 */
	for (tmp = ns, i = ns->level; i >= 0; i--) {
		pid_max[ns->level - i] = READ_ONCE(tmp->pid_max);

		if (arg_set_tid_size) {
			int tid = set_tid[ns->level - i] = arg_set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max[ns->level - i])
				goto out_abort;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_abort;
			retval = -EPERM;
			if (!checkpoint_restore_ns_capable(tmp->user_ns))
				goto out_abort;
			arg_set_tid_size--;
		}

		tmp = tmp->parent;
	}

	/*
	 * Prep is done, id allocation goes here:
	 */
	retried_preload = false;
	idr_preload(GFP_KERNEL);
	spin_lock(&pidmap_lock);
	for (tmp = ns, i = ns->level; i >= 0;) {
		int tid = set_tid[ns->level - i];

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max[ns->level - i], GFP_ATOMIC);
			if (nr == -ENOSPC)
				nr = -EAGAIN;
		}

		if (unlikely(nr < 0)) {
			/*
			 * Preload more memory if idr_alloc{,cyclic} failed with -ENOMEM.
			 *
			 * The IDR API only allows us to preload memory for one call, while we may end
			 * up doing several under pidmap_lock with GFP_ATOMIC. The situation may be
			 * salvageable with GFP_KERNEL. But make sure to not loop indefinitely if preload
			 * did not help (the routine unfortunately returns void, so we have no idea
			 * if it got anywhere).
			 *
			 * The lock can be safely dropped and picked up as historically pid allocation
			 * for different namespaces was *not* atomic -- we try to hold on to it the
			 * entire time only for performance reasons.
			 */
			if (nr == -ENOMEM && !retried_preload) {
				spin_unlock(&pidmap_lock);
				idr_preload_end();
				retried_preload = true;
				idr_preload(GFP_KERNEL);
				spin_lock(&pidmap_lock);
				continue;
			}
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
		i--;
		retried_preload = false;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 *
	 * This can't be done earlier because we need to preserve other
	 * error conditions.
	 */
	retval = -ENOMEM;
	if (unlikely(!(ns->pid_allocated & PIDNS_ADDING)))
		goto out_free;
	for (upid = pid->numbers + ns->level; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock(&pidmap_lock);
	idr_preload_end();
	ns_ref_active_get(ns);

	retval = pidfs_add_pid(pid);
	if (unlikely(retval)) {
		free_pid(pid);
		pid = ERR_PTR(-ENOMEM);
	}

	return pid;

out_free:
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock(&pidmap_lock);
	idr_preload_end();

out_abort:
	put_pid_ns(ns);
	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
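
/*
 * Illustrative use of find_vpid()/find_pid_ns(): callers must provide RCU
 * (or tasklist_lock) protection themselves, e.g.
 *
 *	rcu_read_lock();
 *	pid = find_vpid(nr);
 *	if (pid)
 *		task = pid_task(pid, PIDTYPE_PID);
 *	rcu_read_unlock();
 */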

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	lockdep_assert_held_write(&tasklist_lock);

	pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct pid **pids, struct task_struct *task,
			 enum pid_type type, struct pid *new)
{
	struct pid **pid_ptr, *pid;
	int tmp;

	lockdep_assert_held_write(&tasklist_lock);

	pid_ptr = task_pid_ptr(task, type);
	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	WARN_ON(pids[type]);
	pids[type] = pid;
}

void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type)
{
	__change_pid(pids, task, type, NULL);
}

void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(pids, task, type, pid);
	attach_pid(task, type);
}

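/*
 * Swap the PIDTYPE_PID pids of two tasks, e.g. when a non-leader thread
 * exec's and takes over the thread-group leader's PID.
 */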
void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	lockdep_assert_held_write(&tasklist_lock);

	/* Swap the single entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
		  enum pid_type type)
{
	WARN_ON_ONCE(type == PIDTYPE_PID);
	lockdep_assert_held_write(&tasklist_lock);
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
		       struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
EXPORT_SYMBOL_GPL(find_ge_pid);
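
/*
 * Illustrative find_ge_pid() scan over all pids in @ns (with RCU or
 * tasklist_lock held), similar to what /proc readdir does:
 *
 *	for (nr = 1; (pid = find_ge_pid(nr, ns)) != NULL;
 *	     nr = pid_nr_ns(pid, ns) + 1)
 *		...;
 */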

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
	CLASS(fd, f)(fd);
	struct pid *pid;

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(fd_file(f));
	if (!IS_ERR(pid)) {
		get_pid(pid);
		*flags = fd_file(f)->f_flags;
	}
	return pid;
}

/**
 * pidfd_get_task() - Get the task associated with a pidfd
 *
 * @pidfd: pidfd for which to get the task
 * @flags: flags associated with this pidfd
 *
 * Return the task associated with @pidfd. The function takes a reference on
 * the returned task. The caller is responsible for releasing that reference.
 *
 * Return: On success, the task_struct associated with the pidfd.
 *         On error, a negative errno number will be returned.
 */
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
	unsigned int f_flags = 0;
	struct pid *pid;
	struct task_struct *task;
	enum pid_type type;

	switch (pidfd) {
	case PIDFD_SELF_THREAD:
		type = PIDTYPE_PID;
		pid = get_task_pid(current, type);
		break;
	case PIDFD_SELF_THREAD_GROUP:
		type = PIDTYPE_TGID;
		pid = get_task_pid(current, type);
		break;
	default:
		pid = pidfd_get_pid(pidfd, &f_flags);
		if (IS_ERR(pid))
			return ERR_CAST(pid);
		type = PIDTYPE_TGID;
		break;
	}

	task = get_pid_task(pid, type);
	put_pid(pid);
	if (!task)
		return ERR_PTR(-ESRCH);

	*flags = f_flags;
	return task;
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note, that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * This symbol should not be explicitly exported to loadable modules.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
	int pidfd;
	struct file *pidfd_file;

	pidfd = pidfd_prepare(pid, flags, &pidfd_file);
	if (pidfd < 0)
		return pidfd;

	fd_install(pidfd, pidfd_file);
	return pidfd;
}

/**
 * sys_pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the task identified by @pid. Without PIDFD_THREAD flag the target task
 * must be a thread-group leader.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD))
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	fd = pidfd_create(p, flags);

	put_pid(p);
	return fd;
}
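
/*
 * Illustrative userspace use of pidfd_open(2), not part of this file's API:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// becomes readable when the process exits
 */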

#ifdef CONFIG_SYSCTL
static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root)
{
	return &task_active_pid_ns(current)->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &task_active_pid_ns(current)->set == set;
}

static int pid_table_root_permissions(struct ctl_table_header *head,
				      const struct ctl_table *table)
{
	struct pid_namespace *pidns =
		container_of(head->set, struct pid_namespace, set);
	int mode = table->mode;

	if (ns_capable_noaudit(pidns->user_ns, CAP_SYS_ADMIN) ||
	    uid_eq(current_euid(), make_kuid(pidns->user_ns, 0)))
		mode = (mode & S_IRWXU) >> 6;
	else if (in_egroup_p(make_kgid(pidns->user_ns, 0)))
		mode = (mode & S_IRWXG) >> 3;
	else
		mode = mode & S_IROTH;
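	/*
	 * Replicate the chosen bits across user/group/other so that exactly
	 * these rights apply however the caller is classified later.
	 */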
	return (mode << 6) | (mode << 3) | mode;
}

static void pid_table_root_set_ownership(struct ctl_table_header *head,
					 kuid_t *uid, kgid_t *gid)
{
	struct pid_namespace *pidns =
		container_of(head->set, struct pid_namespace, set);
	kuid_t ns_root_uid;
	kgid_t ns_root_gid;

	ns_root_uid = make_kuid(pidns->user_ns, 0);
	if (uid_valid(ns_root_uid))
		*uid = ns_root_uid;

	ns_root_gid = make_kgid(pidns->user_ns, 0);
	if (gid_valid(ns_root_gid))
		*gid = ns_root_gid;
}

static struct ctl_table_root pid_table_root = {
	.lookup = pid_table_root_lookup,
	.permissions = pid_table_root_permissions,
	.set_ownership = pid_table_root_set_ownership,
};

static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer,
			   size_t *lenp, loff_t *ppos)
{
	struct pid *new_pid;
	pid_t tmp_pid;
	int r;
	struct ctl_table tmp_table = *table;

	tmp_pid = pid_vnr(cad_pid);
	tmp_table.data = &tmp_pid;

	r = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (r || !write)
		return r;

	new_pid = find_get_pid(tmp_pid);
	if (!new_pid)
		return -ESRCH;

	put_pid(xchg(&cad_pid, new_pid));
	return 0;
}

static const struct ctl_table pid_table[] = {
	{
		.procname = "pid_max",
		.data = &init_pid_ns.pid_max,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &pid_max_min,
		.extra2 = &pid_max_max,
	},
#ifdef CONFIG_PROC_SYSCTL
	{
		.procname = "cad_pid",
		.maxlen = sizeof(int),
		.mode = 0600,
		.proc_handler = proc_do_cad_pid,
	},
#endif
};
#endif

int register_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen);

	tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	tbl->data = &pidns->pid_max;
	pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max,
			     PIDS_PER_CPU_DEFAULT * num_possible_cpus()));

	pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl,
						 ARRAY_SIZE(pid_table));
	if (!pidns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&pidns->set);
		return -ENOMEM;
	}
#endif
	return 0;
}

void unregister_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
	const struct ctl_table *tbl;

	tbl = pidns->sysctls->ctl_table_arg;
	unregister_sysctl_table(pidns->sysctls);
	retire_sysctl_set(&pidns->set);
	kfree(tbl);
#endif
}

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max,
				  PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
			    PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

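	/* Pids in the initial namespace have level 0 and thus a single struct upid. */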
	init_pid_ns.pid_cachep = kmem_cache_create("pid",
			struct_size_t(struct pid, numbers, 1),
			__alignof__(struct pid),
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
			NULL);
}

static __init int pid_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	/* "kernel" directory will have already been initialized. */
	BUG_ON(register_pidns_sysctls(&init_pid_ns));
#endif
	return 0;
}
subsys_initcall(pid_namespace_sysctl_init);

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	up_read(&task->signal->exec_update_lock);

	if (!file) {
		/*
		 * It is possible that the target thread is exiting; it can be
		 * either:
		 * 1. before exit_signals(), which gives a real fd
		 * 2. before exit_files() takes the task_lock(), which gives a real fd
		 * 3. after exit_files() releases task_lock(), ->files is NULL;
		 *    this has PF_EXITING, since it was set in exit_signals(),
		 *    __pidfd_fget() returns EBADF.
		 * In case 3 we get EBADF, but that really means ESRCH, since
		 * the task is currently exiting and has freed its files
		 * struct, so we fix it up.
		 */
		if (task->flags & PF_EXITING)
			file = ERR_PTR(-ESRCH);
		else
			file = ERR_PTR(-EBADF);
	}

	return file;
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = receive_fd(file, NULL, O_CLOEXEC);
	fput(file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd: the pidfd file descriptor of the process
 * @fd:    the file descriptor number to get
 * @flags: flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd, and file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	CLASS(fd, f)(pidfd);
	if (fd_empty(f))
		return -EBADF;

	pid = pidfd_pid(fd_file(f));
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	return pidfd_getfd(pid, fd);
}