Lines Matching full:pid

3 * Generic pidhash and scalable, time-bounded PID allocator
9 * pid-structures are backing objects for tasks sharing a given ID to chain
17 * We have a list of bitmap pages, whose bitmaps represent the PID space.
23 * Pid namespaces:
48 struct pid init_struct_pid = {
70 * PID-map pages start out as NULL; they get allocated upon
108 void put_pid(struct pid *pid) in put_pid() argument
112 if (!pid) in put_pid()
115 ns = pid->numbers[pid->level].ns; in put_pid()
116 if (refcount_dec_and_test(&pid->count)) { in put_pid()
117 kmem_cache_free(ns->pid_cachep, pid); in put_pid()
125 struct pid *pid = container_of(rhp, struct pid, rcu); in delayed_put_pid() local
126 put_pid(pid); in delayed_put_pid()
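put_pid() drops one reference on a struct pid and, once the count reaches zero, returns it to the owning namespace's pid_cachep; delayed_put_pid() is simply the RCU callback that performs the final put. Every get_pid() therefore needs a matching put_pid(). A minimal sketch of that pairing (the helper name and context are hypothetical):

    #include <linux/pid.h>

    /* Hypothetical helper: hold a reference on a pid across a sleepable section. */
    static void example_use_pid(struct pid *pid)
    {
            pid = get_pid(pid);     /* bumps pid->count; NULL-safe */
            /* ... use the pid, possibly sleeping ... */
            put_pid(pid);           /* frees via ns->pid_cachep once the count hits zero */
    }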
129 void free_pid(struct pid *pid) in free_pid() argument
136 for (i = 0; i <= pid->level; i++) { in free_pid()
137 struct upid *upid = pid->numbers + i; in free_pid()
142 /* When all that is left in the pid namespace in free_pid()
159 call_rcu(&pid->rcu, delayed_put_pid); in free_pid()
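free_pid() unhooks the pid's numbers at every namespace level and then hands the final put to call_rcu(), so a lockless reader that found the pid just before it disappeared from the idr can keep dereferencing it until its RCU read-side section ends. An illustrative reader that relies on this (the helper name is made up):

    #include <linux/pid.h>
    #include <linux/rcupdate.h>
    #include <linux/sched/task.h>

    static struct task_struct *example_lookup(int nr, struct pid_namespace *ns)
    {
            struct task_struct *task;

            rcu_read_lock();
            /* Even if free_pid() runs concurrently, the pid memory stays valid here. */
            task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
            if (task)
                    get_task_struct(task);  /* pin the task before leaving the RCU section */
            rcu_read_unlock();

            return task;    /* NULL if nothing was found; caller must put_task_struct() */
    }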
162 struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, in alloc_pid()
165 struct pid *pid; in alloc_pid() local
174 * the most nested currently active PID namespace it tells alloc_pid() in alloc_pid()
175 * which PID to set for a process in that most nested PID namespace in alloc_pid()
176 * up to set_tid_size PID namespaces. It does not have to set the PID in alloc_pid()
177 * for a process in all nested PID namespaces but set_tid_size must in alloc_pid()
183 pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); in alloc_pid()
184 if (!pid) in alloc_pid()
188 pid->level = ns->level; in alloc_pid()
200 * Also fail if a PID != 1 is requested and in alloc_pid()
201 * no PID 1 exists. in alloc_pid()
218 * If ENOSPC is returned it means that the PID is in alloc_pid()
226 * init really needs pid 1, but after reaching the in alloc_pid()
234 * a partially initialized PID (see below). in alloc_pid()
247 pid->numbers[i].nr = nr; in alloc_pid()
248 pid->numbers[i].ns = tmp; in alloc_pid()
254 * where the child subreaper has already exited and the pid in alloc_pid()
257 * documented behavior for pid namespaces. So we can't easily in alloc_pid()
263 refcount_set(&pid->count, 1); in alloc_pid()
264 spin_lock_init(&pid->lock); in alloc_pid()
266 INIT_HLIST_HEAD(&pid->tasks[type]); in alloc_pid()
268 init_waitqueue_head(&pid->wait_pidfd); in alloc_pid()
269 INIT_HLIST_HEAD(&pid->inodes); in alloc_pid()
271 upid = pid->numbers + ns->level; in alloc_pid()
275 for ( ; upid >= pid->numbers; --upid) { in alloc_pid()
276 /* Make the PID visible to find_pid_ns. */ in alloc_pid()
277 idr_replace(&upid->ns->idr, pid, upid->nr); in alloc_pid()
282 return pid; in alloc_pid()
291 upid = pid->numbers + i; in alloc_pid()
295 /* On failure to allocate the first pid, reset the state */ in alloc_pid()
301 kmem_cache_free(ns->pid_cachep, pid); in alloc_pid()
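alloc_pid() reserves a number in every namespace level's idr, honouring any PIDs requested through set_tid, and only publishes the pid via idr_replace() once all levels have succeeded; on failure it unwinds the partially initialized levels as shown above. A rough caller sketch (hypothetical context; in the real kernel the fork path passes the set_tid array from clone_args, and failure comes back as an ERR_PTR rather than NULL):

    #include <linux/kernel.h>
    #include <linux/pid.h>

    /* Hypothetical: request PID 1 in the most nested namespace, 1234 one level up. */
    static struct pid *example_alloc(struct pid_namespace *ns)
    {
            pid_t set_tid[] = { 1, 1234 };

            return alloc_pid(ns, set_tid, ARRAY_SIZE(set_tid)); /* ERR_PTR on failure */
    }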
312 struct pid *find_pid_ns(int nr, struct pid_namespace *ns) in find_pid_ns()
318 struct pid *find_vpid(int nr) in find_vpid()
324 static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type) in task_pid_ptr()
336 struct pid *pid = *task_pid_ptr(task, type); in attach_pid() local
337 hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]); in attach_pid()
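task_pid_ptr() picks the slot that holds the task's pid for a given pid_type, and attach_pid() links the task onto that pid's per-type hlist. A simplified sketch of the order a fork-like path uses (hypothetical helper; copy_process() also fills the slots with init_task_pid() before attaching, which is omitted here):

    #include <linux/pid.h>
    #include <linux/sched/signal.h>

    /* Simplified: only a thread-group leader joins the TGID/PGID/SID lists,
     * while every task joins its own PIDTYPE_PID list. */
    static void example_attach(struct task_struct *p)
    {
            if (thread_group_leader(p)) {
                    attach_pid(p, PIDTYPE_TGID);
                    attach_pid(p, PIDTYPE_PGID);
                    attach_pid(p, PIDTYPE_SID);
            }
            attach_pid(p, PIDTYPE_PID);
    }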
341 struct pid *new) in __change_pid()
343 struct pid **pid_ptr = task_pid_ptr(task, type); in __change_pid()
344 struct pid *pid; in __change_pid() local
347 pid = *pid_ptr; in __change_pid()
353 if (pid_has_task(pid, tmp)) in __change_pid()
356 free_pid(pid); in __change_pid()
365 struct pid *pid) in change_pid() argument
367 __change_pid(task, type, pid); in change_pid()
373 struct pid *pid1 = left->thread_pid; in exchange_tids()
374 struct pid *pid2 = right->thread_pid; in exchange_tids()
381 /* Swap the per task_struct pid */ in exchange_tids()
386 WRITE_ONCE(left->pid, pid_nr(pid2)); in exchange_tids()
387 WRITE_ONCE(right->pid, pid_nr(pid1)); in exchange_tids()
399 struct task_struct *pid_task(struct pid *pid, enum pid_type type) in pid_task() argument
402 if (pid) { in pid_task()
404 first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), in pid_task()
441 struct pid *get_task_pid(struct task_struct *task, enum pid_type type) in get_task_pid()
443 struct pid *pid; in get_task_pid() local
445 pid = get_pid(rcu_dereference(*task_pid_ptr(task, type))); in get_task_pid()
447 return pid; in get_task_pid()
451 struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) in get_pid_task() argument
455 result = pid_task(pid, type); in get_pid_task()
463 struct pid *find_get_pid(pid_t nr) in find_get_pid()
465 struct pid *pid; in find_get_pid() local
468 pid = get_pid(find_vpid(nr)); in find_get_pid()
471 return pid; in find_get_pid()
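find_get_pid() wraps the RCU lookup and the reference grab into one call, and get_pid_task() takes its own reference on the task it returns, so the pid reference can be dropped immediately afterwards. A small sketch tying the two together (helper name hypothetical):

    #include <linux/pid.h>
    #include <linux/sched/task.h>

    /* Turn a number in the caller's namespace into a pinned task, or NULL. */
    static struct task_struct *example_task_from_nr(pid_t nr)
    {
            struct pid *pid = find_get_pid(nr);
            struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

            put_pid(pid);   /* get_pid_task() holds its own task reference */
            return task;    /* caller does put_task_struct() when finished */
    }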
475 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) in pid_nr_ns() argument
480 if (pid && ns->level <= pid->level) { in pid_nr_ns()
481 upid = &pid->numbers[ns->level]; in pid_nr_ns()
489 pid_t pid_vnr(struct pid *pid) in pid_vnr() argument
491 return pid_nr_ns(pid, task_active_pid_ns(current)); in pid_vnr()
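pid_nr_ns() reports the number a pid carries in one specific namespace, returning 0 when the pid is not visible there (the namespace sits deeper than pid->level or belongs to a different branch), while pid_vnr() is the shorthand for the caller's active namespace. For example (illustrative helper only):

    #include <linux/pid.h>
    #include <linux/pid_namespace.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    static void example_print_numbers(void)
    {
            struct pid *pid = task_pid(current);

            pr_info("global=%d local=%d\n",
                    pid_nr_ns(pid, &init_pid_ns),   /* number in the initial namespace */
                    pid_vnr(pid));                  /* number in current's namespace */
    }

Inside a nested PID namespace the two values normally differ; in the initial namespace they are the same.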
517 * Used by proc to find the first pid that is greater than or equal to nr.
519 * If there is a pid at nr, this function is exactly the same as find_pid_ns.
521 struct pid *find_ge_pid(int nr, struct pid_namespace *ns) in find_ge_pid()
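find_ge_pid() is the iteration primitive behind /proc directory listings: it returns the pid with the smallest number greater than or equal to nr in the given namespace, or NULL when none is left. A hedged sketch of such a walk (the real proc code pins tasks rather than staying inside one long RCU section):

    #include <linux/pid.h>
    #include <linux/rcupdate.h>

    static void example_walk(struct pid_namespace *ns)
    {
            struct pid *pid;
            int nr = 1;

            rcu_read_lock();
            while ((pid = find_ge_pid(nr, ns)) != NULL) {
                    /* ... inspect pid, or get_pid() it for use after the loop ... */
                    nr = pid_nr_ns(pid, ns) + 1;    /* resume after this entry */
            }
            rcu_read_unlock();
    }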
527 struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags) in pidfd_get_pid()
530 struct pid *pid; in pidfd_get_pid() local
536 pid = pidfd_pid(f.file); in pidfd_get_pid()
537 if (!IS_ERR(pid)) { in pidfd_get_pid()
538 get_pid(pid); in pidfd_get_pid()
543 return pid; in pidfd_get_pid()
566 struct pid *pid; in pidfd_get_task() local
569 pid = pidfd_get_pid(pidfd, &f_flags); in pidfd_get_task()
570 if (IS_ERR(pid)) in pidfd_get_task()
571 return ERR_CAST(pid); in pidfd_get_task()
573 task = get_pid_task(pid, PIDTYPE_TGID); in pidfd_get_task()
574 put_pid(pid); in pidfd_get_task()
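pidfd_get_pid() resolves an open pidfd back to its struct pid, and pidfd_get_task() goes one step further to the thread-group leader, also reporting the pidfd's file flags. In-kernel users such as process_madvise() follow roughly this pattern (hypothetical helper):

    #include <linux/err.h>
    #include <linux/pid.h>
    #include <linux/sched/task.h>

    static int example_with_pidfd(int pidfd)
    {
            unsigned int f_flags;
            struct task_struct *task = pidfd_get_task(pidfd, &f_flags);

            if (IS_ERR(task))
                    return PTR_ERR(task);
            /* ... operate on the thread-group leader ... */
            put_task_struct(task);
            return 0;
    }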
583 * pidfd_create() - Create a new pid file descriptor.
585 * @pid: struct pid that the pidfd will reference
588 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
598 int pidfd_create(struct pid *pid, unsigned int flags) in pidfd_create() argument
603 pidfd = pidfd_prepare(pid, flags, &pidfd_file); in pidfd_create()
612 * sys_pidfd_open() - Open new pid file descriptor.
614 * @pid: pid for which to retrieve a pidfd
617 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
618 * the process identified by @pid. Currently, the process identified by
619 * @pid must be a thread-group leader. This restriction currently exists
627 SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags) in SYSCALL_DEFINE2() argument
630 struct pid *p; in SYSCALL_DEFINE2()
635 if (pid <= 0) in SYSCALL_DEFINE2()
638 p = find_get_pid(pid); in SYSCALL_DEFINE2()
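From userspace this surfaces as pidfd_open(2): it yields an O_CLOEXEC descriptor for the process, which can be polled for exit or used with pidfd_send_signal(2). A small standalone example, assuming kernel and libc headers new enough to define SYS_pidfd_open (Linux 5.3+):

    #define _GNU_SOURCE
    #include <poll.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(int argc, char *argv[])
    {
            if (argc < 2)
                    return 1;

            pid_t pid = (pid_t)atoi(argv[1]);
            int pidfd = syscall(SYS_pidfd_open, pid, 0);
            if (pidfd < 0) {
                    perror("pidfd_open");
                    return 1;
            }

            /* The pidfd becomes readable once the target process terminates. */
            struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
            poll(&pfd, 1, -1);
            printf("process %d exited\n", (int)pid);

            close(pidfd);
            return 0;
    }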
662 init_pid_ns.pid_cachep = kmem_cache_create("pid", in pid_idr_init()
663 struct_size_t(struct pid, numbers, 1), in pid_idr_init()
664 __alignof__(struct pid), in pid_idr_init()
688 static int pidfd_getfd(struct pid *pid, int fd) in pidfd_getfd() argument
694 task = get_pid_task(pid, PIDTYPE_PID); in pidfd_getfd()
728 struct pid *pid; in SYSCALL_DEFINE3() local
740 pid = pidfd_pid(f.file); in SYSCALL_DEFINE3()
741 if (IS_ERR(pid)) in SYSCALL_DEFINE3()
742 ret = PTR_ERR(pid); in SYSCALL_DEFINE3()
744 ret = pidfd_getfd(pid, fd); in SYSCALL_DEFINE3()
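sys_pidfd_getfd() duplicates a file descriptor out of the process behind a pidfd, gated by a PTRACE_MODE_ATTACH_REALCREDS access check. A userspace sketch, assuming headers that define SYS_pidfd_getfd (Linux 5.6+):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Copy fd number `targetfd` of the process behind `pidfd` into this process. */
    static int grab_remote_fd(int pidfd, int targetfd)
    {
            int fd = syscall(SYS_pidfd_getfd, pidfd, targetfd, 0);

            if (fd < 0)
                    perror("pidfd_getfd");
            return fd;      /* new close-on-exec descriptor, or -1 on error */
    }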