1 /*-
2 * Copyright (c) 2014 John Baldwin
3 * Copyright (c) 2014, 2016 The FreeBSD Foundation
4 *
5 * Portions of this software were developed by Konstantin Belousov
6 * under sponsorship from the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include "opt_ktrace.h"
31
32 #include <sys/param.h>
33 #include <sys/_unrhdr.h>
34 #include <sys/systm.h>
35 #include <sys/capsicum.h>
36 #include <sys/lock.h>
37 #include <sys/malloc.h>
38 #include <sys/mman.h>
39 #include <sys/mutex.h>
40 #include <sys/priv.h>
41 #include <sys/proc.h>
42 #include <sys/procctl.h>
43 #include <sys/sx.h>
44 #include <sys/syscallsubr.h>
45 #include <sys/sysproto.h>
46 #include <sys/wait.h>
47
48 #include <vm/vm.h>
49 #include <vm/pmap.h>
50 #include <vm/vm_map.h>
51 #include <vm/vm_extern.h>
52
/*
 * Set or clear OOM-kill protection (P_PROTECTED) on a single process
 * according to "flags".  Returns 1 if the process was updated, 0 if it
 * was skipped (system process or scheduling permission denied).
 */
static int
protect_setchild(struct thread *td, struct proc *p, int flags)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/* Never touch kernel processes or processes we may not reschedule. */
	if (p->p_flag & P_SYSTEM || p_cansched(td, p) != 0)
		return (0);
	if (flags & PPROT_SET) {
		p->p_flag |= P_PROTECTED;
		if (flags & PPROT_INHERIT)
			p->p_flag2 |= P2_INHERIT_PROTECTED;
	} else {
		/* Clearing protection also clears the inherit flag. */
		p->p_flag &= ~P_PROTECTED;
		p->p_flag2 &= ~P2_INHERIT_PROTECTED;
	}
	return (1);
}
70
/*
 * Apply protect_setchild() to "top" and every descendant, via an
 * iterative depth-first traversal of the p_children lists.  Returns
 * non-zero if at least one process was updated.  Entered and exited
 * with "top" locked; each visited process is locked only while its
 * flags are changed.
 */
static int
protect_setchildren(struct thread *td, struct proc *top, int flags)
{
	struct proc *p;
	int ret;

	p = top;
	ret = 0;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= protect_setchild(td, p, flags);
		PROC_UNLOCK(p);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top) {
				/* Re-lock top before returning, as callers expect. */
				PROC_LOCK(p);
				return (ret);
			}
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
}
104
105 static int
protect_set(struct thread * td,struct proc * p,void * data)106 protect_set(struct thread *td, struct proc *p, void *data)
107 {
108 int error, flags, ret;
109
110 flags = *(int *)data;
111 switch (PPROT_OP(flags)) {
112 case PPROT_SET:
113 case PPROT_CLEAR:
114 break;
115 default:
116 return (EINVAL);
117 }
118
119 if ((PPROT_FLAGS(flags) & ~(PPROT_DESCEND | PPROT_INHERIT)) != 0)
120 return (EINVAL);
121
122 error = priv_check(td, PRIV_VM_MADV_PROTECT);
123 if (error)
124 return (error);
125
126 if (flags & PPROT_DESCEND)
127 ret = protect_setchildren(td, p, flags);
128 else
129 ret = protect_setchild(td, p, flags);
130 if (ret == 0)
131 return (EPERM);
132 return (0);
133 }
134
135 static int
reap_acquire(struct thread * td,struct proc * p,void * data __unused)136 reap_acquire(struct thread *td, struct proc *p, void *data __unused)
137 {
138
139 sx_assert(&proctree_lock, SX_XLOCKED);
140 if (p != td->td_proc)
141 return (EPERM);
142 if ((p->p_treeflag & P_TREE_REAPER) != 0)
143 return (EBUSY);
144 p->p_treeflag |= P_TREE_REAPER;
145 /*
146 * We do not reattach existing children and the whole tree
147 * under them to us, since p->p_reaper already seen them.
148 */
149 return (0);
150 }
151
152 static int
reap_release(struct thread * td,struct proc * p,void * data __unused)153 reap_release(struct thread *td, struct proc *p, void *data __unused)
154 {
155
156 sx_assert(&proctree_lock, SX_XLOCKED);
157 if (p != td->td_proc)
158 return (EPERM);
159 if (p == initproc)
160 return (EINVAL);
161 if ((p->p_treeflag & P_TREE_REAPER) == 0)
162 return (EINVAL);
163 reaper_abandon_children(p, false);
164 return (0);
165 }
166
/*
 * PROC_REAP_STATUS handler: fill a procctl_reaper_status for the
 * reaper of p (or for p itself if it is a reaper), including the
 * counts of direct children versus all reaped descendants.
 */
static int
reap_status(struct thread *td, struct proc *p, void *data)
{
	struct proc *reap, *p2, *first_p;
	struct procctl_reaper_status *rs;

	rs = data;
	sx_assert(&proctree_lock, SX_LOCKED);
	if ((p->p_treeflag & P_TREE_REAPER) == 0) {
		reap = p->p_reaper;
	} else {
		reap = p;
		rs->rs_flags |= REAPER_STATUS_OWNED;
	}
	if (reap == initproc)
		rs->rs_flags |= REAPER_STATUS_REALINIT;
	rs->rs_reaper = reap->p_pid;
	rs->rs_descendants = 0;
	rs->rs_children = 0;
	if (!LIST_EMPTY(&reap->p_reaplist)) {
		/* Prefer a direct child as the reported pid, if any. */
		first_p = LIST_FIRST(&reap->p_children);
		if (first_p == NULL)
			first_p = LIST_FIRST(&reap->p_reaplist);
		rs->rs_pid = first_p->p_pid;
		LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
			/* Count direct children separately from descendants. */
			if (proc_realparent(p2) == reap)
				rs->rs_children++;
			rs->rs_descendants++;
		}
	} else {
		/* No descendants to report. */
		rs->rs_pid = -1;
	}
	return (0);
}
201
/*
 * PROC_REAP_GETPIDS handler: copy out a procctl_reaper_pidinfo record
 * for each process on the reaper list of p's reaper (or of p itself
 * if it is a reaper), up to rp_count records.  The tree lock is
 * dropped for the sleeping allocation and for the copyout, so the
 * snapshot may be slightly stale by the time the caller sees it.
 */
static int
reap_getpids(struct thread *td, struct proc *p, void *data)
{
	struct proc *reap, *p2;
	struct procctl_reaper_pidinfo *pi, *pip;
	struct procctl_reaper_pids *rp;
	u_int i, n;
	int error;

	rp = data;
	sx_assert(&proctree_lock, SX_LOCKED);
	PROC_UNLOCK(p);
	reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	n = i = 0;
	error = 0;
	/* First pass: count descendants to size the temporary buffer. */
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling)
		n++;
	/* Drop the tree lock across the M_WAITOK allocation. */
	sx_unlock(&proctree_lock);
	if (rp->rp_count < n)
		n = rp->rp_count;
	pi = malloc(n * sizeof(*pi), M_TEMP, M_WAITOK);
	sx_slock(&proctree_lock);
	/* Second pass: fill records; the list may have changed meanwhile. */
	LIST_FOREACH(p2, &reap->p_reaplist, p_reapsibling) {
		if (i == n)
			break;
		pip = &pi[i];
		bzero(pip, sizeof(*pip));
		pip->pi_pid = p2->p_pid;
		pip->pi_subtree = p2->p_reapsubtree;
		pip->pi_flags = REAPER_PIDINFO_VALID;
		if (proc_realparent(p2) == reap)
			pip->pi_flags |= REAPER_PIDINFO_CHILD;
		if ((p2->p_treeflag & P_TREE_REAPER) != 0)
			pip->pi_flags |= REAPER_PIDINFO_REAPER;
		if ((p2->p_flag & P_STOPPED) != 0)
			pip->pi_flags |= REAPER_PIDINFO_STOPPED;
		if (p2->p_state == PRS_ZOMBIE)
			pip->pi_flags |= REAPER_PIDINFO_ZOMBIE;
		else if ((p2->p_flag & P_WEXIT) != 0)
			pip->pi_flags |= REAPER_PIDINFO_EXITING;
		i++;
	}
	/* Copy out with no locks held, then restore the entry lock state. */
	sx_sunlock(&proctree_lock);
	error = copyout(pi, rp->rp_pids, i * sizeof(*pi));
	free(pi, M_TEMP);
	sx_slock(&proctree_lock);
	PROC_LOCK(p);
	return (error);
}
251
/*
 * Shared state for one PROC_REAP_KILL subtree walk: the caller's
 * credentials, the current target, the signal to deliver, the
 * user-visible request/result structure, and the accumulated error.
 */
struct reap_kill_proc_work {
	struct ucred *cr;	/* credentials checked by cr_cansignal() */
	struct proc *target;	/* process currently being signalled */
	ksiginfo_t *ksi;	/* signal information for pksignal() */
	struct procctl_reaper_kill *rk;	/* request and result counters */
	int *error;		/* accumulated error for the whole walk */
};
259
/*
 * Deliver the requested signal to w->target, which must be locked and
 * held.  On permission failure the first non-ESRCH error and the
 * offending pid are recorded in the result structure; on success the
 * kill counter is bumped and the accumulated error is cleared.
 */
static void
reap_kill_proc_locked(struct reap_kill_proc_work *w)
{
	int error;

	PROC_LOCK_ASSERT(w->target, MA_OWNED);
	PROC_ASSERT_HELD(w->target);

	error = cr_cansignal(w->cr, w->target, w->rk->rk_sig);
	if (error != 0) {
		/*
		 * Hide ESRCH errors to ensure that this function
		 * cannot be used as an oracle for process visibility.
		 */
		if (error != ESRCH && *w->error == 0) {
			w->rk->rk_fpid = w->target->p_pid;
			*w->error = error;
		}
		return;
	}

	/* error is 0 here, so this also clears the accumulated error. */
	(void)pksignal(w->target, w->rk->rk_sig, w->ksi);
	w->rk->rk_killed++;
	*w->error = error;
}
285
/*
 * Signal w->target after synchronizing with concurrent forks in its
 * process group via pg_killsx.  If the lock cannot be taken without
 * sleeping, all locks are dropped and retried; *proctree_dropped is
 * set so the caller knows its list iteration state is stale.
 */
static void
reap_kill_proc(struct reap_kill_proc_work *w, bool *proctree_dropped)
{
	struct pgrp *pgrp;
	int xlocked;

	sx_assert(&proctree_lock, SX_LOCKED);
	/* Remember the lock mode so it can be restored after a retry. */
	xlocked = sx_xlocked(&proctree_lock);
	PROC_LOCK_ASSERT(w->target, MA_OWNED);
	PROC_ASSERT_HELD(w->target);

	/* Sync with forks. */
	for (;;) {
		/*
		 * Short-circuit handling of the exiting process, do
		 * not wait for it to single-thread (hold prevents it
		 * from exiting further).  This avoids
		 * locking pg_killsx for it, and reduces the
		 * proctree_lock contention.
		 */
		if ((w->target->p_flag2 & P2_WEXIT) != 0)
			return;

		pgrp = w->target->p_pgrp;
		if (pgrp == NULL || sx_try_xlock(&pgrp->pg_killsx))
			break;

		/* Could not take pg_killsx without sleeping: drop and wait. */
		PROC_UNLOCK(w->target);
		sx_unlock(&proctree_lock);
		/* This is safe because pgrp zone is nofree. */
		sx_xlock(&pgrp->pg_killsx);
		sx_xunlock(&pgrp->pg_killsx);
		*proctree_dropped = true;
		if (xlocked)
			sx_xlock(&proctree_lock);
		else
			sx_slock(&proctree_lock);
		PROC_LOCK(w->target);
	}

	reap_kill_proc_locked(w);

	if (pgrp != NULL)
		sx_xunlock(&pgrp->pg_killsx);
}
331
/*
 * Work-list entry for the iterative walk of nested reapers in a
 * subtree kill; each tracked reaper is held (PHOLD) until processed.
 */
struct reap_kill_tracker {
	struct proc *parent;	/* held reaper whose reap list is scanned */
	TAILQ_ENTRY(reap_kill_tracker) link;
};

TAILQ_HEAD(reap_kill_tracker_head, reap_kill_tracker);
338
/*
 * Queue reaper p2 for a later scan of its reap list.  The process is
 * held so it cannot be freed while queued; exiting processes are
 * skipped since their descendants will be reparented anyway.
 */
static void
reap_kill_sched(struct reap_kill_tracker_head *tracker, struct proc *p2)
{
	struct reap_kill_tracker *t;

	PROC_LOCK(p2);
	if ((p2->p_flag2 & P2_WEXIT) != 0) {
		PROC_UNLOCK(p2);
		return;
	}
	_PHOLD(p2);
	PROC_UNLOCK(p2);
	t = malloc(sizeof(struct reap_kill_tracker), M_TEMP, M_WAITOK);
	t->parent = p2;
	TAILQ_INSERT_TAIL(tracker, t, link);
}
355
/* Release the hold taken by reap_kill_sched() and free the entry. */
static void
reap_kill_sched_free(struct reap_kill_tracker *t)
{
	PRELE(t->parent);
	free(t, M_TEMP);
}
362
/*
 * REAPER_KILL_CHILDREN variant: signal only the direct children of
 * the reaper.  proctree_lock stays held throughout, so no retry logic
 * is needed.  The first permission error replaces the initial ESRCH
 * in *error; the loop continues so everything signalable is hit.
 */
static void
reap_kill_children(struct thread *td, struct proc *reaper,
    struct procctl_reaper_kill *rk, ksiginfo_t *ksi, int *error)
{
	struct proc *p2;
	int error1;

	LIST_FOREACH(p2, &reaper->p_children, p_sibling) {
		PROC_LOCK(p2);
		if ((p2->p_flag2 & P2_WEXIT) == 0) {
			error1 = p_cansignal(td, p2, rk->rk_sig);
			if (error1 != 0) {
				/* Record only the first failure. */
				if (*error == ESRCH) {
					rk->rk_fpid = p2->p_pid;
					*error = error1;
				}

				/*
				 * Do not end the loop on error,
				 * signal everything we can.
				 */
			} else {
				(void)pksignal(p2, rk->rk_sig, ksi);
				rk->rk_killed++;
			}
		}
		PROC_UNLOCK(p2);
	}
}
392
/*
 * One pass over the reaper subtree: walk the reap lists of "reaper"
 * and all nested reapers, signalling each process at most once.  The
 * "pids" unr set records already-signalled pids so repeated passes do
 * not double-signal.  Returns true if any work was done (the caller
 * repeats until a pass is a no-op, to catch processes created while
 * locks were dropped).
 */
static bool
reap_kill_subtree_once(struct thread *td, struct proc *p, struct proc *reaper,
    struct unrhdr *pids, struct reap_kill_proc_work *w)
{
	struct reap_kill_tracker_head tracker;
	struct reap_kill_tracker *t;
	struct proc *p2;
	bool proctree_dropped, res;

	res = false;
	TAILQ_INIT(&tracker);
	reap_kill_sched(&tracker, reaper);
	while ((t = TAILQ_FIRST(&tracker)) != NULL) {
		TAILQ_REMOVE(&tracker, t, link);

	again:
		/*
		 * Since reap_kill_proc() drops proctree_lock sx, it
		 * is possible that the tracked reaper is no longer.
		 * In this case the subtree is reparented to the new
		 * reaper, which should handle it.
		 */
		if ((t->parent->p_treeflag & P_TREE_REAPER) == 0) {
			reap_kill_sched_free(t);
			res = true;
			continue;
		}

		LIST_FOREACH(p2, &t->parent->p_reaplist, p_reapsibling) {
			/* Respect the subtree filter at the top level only. */
			if (t->parent == reaper &&
			    (w->rk->rk_flags & REAPER_KILL_SUBTREE) != 0 &&
			    p2->p_reapsubtree != w->rk->rk_subtree)
				continue;
			/* Nested reapers get their own scan later. */
			if ((p2->p_treeflag & P_TREE_REAPER) != 0)
				reap_kill_sched(&tracker, p2);

			/*
			 * Handle possible pid reuse.  If we recorded
			 * p2 as killed but its p_flag2 does not
			 * confirm it, that means that the process
			 * terminated and its id was reused by other
			 * process in the reaper subtree.
			 *
			 * Unlocked read of p2->p_flag2 is fine, it is
			 * our thread that set the tested flag.
			 */
			if (alloc_unr_specific(pids, p2->p_pid) != p2->p_pid &&
			    (atomic_load_int(&p2->p_flag2) &
			    (P2_REAPKILLED | P2_WEXIT)) != 0)
				continue;

			proctree_dropped = false;
			PROC_LOCK(p2);
			if ((p2->p_flag2 & P2_WEXIT) == 0) {
				_PHOLD(p2);

				/*
				 * sapblk ensures that only one thread
				 * in the system sets this flag.
				 */
				p2->p_flag2 |= P2_REAPKILLED;

				w->target = p2;
				reap_kill_proc(w, &proctree_dropped);
				_PRELE(p2);
			}
			PROC_UNLOCK(p2);
			res = true;
			/* List linkage is stale if the tree lock was dropped. */
			if (proctree_dropped)
				goto again;
		}
		reap_kill_sched_free(t);
	}
	return (res);
}
468
/*
 * REAPER_KILL subtree variant: repeat single passes until nothing new
 * is signalled, then clear the temporary P2_REAPKILLED marks on all
 * recorded pids.
 */
static void
reap_kill_subtree(struct thread *td, struct proc *p, struct proc *reaper,
    struct reap_kill_proc_work *w)
{
	struct unrhdr pids;
	void *ihandle;
	struct proc *p2;
	int pid;

	/*
	 * pids records processes which were already signalled, to
	 * avoid doubling signals to them if iteration needs to be
	 * repeated.
	 */
	init_unrhdr(&pids, 1, PID_MAX, UNR_NO_MTX);
	PROC_LOCK(td->td_proc);
	/* Bail out early if the calling process itself is exiting. */
	if ((td->td_proc->p_flag2 & P2_WEXIT) != 0) {
		PROC_UNLOCK(td->td_proc);
		goto out;
	}
	PROC_UNLOCK(td->td_proc);
	while (reap_kill_subtree_once(td, p, reaper, &pids, w))
		;

	/* Clear the marks; pfind() returns the process locked. */
	ihandle = create_iter_unr(&pids);
	while ((pid = next_iter_unr(ihandle)) != -1) {
		p2 = pfind(pid);
		if (p2 != NULL) {
			p2->p_flag2 &= ~P2_REAPKILLED;
			PROC_UNLOCK(p2);
		}
	}
	free_iter_unr(ihandle);

out:
	clean_unrhdr(&pids);
	clear_unrhdr(&pids);
}
507
508 static bool
reap_kill_sapblk(struct thread * td __unused,void * data)509 reap_kill_sapblk(struct thread *td __unused, void *data)
510 {
511 struct procctl_reaper_kill *rk;
512
513 rk = data;
514 return ((rk->rk_flags & REAPER_KILL_CHILDREN) == 0);
515 }
516
/*
 * PROC_REAP_KILL handler: deliver a signal to the descendants of p's
 * reaper (or of p itself if it is a reaper), either to direct
 * children only or to the whole (sub)tree, as selected by rk_flags.
 * Disallowed in capability mode.  Starts from ESRCH so that "nothing
 * matched" is reported if no process is signalled.
 */
static int
reap_kill(struct thread *td, struct proc *p, void *data)
{
	struct reap_kill_proc_work w;
	struct proc *reaper;
	ksiginfo_t ksi;
	struct procctl_reaper_kill *rk;
	int error;

	rk = data;
	sx_assert(&proctree_lock, SX_LOCKED);
	if (CAP_TRACING(td))
		ktrcapfail(CAPFAIL_SIGNAL, &rk->rk_sig);
	if (IN_CAPABILITY_MODE(td))
		return (ECAPMODE);
	/* Valid signal, and CHILDREN/SUBTREE are mutually exclusive. */
	if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG ||
	    (rk->rk_flags & ~(REAPER_KILL_CHILDREN |
	    REAPER_KILL_SUBTREE)) != 0 || (rk->rk_flags &
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) ==
	    (REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE))
		return (EINVAL);
	PROC_UNLOCK(p);
	reaper = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = rk->rk_sig;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	error = ESRCH;
	rk->rk_killed = 0;
	rk->rk_fpid = -1;
	if ((rk->rk_flags & REAPER_KILL_CHILDREN) != 0) {
		reap_kill_children(td, reaper, rk, &ksi, &error);
	} else {
		/* Subtree walk may drop locks; hold the credentials. */
		w.cr = crhold(td->td_ucred);
		w.ksi = &ksi;
		w.rk = rk;
		w.error = &error;
		reap_kill_subtree(td, p, reaper, &w);
		crfree(w.cr);
	}
	PROC_LOCK(p);
	return (error);
}
561
562 static int
trace_ctl(struct thread * td,struct proc * p,void * data)563 trace_ctl(struct thread *td, struct proc *p, void *data)
564 {
565 int state;
566
567 PROC_LOCK_ASSERT(p, MA_OWNED);
568 state = *(int *)data;
569
570 /*
571 * Ktrace changes p_traceflag from or to zero under the
572 * process lock, so the test does not need to acquire ktrace
573 * mutex.
574 */
575 if ((p->p_flag & P_TRACED) != 0 || p->p_traceflag != 0)
576 return (EBUSY);
577
578 switch (state) {
579 case PROC_TRACE_CTL_ENABLE:
580 if (td->td_proc != p)
581 return (EPERM);
582 p->p_flag2 &= ~(P2_NOTRACE | P2_NOTRACE_EXEC);
583 break;
584 case PROC_TRACE_CTL_DISABLE_EXEC:
585 p->p_flag2 |= P2_NOTRACE_EXEC | P2_NOTRACE;
586 break;
587 case PROC_TRACE_CTL_DISABLE:
588 if ((p->p_flag2 & P2_NOTRACE_EXEC) != 0) {
589 KASSERT((p->p_flag2 & P2_NOTRACE) != 0,
590 ("dandling P2_NOTRACE_EXEC"));
591 if (td->td_proc != p)
592 return (EPERM);
593 p->p_flag2 &= ~P2_NOTRACE_EXEC;
594 } else {
595 p->p_flag2 |= P2_NOTRACE;
596 }
597 break;
598 default:
599 return (EINVAL);
600 }
601 return (0);
602 }
603
604 static int
trace_status(struct thread * td,struct proc * p,void * data)605 trace_status(struct thread *td, struct proc *p, void *data)
606 {
607 int *status;
608
609 status = data;
610 if ((p->p_flag2 & P2_NOTRACE) != 0) {
611 KASSERT((p->p_flag & P_TRACED) == 0,
612 ("%d traced but tracing disabled", p->p_pid));
613 *status = -1;
614 } else if ((p->p_flag & P_TRACED) != 0) {
615 *status = p->p_pptr->p_pid;
616 } else {
617 *status = 0;
618 }
619 return (0);
620 }
621
622 static int
trapcap_ctl(struct thread * td,struct proc * p,void * data)623 trapcap_ctl(struct thread *td, struct proc *p, void *data)
624 {
625 int state;
626
627 PROC_LOCK_ASSERT(p, MA_OWNED);
628 state = *(int *)data;
629
630 switch (state) {
631 case PROC_TRAPCAP_CTL_ENABLE:
632 p->p_flag2 |= P2_TRAPCAP;
633 break;
634 case PROC_TRAPCAP_CTL_DISABLE:
635 p->p_flag2 &= ~P2_TRAPCAP;
636 break;
637 default:
638 return (EINVAL);
639 }
640 return (0);
641 }
642
643 static int
trapcap_status(struct thread * td,struct proc * p,void * data)644 trapcap_status(struct thread *td, struct proc *p, void *data)
645 {
646 int *status;
647
648 status = data;
649 *status = (p->p_flag2 & P2_TRAPCAP) != 0 ? PROC_TRAPCAP_CTL_ENABLE :
650 PROC_TRAPCAP_CTL_DISABLE;
651 return (0);
652 }
653
654 static int
no_new_privs_ctl(struct thread * td,struct proc * p,void * data)655 no_new_privs_ctl(struct thread *td, struct proc *p, void *data)
656 {
657 int state;
658
659 PROC_LOCK_ASSERT(p, MA_OWNED);
660 state = *(int *)data;
661
662 if (state != PROC_NO_NEW_PRIVS_ENABLE)
663 return (EINVAL);
664 p->p_flag2 |= P2_NO_NEW_PRIVS;
665 return (0);
666 }
667
668 static int
no_new_privs_status(struct thread * td,struct proc * p,void * data)669 no_new_privs_status(struct thread *td, struct proc *p, void *data)
670 {
671
672 *(int *)data = (p->p_flag2 & P2_NO_NEW_PRIVS) != 0 ?
673 PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
674 return (0);
675 }
676
677 static int
protmax_ctl(struct thread * td,struct proc * p,void * data)678 protmax_ctl(struct thread *td, struct proc *p, void *data)
679 {
680 int state;
681
682 PROC_LOCK_ASSERT(p, MA_OWNED);
683 state = *(int *)data;
684
685 switch (state) {
686 case PROC_PROTMAX_FORCE_ENABLE:
687 p->p_flag2 &= ~P2_PROTMAX_DISABLE;
688 p->p_flag2 |= P2_PROTMAX_ENABLE;
689 break;
690 case PROC_PROTMAX_FORCE_DISABLE:
691 p->p_flag2 |= P2_PROTMAX_DISABLE;
692 p->p_flag2 &= ~P2_PROTMAX_ENABLE;
693 break;
694 case PROC_PROTMAX_NOFORCE:
695 p->p_flag2 &= ~(P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE);
696 break;
697 default:
698 return (EINVAL);
699 }
700 return (0);
701 }
702
703 static int
protmax_status(struct thread * td,struct proc * p,void * data)704 protmax_status(struct thread *td, struct proc *p, void *data)
705 {
706 int d;
707
708 switch (p->p_flag2 & (P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE)) {
709 case 0:
710 d = PROC_PROTMAX_NOFORCE;
711 break;
712 case P2_PROTMAX_ENABLE:
713 d = PROC_PROTMAX_FORCE_ENABLE;
714 break;
715 case P2_PROTMAX_DISABLE:
716 d = PROC_PROTMAX_FORCE_DISABLE;
717 break;
718 }
719 if (kern_mmap_maxprot(p, PROT_READ) == PROT_READ)
720 d |= PROC_PROTMAX_ACTIVE;
721 *(int *)data = d;
722 return (0);
723 }
724
725 static int
aslr_ctl(struct thread * td,struct proc * p,void * data)726 aslr_ctl(struct thread *td, struct proc *p, void *data)
727 {
728 int state;
729
730 PROC_LOCK_ASSERT(p, MA_OWNED);
731 state = *(int *)data;
732
733 switch (state) {
734 case PROC_ASLR_FORCE_ENABLE:
735 p->p_flag2 &= ~P2_ASLR_DISABLE;
736 p->p_flag2 |= P2_ASLR_ENABLE;
737 break;
738 case PROC_ASLR_FORCE_DISABLE:
739 p->p_flag2 |= P2_ASLR_DISABLE;
740 p->p_flag2 &= ~P2_ASLR_ENABLE;
741 break;
742 case PROC_ASLR_NOFORCE:
743 p->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
744 break;
745 default:
746 return (EINVAL);
747 }
748 return (0);
749 }
750
751 static int
aslr_status(struct thread * td,struct proc * p,void * data)752 aslr_status(struct thread *td, struct proc *p, void *data)
753 {
754 struct vmspace *vm;
755 int d;
756
757 switch (p->p_flag2 & (P2_ASLR_ENABLE | P2_ASLR_DISABLE)) {
758 case 0:
759 d = PROC_ASLR_NOFORCE;
760 break;
761 case P2_ASLR_ENABLE:
762 d = PROC_ASLR_FORCE_ENABLE;
763 break;
764 case P2_ASLR_DISABLE:
765 d = PROC_ASLR_FORCE_DISABLE;
766 break;
767 }
768 if ((p->p_flag & P_WEXIT) == 0) {
769 _PHOLD(p);
770 PROC_UNLOCK(p);
771 vm = vmspace_acquire_ref(p);
772 if (vm != NULL) {
773 if ((vm->vm_map.flags & MAP_ASLR) != 0)
774 d |= PROC_ASLR_ACTIVE;
775 vmspace_free(vm);
776 }
777 PROC_LOCK(p);
778 _PRELE(p);
779 }
780 *(int *)data = d;
781 return (0);
782 }
783
/*
 * PROC_STACKGAP_CTL handler.  Note the asymmetry: disabling the stack
 * gap (P2_STKGAP_DISABLE) is one-way for the running image — ENABLE
 * fails with EINVAL if the gap was already disabled — while the
 * on-exec setting can be toggled freely.
 */
static int
stackgap_ctl(struct thread *td, struct proc *p, void *data)
{
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	state = *(int *)data;

	if ((state & ~(PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE |
	    PROC_STACKGAP_ENABLE_EXEC | PROC_STACKGAP_DISABLE_EXEC)) != 0)
		return (EINVAL);
	switch (state & (PROC_STACKGAP_ENABLE | PROC_STACKGAP_DISABLE)) {
	case PROC_STACKGAP_ENABLE:
		/* Cannot re-enable once disabled for this image. */
		if ((p->p_flag2 & P2_STKGAP_DISABLE) != 0)
			return (EINVAL);
		break;
	case PROC_STACKGAP_DISABLE:
		p->p_flag2 |= P2_STKGAP_DISABLE;
		break;
	case 0:
		break;
	default:
		/* Both bits set at once is invalid. */
		return (EINVAL);
	}
	switch (state & (PROC_STACKGAP_ENABLE_EXEC |
	    PROC_STACKGAP_DISABLE_EXEC)) {
	case PROC_STACKGAP_ENABLE_EXEC:
		p->p_flag2 &= ~P2_STKGAP_DISABLE_EXEC;
		break;
	case PROC_STACKGAP_DISABLE_EXEC:
		p->p_flag2 |= P2_STKGAP_DISABLE_EXEC;
		break;
	case 0:
		break;
	default:
		/* Both bits set at once is invalid. */
		return (EINVAL);
	}
	return (0);
}
823
824 static int
stackgap_status(struct thread * td,struct proc * p,void * data)825 stackgap_status(struct thread *td, struct proc *p, void *data)
826 {
827 int d;
828
829 PROC_LOCK_ASSERT(p, MA_OWNED);
830
831 d = (p->p_flag2 & P2_STKGAP_DISABLE) != 0 ? PROC_STACKGAP_DISABLE :
832 PROC_STACKGAP_ENABLE;
833 d |= (p->p_flag2 & P2_STKGAP_DISABLE_EXEC) != 0 ?
834 PROC_STACKGAP_DISABLE_EXEC : PROC_STACKGAP_ENABLE_EXEC;
835 *(int *)data = d;
836 return (0);
837 }
838
/*
 * PROC_WXMAP_CTL handler: either permit W+X mappings in the current
 * address space (clearing MAP_WXORX on the live map), or request that
 * W^X be enforced starting at the next exec.  Both settings are
 * one-way here; there is no inverse operation.
 */
static int
wxmap_ctl(struct thread *td, struct proc *p, void *data)
{
	struct vmspace *vm;
	vm_map_t map;
	int state;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_WEXIT) != 0)
		return (ESRCH);
	state = *(int *)data;

	switch (state) {
	case PROC_WX_MAPPINGS_PERMIT:
		p->p_flag2 |= P2_WXORX_DISABLE;
		/* Hold p so the proc lock can be dropped for the vm work. */
		_PHOLD(p);
		PROC_UNLOCK(p);
		vm = vmspace_acquire_ref(p);
		if (vm != NULL) {
			map = &vm->vm_map;
			vm_map_lock(map);
			map->flags &= ~MAP_WXORX;
			vm_map_unlock(map);
			vmspace_free(vm);
		}
		PROC_LOCK(p);
		_PRELE(p);
		break;
	case PROC_WX_MAPPINGS_DISALLOW_EXEC:
		/* Takes effect on the next exec, not immediately. */
		p->p_flag2 |= P2_WXORX_ENABLE_EXEC;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
876
/*
 * PROC_WXMAP_STATUS handler: report the W^X control flags and whether
 * W^X is being enforced in the current address space (checked via a
 * vmspace reference, with the process held across the unlock).
 */
static int
wxmap_status(struct thread *td, struct proc *p, void *data)
{
	struct vmspace *vm;
	int d;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_WEXIT) != 0)
		return (ESRCH);

	d = 0;
	if ((p->p_flag2 & P2_WXORX_DISABLE) != 0)
		d |= PROC_WX_MAPPINGS_PERMIT;
	if ((p->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
		d |= PROC_WX_MAPPINGS_DISALLOW_EXEC;
	/* Hold p so the proc lock can be dropped while examining the map. */
	_PHOLD(p);
	PROC_UNLOCK(p);
	vm = vmspace_acquire_ref(p);
	if (vm != NULL) {
		if ((vm->vm_map.flags & MAP_WXORX) != 0)
			d |= PROC_WXORX_ENFORCE;
		vmspace_free(vm);
	}
	PROC_LOCK(p);
	_PRELE(p);
	*(int *)data = d;
	return (0);
}
905
906 static int
pdeathsig_ctl(struct thread * td,struct proc * p,void * data)907 pdeathsig_ctl(struct thread *td, struct proc *p, void *data)
908 {
909 int signum;
910
911 signum = *(int *)data;
912 if (p != td->td_proc || (signum != 0 && !_SIG_VALID(signum)))
913 return (EINVAL);
914 p->p_pdeathsig = signum;
915 return (0);
916 }
917
918 static int
pdeathsig_status(struct thread * td,struct proc * p,void * data)919 pdeathsig_status(struct thread *td, struct proc *p, void *data)
920 {
921 if (p != td->td_proc)
922 return (EINVAL);
923 *(int *)data = p->p_pdeathsig;
924 return (0);
925 }
926
927 static int
logsigexit_ctl(struct thread * td,struct proc * p,void * data)928 logsigexit_ctl(struct thread *td, struct proc *p, void *data)
929 {
930 int state;
931
932 PROC_LOCK_ASSERT(p, MA_OWNED);
933 state = *(int *)data;
934
935 switch (state) {
936 case PROC_LOGSIGEXIT_CTL_NOFORCE:
937 p->p_flag2 &= ~(P2_LOGSIGEXIT_CTL | P2_LOGSIGEXIT_ENABLE);
938 break;
939 case PROC_LOGSIGEXIT_CTL_FORCE_ENABLE:
940 p->p_flag2 |= P2_LOGSIGEXIT_CTL | P2_LOGSIGEXIT_ENABLE;
941 break;
942 case PROC_LOGSIGEXIT_CTL_FORCE_DISABLE:
943 p->p_flag2 |= P2_LOGSIGEXIT_CTL;
944 p->p_flag2 &= ~P2_LOGSIGEXIT_ENABLE;
945 break;
946 default:
947 return (EINVAL);
948 }
949 return (0);
950 }
951
952 static int
logsigexit_status(struct thread * td,struct proc * p,void * data)953 logsigexit_status(struct thread *td, struct proc *p, void *data)
954 {
955 int state;
956
957 if ((p->p_flag2 & P2_LOGSIGEXIT_CTL) == 0)
958 state = PROC_LOGSIGEXIT_CTL_NOFORCE;
959 else if ((p->p_flag2 & P2_LOGSIGEXIT_ENABLE) != 0)
960 state = PROC_LOGSIGEXIT_CTL_FORCE_ENABLE;
961 else
962 state = PROC_LOGSIGEXIT_CTL_FORCE_DISABLE;
963 *(int *)data = state;
964 return (0);
965 }
966
/* How sys_procctl() must hold proctree_lock around a command handler. */
enum {
	PCTL_SLOCKED,	/* shared (read) lock */
	PCTL_XLOCKED,	/* exclusive (write) lock */
	PCTL_UNLOCKED,	/* no tree lock taken */
};
972
/*
 * Per-command dispatch descriptor for sys_procctl(): locking mode,
 * argument copy sizes, the handler, and behavioral quirks.
 */
struct procctl_cmd_info {
	int lock_tree;			/* PCTL_* proctree_lock mode */
	bool one_proc : 1;		/* NOTE(review): presumably restricts
					   the command to a single process
					   (no pgid scope) — confirm against
					   sys_procctl() */
	bool esrch_is_einval : 1;	/* report ESRCH as EINVAL */
	bool copyout_on_error : 1;	/* copy results out even on failure */
	bool no_nonnull_data : 1;	/* command takes no data argument */
	bool need_candebug : 1;		/* require p_candebug() on target */
	int copyin_sz;			/* bytes copied in from user data */
	int copyout_sz;			/* bytes copied out to user data */
	int (*exec)(struct thread *, struct proc *, void *);	/* handler */
	bool (*sapblk)(struct thread *, void *);	/* stop-all block test */
};
/*
 * Dispatch table for procctl(2) commands, indexed by PROC_* command
 * number.  Each entry names its handler and describes the locking and
 * argument-copy protocol sys_procctl() must follow for it.
 */
static const struct procctl_cmd_info procctl_cmds_info[] = {
	[PROC_SPROTECT] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = protect_set, .copyout_on_error = false, },
	[PROC_REAP_ACQUIRE] =
	    { .lock_tree = PCTL_XLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = true,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = 0,
	      .exec = reap_acquire, .copyout_on_error = false, },
	[PROC_REAP_RELEASE] =
	    { .lock_tree = PCTL_XLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = true,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = 0,
	      .exec = reap_release, .copyout_on_error = false, },
	[PROC_REAP_STATUS] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0,
	      .copyout_sz = sizeof(struct procctl_reaper_status),
	      .exec = reap_status, .copyout_on_error = false, },
	[PROC_REAP_GETPIDS] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(struct procctl_reaper_pids),
	      .copyout_sz = 0,
	      .exec = reap_getpids, .copyout_on_error = false, },
	[PROC_REAP_KILL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(struct procctl_reaper_kill),
	      .copyout_sz = sizeof(struct procctl_reaper_kill),
	      .exec = reap_kill, .copyout_on_error = true,
	      .sapblk = reap_kill_sapblk, },
	[PROC_TRACE_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = trace_ctl, .copyout_on_error = false, },
	[PROC_TRACE_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = trace_status, .copyout_on_error = false, },
	[PROC_TRAPCAP_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = false,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = trapcap_ctl, .copyout_on_error = false, },
	[PROC_TRAPCAP_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = trapcap_status, .copyout_on_error = false, },
	[PROC_PDEATHSIG_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = true, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = pdeathsig_ctl, .copyout_on_error = false, },
	[PROC_PDEATHSIG_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = true, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = pdeathsig_status, .copyout_on_error = false, },
	[PROC_ASLR_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = aslr_ctl, .copyout_on_error = false, },
	[PROC_ASLR_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = aslr_status, .copyout_on_error = false, },
	[PROC_PROTMAX_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = protmax_ctl, .copyout_on_error = false, },
	[PROC_PROTMAX_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = protmax_status, .copyout_on_error = false, },
	[PROC_STACKGAP_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = stackgap_ctl, .copyout_on_error = false, },
	[PROC_STACKGAP_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = stackgap_status, .copyout_on_error = false, },
	[PROC_NO_NEW_PRIVS_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = no_new_privs_ctl, .copyout_on_error = false, },
	[PROC_NO_NEW_PRIVS_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = no_new_privs_status, .copyout_on_error = false, },
	[PROC_WXMAP_CTL] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = wxmap_ctl, .copyout_on_error = false, },
	[PROC_WXMAP_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = wxmap_status, .copyout_on_error = false, },
	[PROC_LOGSIGEXIT_CTL] =
	    { .lock_tree = PCTL_SLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = true,
	      .copyin_sz = sizeof(int), .copyout_sz = 0,
	      .exec = logsigexit_ctl, .copyout_on_error = false, },
	[PROC_LOGSIGEXIT_STATUS] =
	    { .lock_tree = PCTL_UNLOCKED, .one_proc = true,
	      .esrch_is_einval = false, .no_nonnull_data = false,
	      .need_candebug = false,
	      .copyin_sz = 0, .copyout_sz = sizeof(int),
	      .exec = logsigexit_status, .copyout_on_error = false, },
};
1135
1136 int
sys_procctl(struct thread * td,struct procctl_args * uap)1137 sys_procctl(struct thread *td, struct procctl_args *uap)
1138 {
1139 union {
1140 struct procctl_reaper_status rs;
1141 struct procctl_reaper_pids rp;
1142 struct procctl_reaper_kill rk;
1143 int flags;
1144 } x;
1145 const struct procctl_cmd_info *cmd_info;
1146 int error, error1;
1147
1148 if (uap->com >= PROC_PROCCTL_MD_MIN)
1149 return (cpu_procctl(td, uap->idtype, uap->id,
1150 uap->com, uap->data));
1151 if (uap->com <= 0 || uap->com >= nitems(procctl_cmds_info))
1152 return (EINVAL);
1153 cmd_info = &procctl_cmds_info[uap->com];
1154 bzero(&x, sizeof(x));
1155
1156 if (cmd_info->copyin_sz > 0) {
1157 error = copyin(uap->data, &x, cmd_info->copyin_sz);
1158 if (error != 0)
1159 return (error);
1160 } else if (cmd_info->no_nonnull_data && uap->data != NULL) {
1161 return (EINVAL);
1162 }
1163
1164 error = kern_procctl(td, uap->idtype, uap->id, uap->com, &x);
1165
1166 if (cmd_info->copyout_sz > 0 && (error == 0 ||
1167 cmd_info->copyout_on_error)) {
1168 error1 = copyout(&x, uap->data, cmd_info->copyout_sz);
1169 if (error == 0)
1170 error = error1;
1171 }
1172 return (error);
1173 }
1174
1175 static int
kern_procctl_single(struct thread * td,struct proc * p,int com,void * data)1176 kern_procctl_single(struct thread *td, struct proc *p, int com, void *data)
1177 {
1178
1179 PROC_LOCK_ASSERT(p, MA_OWNED);
1180 return (procctl_cmds_info[com].exec(td, p, data));
1181 }
1182
/*
 * Common in-kernel implementation of procctl(2).
 *
 * Applies command 'com' either to a single process (idtype == P_PID,
 * id == 0 meaning the calling process) or to every visible member of a
 * process group (idtype == P_PGID).  The command's table entry selects
 * the proctree_lock mode, the access check (p_candebug vs. p_cansee),
 * and whether ESRCH from a failed pid lookup is reported as EINVAL.
 *
 * Returns 0 on success or an errno value; for P_PGID the call succeeds
 * if at least one group member completed the request.
 */
int
kern_procctl(struct thread *td, idtype_t idtype, id_t id, int com, void *data)
{
	struct pgrp *pg;
	struct proc *p;
	const struct procctl_cmd_info *cmd_info;
	int error, first_error, ok;
	bool sapblk;

	MPASS(com > 0 && com < nitems(procctl_cmds_info));
	cmd_info = &procctl_cmds_info[com];
	/* Commands marked one_proc cannot be applied to a whole group. */
	if (idtype != P_PID && cmd_info->one_proc)
		return (EINVAL);

	/*
	 * Some commands must prevent a concurrent stop_all_proc() for
	 * their duration; the per-command sapblk hook decides based on
	 * the argument data.
	 */
	sapblk = false;
	if (cmd_info->sapblk != NULL) {
		sapblk = cmd_info->sapblk(td, data);
		if (sapblk && !stop_all_proc_block())
			return (ERESTART);
	}

	/* Take proctree_lock in the mode the command requires, if any. */
	switch (cmd_info->lock_tree) {
	case PCTL_XLOCKED:
		sx_xlock(&proctree_lock);
		break;
	case PCTL_SLOCKED:
		sx_slock(&proctree_lock);
		break;
	default:
		break;
	}

	switch (idtype) {
	case P_PID:
		if (id == 0) {
			/* id 0 means the calling process itself. */
			p = td->td_proc;
			error = 0;
			PROC_LOCK(p);
		} else {
			/* pfind() returns the process locked. */
			p = pfind(id);
			if (p == NULL) {
				error = cmd_info->esrch_is_einval ?
				    EINVAL : ESRCH;
				/* No process locked; skip PROC_UNLOCK. */
				break;
			}
			error = cmd_info->need_candebug ? p_candebug(td, p) :
			    p_cansee(td, p);
		}
		if (error == 0)
			error = kern_procctl_single(td, p, com, data);
		PROC_UNLOCK(p);
		break;
	case P_PGID:
		/*
		 * Attempt to apply the operation to all members of the
		 * group.  Ignore processes in the group that can't be
		 * seen.  Ignore errors so long as at least one process is
		 * able to complete the request successfully.
		 */
		pg = pgfind(id);
		if (pg == NULL) {
			error = ESRCH;
			break;
		}
		/*
		 * The pgrp lock from pgfind() is dropped before walking
		 * the member list; NOTE(review): group-capable commands
		 * appear to rely on their proctree_lock mode (above) to
		 * keep the membership stable — confirm against the
		 * command table.
		 */
		PGRP_UNLOCK(pg);
		ok = 0;
		first_error = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			/* Skip embryonic/zombie and invisible members. */
			if (p->p_state == PRS_NEW ||
			    p->p_state == PRS_ZOMBIE ||
			    (cmd_info->need_candebug ? p_candebug(td, p) :
			    p_cansee(td, p)) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			error = kern_procctl_single(td, p, com, data);
			PROC_UNLOCK(p);
			if (error == 0)
				ok = 1;
			else if (first_error == 0)
				first_error = error;
		}
		if (ok)
			error = 0;
		else if (first_error != 0)
			error = first_error;
		else
			/*
			 * Was not able to see any processes in the
			 * process group.
			 */
			error = ESRCH;
		break;
	default:
		error = EINVAL;
		break;
	}

	/* Release proctree_lock in the same mode it was acquired. */
	switch (cmd_info->lock_tree) {
	case PCTL_XLOCKED:
		sx_xunlock(&proctree_lock);
		break;
	case PCTL_SLOCKED:
		sx_sunlock(&proctree_lock);
		break;
	default:
		break;
	}
	if (sapblk)
		stop_all_proc_unblock();
	return (error);
}
1296