/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

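/*
 * Magic cookies stored in ctl_table->data so that proc_cap_handler() below
 * can tell which capability set a sysctl entry refers to: the bounding set
 * or the inheritable set.
 */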
#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

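/*
 * Capability masks applied to every usermode helper in
 * ____call_usermodehelper().  They start out full and can only be
 * narrowed, never widened, through the usermodehelper sysctl table below.
 */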
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, rather than blindly invoking it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
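 *
 * Most callers reach this through the request_module() and
 * request_module_nowait() wrappers from <linux/kmod.h> rather than
 * calling it directly.  A minimal, illustrative sketch (the "fs-%s"
 * alias and the error handling are only an example, not taken from
 * this file):
 *
 *	if (request_module("fs-%s", fstype) != 0)
 *		return -ENODEV;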
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
	static char *envp[] = { "HOME=/",
				"TERM=linux",
				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
				NULL };
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_usermodehelper_fns(modprobe_path, argv, envp,
			wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
			NULL, NULL, NULL);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

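	/*
	 * Reset any signal handlers inherited from the kernel-thread
	 * parent so the exec'd helper starts with the default
	 * dispositions.
	 */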
	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	do_exit(0);
}

void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	complete(sub_info->complete);
	return 0;
}

/* This is run by khelper thread */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	enum umh_wait wait = sub_info->wait;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully.  We need the data structures to stay around
	 * until that is done.  */
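	/*
	 * For UMH_WAIT_PROC an intermediate wait_for_helper() thread is
	 * forked to do the sys_wait4(), because the khelper worker itself
	 * must never block waiting for the helper to exit.
	 */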
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		complete(sub_info->complete);
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static int usermodehelper_disabled = 1;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

void read_lock_usermodehelper(void)
{
	down_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_lock_usermodehelper);

void read_unlock_usermodehelper(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);

/**
 * usermodehelper_disable - prevent new helpers from being started
 */
int usermodehelper_disable(void)
{
	long retval;

	down_write(&umhelper_sem);
	usermodehelper_disabled = 1;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	down_write(&umhelper_sem);
	usermodehelper_disabled = 0;
	up_write(&umhelper_sem);
	return -EAGAIN;
}

/**
 * usermodehelper_enable - allow new helpers to be started again
 */
void usermodehelper_enable(void)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = 0;
	up_write(&umhelper_sem);
}

/**
 * usermodehelper_is_disabled - check if new helpers are allowed to be started
 */
bool usermodehelper_is_disabled(void)
{
	return usermodehelper_disabled;
}
EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);

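/*
 * helper_lock()/helper_unlock() bracket every helper invocation so that
 * usermodehelper_disable() can wait for running_helpers to drain.  The
 * barrier after the increment is presumably there to order it against the
 * usermodehelper_disabled check in call_usermodehelper_exec().
 */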
static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);

/**
 * call_usermodehelper_setfns - set a cleanup/init function
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * about to be freed.  This can be used for freeing the argv and envp.
 * The function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
	info->init = init;
	info->data = data;
}
EXPORT_SYMBOL(call_usermodehelper_setfns);

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        When UMH_NO_WAIT is passed, don't wait at all; you then get no
 *        useful error back when the program couldn't be exec'ed, but this
 *        makes it safe to call from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of the khelper
 * workqueue thread (i.e. it runs with full root capabilities).
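 *
 * A minimal sketch of the usual setup/exec sequence (the identifiers
 * my_init, my_cleanup and data are illustrative only):
 *
 *	info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
 *	if (!info)
 *		return -ENOMEM;
 *	call_usermodehelper_setfns(info, my_init, my_cleanup, data);
 *	err = call_usermodehelper_exec(info, UMH_WAIT_EXEC);
 *
 * The call_usermodehelper_fns() wrapper used by __request_module() above
 * chains these same steps.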
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info,
			     enum umh_wait wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;
	wait_for_completion(&done);
	retval = sub_info->retval;

out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

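/*
 * Sysctl handler for the two capability files.  Reads report the current
 * mask as an array of unsigned longs; writes may only clear bits, so the
 * helper capabilities can be restricted at run time but never re-expanded.
 */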
static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

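/*
 * Table describing the "bset" and "inheritable" entries handled above.
 * It is registered elsewhere (from kernel/sysctl.c, if memory serves)
 * under the kernel.usermodehelper sysctl directory.
 */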
struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};

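/*
 * All __call_usermodehelper() work items are serialized on this single
 * "khelper" thread, created at init time.
 */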
void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}