// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <linux/proc_fs.h>

#include <net/aligned_data.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
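/* first_device marks the boundary between pernet subsystems and pernet
 * devices on pernet_list: register_pernet_subsys() inserts new entries
 * just before *first_device, while register_pernet_device() appends to
 * the list tail and points first_device at the first such entry. This
 * keeps every subsystem ahead of every device, so subsystem init runs
 * first and, on teardown, device exit runs first.
 */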
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net;
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
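
/*
 * Worked example (the sizes are illustrative, not guaranteed): struct
 * net_generic overlays its { len, rcu } header with the start of the
 * ptr[] flexible array, so the first slots must stay reserved. If the
 * header occupied 24 bytes on a 64-bit kernel, MIN_PERNET_OPS_ID would
 * be (24 + 8 - 1) / 8 = 3, i.e. ptr[0..2] are never handed out and the
 * ids from ida_alloc_min() below start at 3.
 */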

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
	unsigned int generic_size;
	struct net_generic *ng;

	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
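
/*
 * For context, the lockless reader this pairs with lives in
 * <net/netns/generic.h>; it is roughly the following sketch:
 *
 *	rcu_read_lock();
 *	ng = rcu_dereference(net->gen);
 *	ptr = ng->ptr[id];
 *	rcu_read_unlock();
 *
 * rcu_assign_pointer() above publishes the enlarged copy before the old
 * one is freed, so such a reader always sees a fully initialised array.
 */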

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

static void ops_exit_rtnl_list(const struct list_head *ops_list,
			       const struct pernet_operations *ops,
			       struct list_head *net_exit_list)
{
	const struct pernet_operations *saved_ops = ops;
	LIST_HEAD(dev_kill_list);
	struct net *net;

	rtnl_lock();

	list_for_each_entry(net, net_exit_list, exit_list) {
		__rtnl_net_lock(net);

		ops = saved_ops;
		list_for_each_entry_continue_reverse(ops, ops_list, list) {
			if (ops->exit_rtnl)
				ops->exit_rtnl(net, &dev_kill_list);
		}

		__rtnl_net_unlock(net);
	}

	unregister_netdevice_many(&dev_kill_list);

	rtnl_unlock();
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	if (ops->exit) {
		struct net *net;

		list_for_each_entry(net, net_exit_list, exit_list) {
			ops->exit(net);
			cond_resched();
		}
	}

	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			kfree(net_generic(net, *ops->id));
	}
}

static void ops_undo_list(const struct list_head *ops_list,
			  const struct pernet_operations *ops,
			  struct list_head *net_exit_list,
			  bool expedite_rcu)
{
	const struct pernet_operations *saved_ops;
	bool hold_rtnl = false;

	if (!ops)
		ops = list_entry(ops_list, typeof(*ops), list);

	saved_ops = ops;

	list_for_each_entry_continue_reverse(ops, ops_list, list) {
		hold_rtnl |= !!ops->exit_rtnl;
		ops_pre_exit_list(ops, net_exit_list);
	}

	/* Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so the
	 * rcu_barrier() after ops_undo_list() isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	if (expedite_rcu)
		synchronize_rcu_expedited();
	else
		synchronize_rcu();

	if (hold_rtnl)
		ops_exit_rtnl_list(ops_list, saved_ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_exit_list(ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_free_list(ops, net_exit_list);
}

static void ops_undo_single(struct pernet_operations *ops,
			    struct list_head *net_exit_list)
{
	LIST_HEAD(ops_list);

	list_add(&ops->list, &ops_list);
	ops_undo_list(&ops_list, NULL, net_exit_list, false);
	list_del(&ops->list);
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Must be called from an RCU-critical section or with nsid_lock held */
static int __peernet2id(const struct net *net, struct net *peer)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	return NETNSA_NSID_NOT_ASSIGNED;
}
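
/*
 * nsid model in brief: each struct net keeps a private idr
 * (net->netns_ids) that maps peer network namespaces to small s32 ids.
 * An id is only meaningful relative to the netns holding the idr; the
 * same peer can have different ids (or none) in different namespaces.
 * The RTM_NEWNSID/RTM_GETNSID handlers below operate on exactly this
 * mapping.
 */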

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	if (refcount_read(&net->ns.count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, and this guarantees
	 * we never hash a peer back to net->netns_ids, after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	id = alloc_netid(net, peer, -1);
	spin_unlock(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);

static __net_init void preinit_net_sysctl(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	/* Limits per socket sk_omem_alloc usage.
	 * TCP zerocopy regular usage needs 128 KB.
	 */
	net->core.sysctl_optmem_max = 128 * 1024;
	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
	net->core.sysctl_tstamp_allow_data = 1;
}

/* init code that must occur even if setup_net() is not called. */
static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
	refcount_set(&net->passive, 1);
	refcount_set(&net->ns.count, 1);
	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net_refcnt");
	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net_notrefcnt");

	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;

	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
	mutex_init(&net->rtnl_mutex);
	lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
#endif

	INIT_LIST_HEAD(&net->ptype_all);
	INIT_LIST_HEAD(&net->ptype_specific);
	preinit_net_sysctl(net);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops;
	LIST_HEAD(net_exit_list);
	int error = 0;

	net->net_cookie = atomic64_inc_return(&net_aligned_data.net_cookie);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	ops_undo_list(&pernet_list, ops, &net_exit_list, false);
	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

static LLIST_HEAD(defer_free_list);

static void net_complete_free(void)
{
	struct llist_node *kill_list;
	struct net *net, *next;

	/* Get the list of namespaces to free from the last round. */
	kill_list = llist_del_all(&defer_free_list);

	llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
		kmem_cache_free(net_cachep, net);
}

void net_passive_dec(struct net *net)
{
	if (refcount_dec_and_test(&net->passive)) {
		kfree(rcu_access_pointer(net->gen));

		/* There should not be any trackers left there. */
		ref_tracker_dir_exit(&net->notrefcnt_tracker);

		/* Wait for an extra rcu_barrier() before the final free. */
		llist_add(&net->defer_free_list, &defer_free_list);
	}
}

void net_drop_ns(void *p)
{
	struct net *net = (struct net *)p;

	if (net)
		net_passive_dec(net);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}

	preinit_net(net, user_ns);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_passive_dec(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * and that work is the only process which may delete a net
	 * from net_namespace_list. So, while the below is executing,
	 * the list may only grow. Thus, we do not need
	 * for_each_net_rcu() or net_rwsem here.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

struct task_struct *cleanup_net_task;

static void cleanup_net(struct work_struct *work)
{
	struct llist_node *net_kill_list;
	struct net *net, *tmp, *last;
	LIST_HEAD(net_exit_list);

	WRITE_ONCE(cleanup_net_task, current);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we release net_rwsem, no new net
	 * added to net_namespace_list can assign an nsid to a net
	 * from net_kill_list (see peernet2id_alloc()). So, we skip
	 * them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they've already been
	 * deleted from net_namespace_list. But this would be
	 * useless anyway, as their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	ops_undo_list(&pernet_list, NULL, &net_exit_list, true);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	net_complete_free();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(net->user_ns);
		net_passive_dec(net);
	}
	WRITE_ONCE(cleanup_net_task, NULL);
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from a work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in a module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
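
/*
 * A minimal usage sketch (hypothetical module and names, not from this
 * file): a module that must not free global state while a concurrent
 * cleanup_net() may still be running its ->exit ops.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *		net_ns_barrier();	// any in-flight cleanup_net() is done
 *		kmem_cache_destroy(foo_cachep);
 *	}
 *
 * The write-lock/unlock pair works as a barrier because cleanup_net()
 * holds pernet_ops_rwsem read-locked across its whole exit sequence.
 */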

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	ref_tracker_dir_exit(&net->refcnt_tracker);
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/**
 * get_net_ns - increment the refcount of the network namespace
 * @ns: common namespace (net)
 *
 * Returns the net's common namespace or ERR_PTR() if the ref is zero.
 */
struct ns_common *get_net_ns(struct ns_common *ns)
{
	struct net *net;

	net = maybe_get_net(container_of(ns, struct net, ns));
	if (net)
		return &net->ns;
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);

struct net *get_net_ns_by_fd(int fd)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	if (proc_ns_file(fd_file(f))) {
		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));

		if (ns->ops == &netns_operations)
			return get_net(container_of(ns, struct net, ns));
	}

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

#ifdef CONFIG_NET_NS_REFCNT_TRACKER
static void net_ns_net_debugfs(struct net *net)
{
	ref_tracker_dir_symlink(&net->refcnt_tracker, "netns-%llx-%u-refcnt",
				net->net_cookie, net->ns.inum);
	ref_tracker_dir_symlink(&net->notrefcnt_tracker, "netns-%llx-%u-notrefcnt",
				net->net_cookie, net->ns.inum);
}

static int __init init_net_debugfs(void)
{
	ref_tracker_dir_debugfs(&init_net.refcnt_tracker);
	ref_tracker_dir_debugfs(&init_net.notrefcnt_tracker);
	net_ns_net_debugfs(&init_net);
	return 0;
}
late_initcall(init_net_debugfs);
#else
static void net_ns_net_debugfs(struct net *net)
{
}
#endif

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	net->ns.inum = PROC_NET_INIT_INO;
	if (net != &init_net) {
		int ret = ns_alloc_inum(&net->ns);

		if (ret)
			return ret;
	}
	net_ns_net_debugfs(net);
	return 0;
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	/*
	 * The initial network namespace never exits, so we don't need
	 * any special checks here.
	 */
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}
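
/*
 * For reference, this handler is what services e.g. "ip netns set
 * NAME NSID": iproute2 opens the bind-mounted netns file and sends
 * RTM_NEWNSID carrying NETNSA_FD plus the requested NETNSA_NSID (or -1
 * to let alloc_netid() pick the lowest free id). This description of
 * the userspace side is illustrative, not derived from this file.
 */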

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

/* Runs in an RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

#ifdef CONFIG_NET_NS
static void __init netns_ipv4_struct_check(void)
{
	/* TX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_early_retrans);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_win_divisor);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_rtt_log);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_autocorking);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_snd_mss);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_notsent_lowat);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_limit_output_bytes);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_rtt_wlen);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_wmem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_ip_fwd_use_pmtu);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);

	/* TXRX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
				      sysctl_tcp_moderate_rcvbuf);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);

	/* RX readonly hotpath cache line */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_ip_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_l3mdev_accept);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_reordering);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rmem);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
}
#endif

static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
	 .dumpit = rtnl_net_dumpid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
};

void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create the workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	preinit_net(&init_net, &init_user_ns);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register_many(net_ns_rtnl_msg_handlers);
}

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;
	int error;

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We hold pernet_ops_rwsem write-locked, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If we hit an error, clean up all namespaces we initialized */
	list_del(&ops->list);
	ops_undo_single(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;

	/* See the comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);

	list_del(&ops->list);
	ops_undo_single(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_undo_single(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				      GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* This does not require READ_ONCE as writers already hold
		 * pernet_ops_rwsem. But WRITE_ONCE is needed to pair with
		 * the READ_ONCE() in net_alloc_generic().
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
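
/*
 * A minimal usage sketch (hypothetical names; the ops->id / ops->size
 * mechanism is the one implemented by ops_init() above, which
 * kzallocs the per-netns state and hooks it into net->gen):
 *
 *	struct foo_pernet {
 *		struct list_head items;
 *	};
 *
 *	static unsigned int foo_pernet_id;
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_pernet *fp = net_generic(net, foo_pernet_id);
 *
 *		INIT_LIST_HEAD(&fp->items);	// state is pre-zeroed
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_pernet_id,
 *		.size = sizeof(struct foo_pernet),
 *	};
 *
 * register_pernet_subsys(&foo_net_ops) from module init, and
 * unregister_pernet_subsys(&foo_net_ops) from module exit.
 */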

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif