/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL_ACCOUNT);
	if (e)
		e->num_hook_entries = num;
	return e;
}

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}
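/* Illustrative layout sketch (inferred from the size computation in
 * allocate_hook_entries_size() and the pointer arithmetic above; not a
 * separate structure definition): each hook blob is one kvzalloc'd region,
 * packed as
 *
 *	struct nf_hook_entries           header, followed by hooks[num]
 *	struct nf_hook_ops *ops[num]     reached via nf_hook_entries_get_hook_ops()
 *	struct nf_hook_entries_rcu_head  lets nf_hook_entries_free() defer the
 *	                                 kvfree() until after an RCU grace period
 */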
static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;

			/* Restrict BPF hook type to force a unique priority,
			 * not shared at attach time.
			 *
			 * This is mainly to avoid ordering issues between two
			 * different bpf programs; it doesn't prevent a normal
			 * hook at the same priority as a bpf one (we don't
			 * want to prevent defrag, conntrack, iptables etc.
			 * from attaching).
			 */
			if (reg->priority == orig_ops[i]->priority &&
			    reg->hook_ops_type == NF_HOOK_OP_BPF)
				return ERR_PTR(-EBUSY);
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}
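/* Ordering sketch (illustrative; priorities taken from the IPv4 uapi header):
 * the loop above keeps entries sorted by ascending ->priority, so lower
 * values run first.  For example, a hook registered at
 * NF_IP_PRI_CONNTRACK_DEFRAG (-400) is placed, and therefore runs, ahead of
 * one at NF_IP_PRI_CONNTRACK (-200) or NF_IP_PRI_FILTER (0).  A newly
 * registered hook that ties on priority is inserted before the existing
 * entry, except that BPF hooks refuse to share a priority at all (the
 * -EBUSY case above).
 */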
static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
			       const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that will just move to the next hook.
 *
 * This counts the current dummy hooks, attempts to allocate a new blob,
 * copies the live hooks, then replaces and discards the old one.
 *
 * Returns the address to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (hooknum == NF_NETDEV_EGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_egress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}
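/* Summary of the mapping above (informational): hook heads live either in
 * the per-namespace arrays (net->nf.hooks_ipv4/hooks_ipv6, plus hooks_arp
 * and hooks_bridge when those families are compiled in), indexed by hook
 * number, or, for the device-scoped hooks (NFPROTO_NETDEV ingress/egress
 * and NFPROTO_INET ingress), in the net_device itself via
 * dev->nf_hooks_ingress / dev->nf_hooks_egress.
 */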
static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
						  int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
						 int pf)
{
	return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
}

static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}
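/* Informational note (behaviour of code outside this file, stated here only
 * as a hint): with CONFIG_JUMP_LABEL the nf_hooks_needed[][] static keys
 * adjusted above let inline fast paths (e.g. the ingress/egress active
 * checks in the netfilter headers) skip hook evaluation entirely while no
 * hook is registered for a given family/hook number.  The INET ingress hook
 * is folded into the NETDEV/NF_NETDEV_INGRESS key because both are driven
 * from the same per-device ingress location.
 */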
static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
#ifndef CONFIG_NETFILTER_EGRESS
		if (reg->hooknum == NF_NETDEV_EGRESS)
			return -EOPNOTSUPP;
#endif
		if ((reg->hooknum != NF_NETDEV_INGRESS &&
		     reg->hooknum != NF_NETDEV_EGRESS) ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks)) {
		hooks_validate(new_hooks);
		rcu_assign_pointer(*pp, new_hooks);
	}

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_egress_hook(reg, pf))
		net_inc_egress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @old: current hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail, hook unregistration must always succeed.
 * Therefore replace the to-be-removed hook with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
		if (nf_egress_hook(reg, pf))
			net_dec_egress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
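/* Usage sketch (illustrative only; the callback, ops name and hook point
 * below are hypothetical and not part of this file): a typical caller
 * attaches a function to one hook point roughly like this:
 *
 *	static unsigned int my_hook(void *priv, struct sk_buff *skb,
 *				    const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hook,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(net, &my_ops);
 *	...
 *	nf_unregister_net_hook(net, &my_ops);
 *
 * With .pf == NFPROTO_INET (and a hooknum other than NF_INET_INGRESS) the
 * same ops are registered on both the IPv4 and IPv6 hook heads, as the
 * wrappers above show.
 */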
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb_reason(skb,
					 SKB_DROP_REASON_NETFILTER_DROP);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		case NF_STOLEN:
			return NF_DROP_GETERR(verdict);
		default:
			WARN_ON_ONCE(1);
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);
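/* Caller pattern sketch (illustrative): NF_HOOK()-style callers consume the
 * return value of nf_hook_slow() roughly as
 *
 *	ret = nf_hook_slow(skb, &state, hook_head, 0);
 *	if (ret == 1)
 *		ret = okfn(net, sk, skb);
 *
 * i.e. okfn() only runs for a fully accepted skb; 0 means the skb was queued
 * or stolen, and a negative value means it was dropped and already freed.
 */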
void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	LIST_HEAD(sublist);
	int ret;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

const struct nf_defrag_hook __rcu *nf_defrag_v4_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_defrag_v4_hook);

const struct nf_defrag_hook __rcu *nf_defrag_v6_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_defrag_v6_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
u8 nf_ctnetlink_has_listener;
EXPORT_SYMBOL_GPL(nf_ctnetlink_has_listener);

const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, the connection may not be in
 * the hash table, and hence manufactured ICMP or RST packets will not be
 * associated with it.
 */
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;

	if (skb->_nfct) {
		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook)
			ct_hook->attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->destroy(nfct);
	rcu_read_unlock();

	WARN_ON(!ct_hook);
}
EXPORT_SYMBOL(nf_conntrack_destroy);

void nf_ct_set_closing(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	if (!nfct)
		return;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->set_closing(nfct);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_set_closing);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

#ifdef CONFIG_LWTUNNEL
	ret = netfilter_lwtunnel_init();
	if (ret < 0)
		goto err_lwtunnel_pernet;
#endif
	ret = netfilter_log_init();
	if (ret < 0)
		goto err_log_pernet;

	return 0;
err_log_pernet:
#ifdef CONFIG_LWTUNNEL
	netfilter_lwtunnel_fini();
err_lwtunnel_pernet:
#endif
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}