// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/inet_dscp.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <linux/indirect_call_wrapper.h>

/*
 * INDIRECT_CALL_MT(): dispatch an ops callback as a direct call to the
 * IPv6 and/or IPv4 implementation when the corresponding multiple-tables
 * support is compiled in, avoiding a retpoline-expensive indirect call.
 * Falls back to a plain indirect call when neither family is built.
 */
#if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES)
#ifdef CONFIG_IP_MULTIPLE_TABLES
#define INDIRECT_CALL_MT(f, f2, f1, ...) \
	INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif
#elif defined(CONFIG_IP_MULTIPLE_TABLES)
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__)
#endif

/* Sentinel for "no FRA_UID_RANGE configured": full UID range 0..~0. */
static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};

/*
 * fib_rule_matchall - true if @rule has no generic match conditions set
 * (no iif/oif, mark, tunnel id, flags, suppressors, UID range or port
 * ranges), i.e. it matches every flow as far as the core selectors go.
 */
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (READ_ONCE(rule->iifindex) || READ_ONCE(rule->oifindex) ||
	    rule->mark || rule->tun_id || rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	if (fib_rule_port_range_set(&rule->sport_range))
		return false;
	if (fib_rule_port_range_set(&rule->dport_range))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

/*
 * fib_default_rule_add - append a kernel-owned FR_ACT_TO_TBL rule
 * @ops:   the per-family rules ops the rule is added to
 * @pref:  rule preference (priority)
 * @table: routing table the rule points to
 *
 * Used at family init time to install the default rules (e.g. local/
 * main/default). Returns 0 or -ENOMEM.
 */
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->proto = RTPROT_KERNEL;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	/* -1 means "suppressor not configured" throughout this file. */
	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

/*
 * Pick a priority for a rule added without FRA_PRIORITY: one below the
 * preference of the second rule in the list (the first is skipped), or 0.
 */
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

/* Forward declaration; defined near the bottom of this file. */
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

/*
 * Find the fib_rules_ops registered for @family in @net and take a module
 * reference on it. Returns NULL if the family is unknown or the module
 * reference could not be taken. Pair with rules_ops_put().
 */
static struct fib_rules_ops *lookup_rules_ops(const struct net *net,
					      int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/* Drop the module reference taken by lookup_rules_ops(); NULL is a no-op. */
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

/* Invoke the family's optional route-cache flush hook, if any. */
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

/*
 * Validate @ops (size and mandatory callbacks) and link it into the
 * per-netns ops list. Returns -EEXIST if the family already has ops
 * registered, -EINVAL on a malformed template.
 */
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	/* Only one fib_rules_ops per family is allowed per netns. */
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

/*
 * fib_rules_register - duplicate @tmpl and register it for @net
 *
 * Returns the newly allocated ops on success, or an ERR_PTR() on
 * allocation or registration failure. Unregister with
 * fib_rules_unregister().
 */
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

/* Unlink and release every rule still attached to @ops. */
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

/*
 * fib_rules_unregister - remove @ops from its netns and free it
 *
 * The ops structure itself is freed after an RCU grace period so
 * concurrent RCU walkers of the ops list remain safe.
 */
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

/* Non-zero when both ends of the kuid range are valid (range configured). */
static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

/* Convert a FRA_UID_RANGE attribute into kernel uids in the caller's ns. */
static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

/* Emit a FRA_UID_RANGE attribute, translating back to the caller's ns. */
static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

/* Copy a validated port-range attribute into @port_range; -EINVAL if bad. */
static int nla_get_port_range(struct nlattr *pattr,
			      struct fib_rule_port_range *port_range)
{
	const struct fib_rule_port_range *pr = nla_data(pattr);

	if (!fib_rule_port_range_valid(pr))
		return -EINVAL;

	port_range->start = pr->start;
	port_range->end = pr->end;

	return 0;
}

static int nla_put_port_range(struct sk_buff *skb, int attrtype,
			      struct fib_rule_port_range *range)
{
	return nla_put(skb, attrtype, sizeof(*range), range);
}

/*
 * fib_rule_match - test whether flow @fl matches @rule
 *
 * Checks the generic selectors (iif/oif, fwmark, tunnel id, l3mdev and
 * UID range) and then the family-specific ops->match. The result is
 * inverted when the rule carries FIB_RULE_INVERT.
 */
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int iifindex, oifindex, ret = 0;

	/* READ_ONCE pairs with WRITE_ONCE in attach_rules()/detach_rules() */
	iifindex = READ_ONCE(rule->iifindex);
	if (iifindex && (iifindex != fl->flowi_iif))
		goto out;

	oifindex = READ_ONCE(rule->oifindex);
	if (oifindex && (oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = INDIRECT_CALL_MT(ops->match,
			       fib6_rule_match,
			       fib4_rule_match,
			       rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ?
		!ret : ret;
}

/*
 * fib_rules_lookup - walk the rule list and run the first matching action
 *
 * Follows FR_ACT_GOTO chains via rule->ctarget, skips FR_ACT_NOP, and
 * stops at the first rule whose action does not return -EAGAIN (unless
 * ops->suppress vetoes the result). On success arg->rule is set; a
 * reference is taken unless the caller passed FIB_LOOKUP_NOREF.
 * Returns -ESRCH when no rule matched.
 */
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				/* unresolved goto: fall through to next rule */
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = INDIRECT_CALL_MT(ops->action,
					       fib6_rule_action,
					       fib4_rule_action,
					       rule, fl, flags, arg);

		if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
							      fib6_rule_suppress,
							      fib4_rule_suppress,
							      rule, flags, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

/* Notify a single registered listener @nb about one rule event. */
static int call_fib_rule_notifier(struct notifier_block *nb,
				  enum fib_event_type event_type,
				  struct fib_rule *rule, int family,
				  struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = family,
		.info.extack = extack,
		.rule = rule,
	};

	return call_fib_notifier(nb, event_type, &info.info);
}

/* Notify all listeners in @net and bump the rules sequence counter. */
static int call_fib_rule_notifiers(struct net *net,
				   enum fib_event_type event_type,
				   struct fib_rule *rule,
				   struct fib_rules_ops *ops,
				   struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = ops->family,
		.info.extack = extack,
		.rule = rule,
	};

	ASSERT_RTNL_NET(net);

	/* Paired with READ_ONCE() in fib_rules_seq_read() */
	WRITE_ONCE(ops->fib_rules_seq, ops->fib_rules_seq + 1);
	return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock(): replay FIB_EVENT_RULE_ADD for every rule
 * of @family to listener @nb (used when a notifier registers late).
 */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
		   struct netlink_ext_ack *extack)
{
	struct fib_rules_ops *ops;
	struct fib_rule *rule;
	int err = 0;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return -EAFNOSUPPORT;
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD,
					     rule, family, extack);
		if (err)
			break;
	}
	rules_ops_put(ops);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);

/* Current rules sequence number for @family, or 0 if none registered. */
unsigned int fib_rules_seq_read(const struct net *net, int family)
{
	unsigned int fib_rules_seq;
	struct fib_rules_ops *ops;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return 0;
	/* Paired with WRITE_ONCE() in call_fib_rule_notifiers() */
	fib_rules_seq = READ_ONCE(ops->fib_rules_seq);
	rules_ops_put(ops);

	return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);

/*
 * rule_find - locate an existing rule matching the request in @rule
 *
 * Unlike rule_exists(), unset (zero / -1) fields in the request act as
 * wildcards, so a delete request only needs to specify the attributes
 * it cares about. @user_priority tells whether FRA_PRIORITY was given
 * explicitly and must therefore match exactly.
 */
static struct fib_rule *rule_find(struct fib_rules_ops *ops,
				  struct fib_rule_hdr *frh,
				  struct nlattr **tb,
				  struct fib_rule *rule,
				  bool user_priority)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (rule->action && r->action != rule->action)
			continue;

		if (rule->table && r->table != rule->table)
			continue;

		if (user_priority && r->pref != rule->pref)
			continue;

		if (rule->iifname[0] &&
		    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (rule->oifname[0] &&
		    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (rule->mark && r->mark != rule->mark)
			continue;

		if (rule->suppress_ifgroup != -1 &&
		    r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (rule->suppress_prefixlen != -1 &&
		    r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (rule->mark_mask && r->mark_mask != rule->mark_mask)
			continue;

		if (rule->tun_id && r->tun_id != rule->tun_id)
			continue;

		if (rule->l3mdev && r->l3mdev != rule->l3mdev)
			continue;

		if (uid_range_set(&rule->uid_range) &&
		    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		     !uid_eq(r->uid_range.end, rule->uid_range.end)))
			continue;

		if (rule->ip_proto && r->ip_proto != rule->ip_proto)
			continue;

		if (rule->proto && r->proto != rule->proto)
			continue;

		if (fib_rule_port_range_set(&rule->sport_range) &&
		    !fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (rule->sport_mask && r->sport_mask != rule->sport_mask)
			continue;

		if (fib_rule_port_range_set(&rule->dport_range) &&
		    !fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (rule->dport_mask && r->dport_mask != rule->dport_mask)
			continue;

		/* family-specific attributes (src/dst, dscp, ...) */
		if (!ops->compare(r, frh, tb))
			continue;
		return r;
	}

	return NULL;
}

#ifdef CONFIG_NET_L3_MASTER_DEV
/* Parse FRA_L3MDEV; only the value 1 ("bound to l3mdev") is accepted. */
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	nlrule->l3mdev = nla_get_u8(nla);
	if (nlrule->l3mdev != 1) {
		NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute");
		return -1;
	}

	return 0;
}
#else
/* FRA_L3MDEV requested but CONFIG_NET_L3_MASTER_DEV is not built in. */
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel");
	return -1;
}
#endif

/*
 * Validate and read a FRA_SPORT_MASK/FRA_DPORT_MASK attribute against the
 * already-parsed port @range: a mask needs a single-port (non-range) value
 * whose bits are covered by the mask. On success stores it in @port_mask.
 */
static int fib_nl2rule_port_mask(const struct nlattr *mask_attr,
				 const struct fib_rule_port_range *range,
				 u16 *port_mask,
				 struct netlink_ext_ack *extack)
{
	if (!fib_rule_port_range_valid(range)) {
		NL_SET_ERR_MSG_ATTR(extack, mask_attr,
				    "Cannot specify port mask without port value");
		return -EINVAL;
	}

	if (fib_rule_port_is_range(range)) {
		NL_SET_ERR_MSG_ATTR(extack, mask_attr,
				    "Cannot specify port mask for port range");
		return -EINVAL;
	}

	if (range->start & ~nla_get_u16(mask_attr)) {
		NL_SET_ERR_MSG_ATTR(extack, mask_attr, "Invalid port mask");
		return -EINVAL;
	}

	*port_mask = nla_get_u16(mask_attr);

	return 0;
}

/*
 * fib_nl2rule - build a candidate fib_rule from a netlink request
 *
 * Parses and validates all generic attributes into a freshly allocated
 * rule (*@rule on success, refcount 1). This part does not require RTNL;
 * device-dependent fixups are done later in fib_nl2rule_rtnl().
 * *@user_priority is set when FRA_PRIORITY was explicitly supplied.
 */
static int fib_nl2rule(struct net *net, struct nlmsghdr *nlh,
		       struct netlink_ext_ack *extack,
		       struct fib_rules_ops *ops,
		       struct nlattr *tb[],
		       struct fib_rule **rule,
		       bool *user_priority)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rule *nlrule = NULL;
	int err = -EINVAL;

	if (frh->src_len)
		if (!tb[FRA_SRC] ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			goto errout;
		}

	if (frh->dst_len)
		if (!tb[FRA_DST] ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid dst address");
			goto errout;
		}

	nlrule = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
	if (!nlrule) {
		err = -ENOMEM;
		goto errout;
	}
	refcount_set(&nlrule->refcnt, 1);
	nlrule->fr_net = net;

	if (tb[FRA_PRIORITY]) {
		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
		*user_priority = true;
	}

	nlrule->proto = nla_get_u8_default(tb[FRA_PROTOCOL], RTPROT_UNSPEC);

	if (tb[FRA_IIFNAME]) {
		/* -1 = detached until fib_nl2rule_rtnl() resolves the name */
		nlrule->iifindex = -1;
		nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
	}

	if (tb[FRA_OIFNAME]) {
		nlrule->oifindex = -1;
		nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
	}

	if (tb[FRA_FWMARK]) {
		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (nlrule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			nlrule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	if (tb[FRA_L3MDEV] &&
	    fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0)
		goto errout_free;

	nlrule->action = frh->action;
	nlrule->flags = frh->flags;
	nlrule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		nlrule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		nlrule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (nlrule->action != FR_ACT_GOTO) {
			NL_SET_ERR_MSG(extack, "Unexpected goto");
			goto errout_free;
		}

		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
	} else if (nlrule->action == FR_ACT_GOTO) {
		NL_SET_ERR_MSG(extack, "Missing goto target for action goto");
		goto errout_free;
	}

	if (nlrule->l3mdev && nlrule->table) {
		NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive");
		goto errout_free;
	}

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			NL_SET_ERR_MSG(extack, "No permission to set uid");
			goto errout_free;
		}

		nlrule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&nlrule->uid_range) ||
		    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) {
			NL_SET_ERR_MSG(extack, "Invalid uid range");
			goto errout_free;
		}
	} else {
		nlrule->uid_range = fib_kuid_range_unset;
	}

	if (tb[FRA_IP_PROTO])
		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);

	if (tb[FRA_SPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
					 &nlrule->sport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid sport range");
			goto errout_free;
		}
		/* single port: match all bits unless FRA_SPORT_MASK follows */
		if (!fib_rule_port_is_range(&nlrule->sport_range))
			nlrule->sport_mask = U16_MAX;
	}

	if (tb[FRA_SPORT_MASK]) {
		err = fib_nl2rule_port_mask(tb[FRA_SPORT_MASK],
					    &nlrule->sport_range,
					    &nlrule->sport_mask, extack);
		if (err)
			goto errout_free;
	}

	if (tb[FRA_DPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
					 &nlrule->dport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid dport range");
			goto errout_free;
		}
		if (!fib_rule_port_is_range(&nlrule->dport_range))
			nlrule->dport_mask = U16_MAX;
	}

	if (tb[FRA_DPORT_MASK]) {
		err = fib_nl2rule_port_mask(tb[FRA_DPORT_MASK],
					    &nlrule->dport_range,
					    &nlrule->dport_mask, extack);
		if (err)
			goto errout_free;
	}

	*rule = nlrule;

	return 0;

errout_free:
	kfree(nlrule);
errout:
	return err;
}

/*
 * Second parsing stage, run under (per-netns) RTNL: assign a default
 * priority when none was given, reject backward gotos, and resolve
 * iif/oif names to ifindexes (left at -1 if the device is absent).
 */
static int fib_nl2rule_rtnl(struct fib_rule *nlrule,
			    struct fib_rules_ops *ops,
			    struct nlattr *tb[],
			    struct netlink_ext_ack *extack)
{
	if (!tb[FRA_PRIORITY])
		nlrule->pref = fib_default_rule_pref(ops);

	/* Backward jumps are prohibited to avoid endless loops */
	if (tb[FRA_GOTO] && nlrule->target <= nlrule->pref) {
		NL_SET_ERR_MSG(extack, "Backward goto not supported");
		return -EINVAL;
	}

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		dev = __dev_get_by_name(nlrule->fr_net, nlrule->iifname);
		if (dev)
			nlrule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		dev = __dev_get_by_name(nlrule->fr_net, nlrule->oifname);
		if (dev)
			nlrule->oifindex = dev->ifindex;
	}

	return 0;
}

/*
 * rule_exists - exact-duplicate check used for NLM_F_EXCL
 *
 * Unlike rule_find(), every generic field must compare equal; returns 1
 * when an identical rule is already installed.
 */
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list,
			    list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (r->ip_proto != rule->ip_proto)
			continue;

		if (r->proto != rule->proto)
			continue;

		if (!fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (r->sport_mask != rule->sport_mask)
			continue;

		if (!fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (r->dport_mask != rule->dport_mask)
			continue;

		/* family-specific attributes */
		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}

/* Netlink policy for FRA_* attributes shared by all families. */
static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
	[FRA_UNSPEC]	= { .strict_start_type = FRA_DPORT_RANGE + 1 },
	[FRA_IIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_OIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_PRIORITY]	= { .type = NLA_U32 },
	[FRA_FWMARK]	= { .type = NLA_U32 },
	[FRA_FLOW]	= { .type = NLA_U32 },
	[FRA_TUN_ID]	= { .type = NLA_U64 },
	[FRA_FWMASK]	= { .type = NLA_U32 },
	[FRA_TABLE]	= { .type = NLA_U32 },
	[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 },
	[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 },
	[FRA_GOTO]	= { .type = NLA_U32 },
	[FRA_L3MDEV]	= { .type = NLA_U8 },
	[FRA_UID_RANGE]	= { .len = sizeof(struct fib_rule_uid_range) },
	[FRA_PROTOCOL]	= { .type = NLA_U8 },
	[FRA_IP_PROTO]	= { .type = NLA_U8 },
	[FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DSCP]	= NLA_POLICY_MAX(NLA_U8, INET_DSCP_MASK >> 2),
	[FRA_FLOWLABEL]	= { .type = NLA_BE32 },
	[FRA_FLOWLABEL_MASK] = { .type = NLA_BE32 },
	[FRA_SPORT_MASK] = { .type = NLA_U16 },
	[FRA_DPORT_MASK] = { .type = NLA_U16 },
	[FRA_DSCP_MASK]	= NLA_POLICY_MASK(NLA_U8, INET_DSCP_MASK >> 2),
};

/*
 * fib_newrule - handle RTM_NEWRULE: parse, validate and install a rule
 * @rtnl_held: true when the caller already holds (per-netns) RTNL
 *
 * Inserts the rule sorted by preference, resolves goto targets in both
 * directions, publishes the change to notifiers and sends an RTM_NEWRULE
 * netlink notification.
 */
int fib_newrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
		struct netlink_ext_ack *extack, bool rtnl_held)
{
	struct fib_rule *rule = NULL, *r, *last = NULL;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	int err = -EINVAL, unresolved = 0;
	struct fib_rules_ops *ops = NULL;
	struct nlattr *tb[FRA_MAX + 1];
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (!ops) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     fib_rule_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(net, nlh, extack, ops, tb, &rule, &user_priority);
	if (err)
		goto errout;

	if (!rtnl_held)
		rtnl_net_lock(net);

	err = fib_nl2rule_rtnl(rule, ops, tb, extack);
	if (err)
		goto errout_free;

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	/* family-specific setup (src/dst addresses, table, ...) */
	err = ops->configure(rule, skb, frh, tb, extack);
	if (err < 0)
		goto errout_free;

	err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
				      extack);
	if (err < 0)
		goto errout_free;

	/* resolve this rule's own goto target, if any */
	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref == rule->target) {
			RCU_INIT_POINTER(rule->ctarget, r);
			break;
		}
	}

	if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
		unresolved = 1;

	/* insert sorted: after the last rule with pref <= ours */
	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	/* hold the rule across the unlock for the notification below */
	fib_rule_get(rule);

	if (!rtnl_held)
		rtnl_net_unlock(net);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	fib_rule_put(rule);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	if (!rtnl_held)
		rtnl_net_unlock(net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_newrule);

/* rtnetlink doit handler for RTM_NEWRULE (RTNL not yet held). */
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	return fib_newrule(sock_net(skb->sk), skb, nlh, extack, false);
}

/*
 * fib_delrule - handle RTM_DELRULE: find and remove a matching rule
 * @rtnl_held: true when the caller already holds (per-netns) RTNL
 *
 * Builds a template rule from the request (freed before return), locates
 * the installed rule via rule_find(), unlinks it, re-resolves goto rules
 * that targeted it, and sends notifications.
 */
int fib_delrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
		struct netlink_ext_ack *extack, bool rtnl_held)
{
	struct fib_rule *rule = NULL,
			*nlrule = NULL;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct nlattr *tb[FRA_MAX+1];
	bool user_priority = false;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     fib_rule_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	/* nlrule is only a match template here, never installed */
	err = fib_nl2rule(net, nlh, extack, ops, tb, &nlrule, &user_priority);
	if (err)
		goto errout;

	if (!rtnl_held)
		rtnl_net_lock(net);

	err = fib_nl2rule_rtnl(nlrule, ops, tb, extack);
	if (err)
		goto errout_free;

	rule = rule_find(ops, frh, tb, nlrule, user_priority);
	if (!rule) {
		err = -ENOENT;
		goto errout_free;
	}

	if (rule->flags & FIB_RULE_PERMANENT) {
		err = -EPERM;
		goto errout_free;
	}

	if (ops->delete) {
		err = ops->delete(rule);
		if (err)
			goto errout_free;
	}

	if (rule->tun_id)
		ip_tunnel_unneed_metadata();

	list_del_rcu(&rule->list);

	if (rule->action == FR_ACT_GOTO) {
		ops->nr_goto_rules--;
		if (rtnl_dereference(rule->ctarget) == NULL)
			ops->unresolved_rules--;
	}

	/*
	 * Check if this rule is a target to any of them. If so,
	 * adjust to the next one with the same preference or
	 * disable them. As this operation is eventually very
	 * expensive, it is only performed if goto rules, except
	 * current if it is goto rule, have actually been added.
	 */
	if (ops->nr_goto_rules > 0) {
		struct fib_rule *n, *r;

		n = list_next_entry(rule, list);
		if (&n->list == &ops->rules_list || n->pref != rule->pref)
			n = NULL;
		list_for_each_entry(r, &ops->rules_list, list) {
			if (rtnl_dereference(r->ctarget) != rule)
				continue;
			rcu_assign_pointer(r->ctarget, n);
			if (!n)
				ops->unresolved_rules++;
		}
	}

	call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops, NULL);

	if (!rtnl_held)
		rtnl_net_unlock(net);

	notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	fib_rule_put(rule);
	flush_route_cache(ops);
	rules_ops_put(ops);
	kfree(nlrule);
	return 0;

errout_free:
	if (!rtnl_held)
		rtnl_net_unlock(net);
	kfree(nlrule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_delrule);

/* rtnetlink doit handler for RTM_DELRULE (RTNL not yet held). */
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	return fib_delrule(sock_net(skb->sk), skb, nlh, extack, false);
}

/*
 * Worst-case netlink message size for one rule: header plus all generic
 * attributes, plus whatever the family's optional nlmsg_payload hook adds.
 */
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range))
			 + nla_total_size(1) /* FRA_PROTOCOL */
			 + nla_total_size(1) /* FRA_IP_PROTO */
			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_DPORT_RANGE */
			 + nla_total_size(2) /* FRA_SPORT_MASK */
			 + nla_total_size(2); /* FRA_DPORT_MASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

/*
 * Serialize @rule into an RTM_NEWRULE/RTM_DELRULE message. Returns 0 or
 * -EMSGSIZE (message cancelled) when @skb runs out of room.
 */
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	/* tables >= 256 don't fit in the u8 header field */
	frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
		goto nla_put_failure;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (READ_ONCE(rule->iifindex) == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (READ_ONCE(rule->oifindex) == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	/* optional attributes are only emitted when set */
	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)) ||
	    (fib_rule_port_range_set(&rule->sport_range) &&
	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
	    (rule->sport_mask && nla_put_u16(skb, FRA_SPORT_MASK,
					     rule->sport_mask)) ||
	    (fib_rule_port_range_set(&rule->dport_range) &&
	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
	    (rule->dport_mask && nla_put_u16(skb, FRA_DPORT_MASK,
					     rule->dport_mask)) ||
	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	/* family-specific attributes */
	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Dump all rules of one family into @skb, resuming at cb->args[1].
 * Consumes the ops reference via rules_ops_put().
 */
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

/* Strict-mode validation of an RTM_GETRULE dump request header. */
static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
				   struct netlink_ext_ack *extack)
{
	struct fib_rule_hdr *frh;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
		return -EINVAL;
	}

	frh = nlmsg_data(nlh);
	if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
	    frh->res1 || frh->res2 || frh->action || frh->flags) {
		NL_SET_ERR_MSG(extack,
			       "Invalid values in header for fib rule dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
		return -EINVAL;
	}

	return 0;
}

/*
 * rtnetlink dumpit handler for RTM_GETRULE: dump one family, or walk
 * every registered family for AF_UNSPEC (resuming at cb->args[0]).
 */
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int err, idx = 0, family;

	if (cb->strict_check) {
		err = fib_valid_dumprule_req(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = rtnl_msg_family(nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	err = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		err = dump_rules(skb, cb, ops);
		if (err < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}

/*
 * Multicast an RTM_NEWRULE/RTM_DELRULE notification for @rule to the
 * family's netlink group; on failure report the error on the socket.
 */
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOMEM;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(net, ops->nlgroup, err);
}

/*
 * Bind detached rules (ifindex == -1) to a newly named/registered device.
 * WRITE_ONCE pairs with the READ_ONCE readers in fib_rule_match() and
 * fib_nl_fill_rule().
 */
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			WRITE_ONCE(rule->iifindex, dev->ifindex);
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			WRITE_ONCE(rule->oifindex, dev->ifindex);
	}
}

/* Mark rules referencing @dev as detached (ifindex -1). */
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			WRITE_ONCE(rule->iifindex, -1);
		if (rule->oifindex == dev->ifindex)
			WRITE_ONCE(rule->oifindex, -1);
	}
}


/*
 * Netdevice notifier: keep rule iif/oif bindings in sync as devices
 * register, unregister or change name. Runs under RTNL.
 */
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		/* rebind: drop stale index, then re-resolve by new name */
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init
fib_rules_net_init(struct net *net) 1396 { 1397 INIT_LIST_HEAD(&net->rules_ops); 1398 spin_lock_init(&net->rules_mod_lock); 1399 return 0; 1400 } 1401 1402 static void __net_exit fib_rules_net_exit(struct net *net) 1403 { 1404 WARN_ON_ONCE(!list_empty(&net->rules_ops)); 1405 } 1406 1407 static struct pernet_operations fib_rules_net_ops = { 1408 .init = fib_rules_net_init, 1409 .exit = fib_rules_net_exit, 1410 }; 1411 1412 static const struct rtnl_msg_handler fib_rules_rtnl_msg_handlers[] __initconst = { 1413 {.msgtype = RTM_NEWRULE, .doit = fib_nl_newrule, 1414 .flags = RTNL_FLAG_DOIT_PERNET}, 1415 {.msgtype = RTM_DELRULE, .doit = fib_nl_delrule, 1416 .flags = RTNL_FLAG_DOIT_PERNET}, 1417 {.msgtype = RTM_GETRULE, .dumpit = fib_nl_dumprule, 1418 .flags = RTNL_FLAG_DUMP_UNLOCKED}, 1419 }; 1420 1421 static int __init fib_rules_init(void) 1422 { 1423 int err; 1424 1425 rtnl_register_many(fib_rules_rtnl_msg_handlers); 1426 1427 err = register_pernet_subsys(&fib_rules_net_ops); 1428 if (err < 0) 1429 goto fail; 1430 1431 err = register_netdevice_notifier(&fib_rules_notifier); 1432 if (err < 0) 1433 goto fail_unregister; 1434 1435 return 0; 1436 1437 fail_unregister: 1438 unregister_pernet_subsys(&fib_rules_net_ops); 1439 fail: 1440 rtnl_unregister_many(fib_rules_rtnl_msg_handlers); 1441 return err; 1442 } 1443 1444 subsys_initcall(fib_rules_init); 1445