// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Isovalent */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/netfilter_netdev.h>
#include <linux/bpf_mprog.h>
#include <linux/indirect_call_wrapper.h>

#include <net/netkit.h>
#include <net/dst.h>
#include <net/tcx.h>

#define DRV_NAME "netkit"

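/*
 * Per-device private state. Each device of a netkit pair holds its own
 * instance; the two are linked through the RCU-protected peer pointer.
 */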
struct netkit {
	/* Needed in fast-path */
	struct net_device __rcu *peer;
	struct bpf_mprog_entry __rcu *active;
	enum netkit_action policy;
	enum netkit_scrub scrub;
	struct bpf_mprog_bundle bundle;

	/* Needed in slow-path */
	enum netkit_mode mode;
	bool primary;
	u32 headroom;
};

struct netkit_link {
	struct bpf_link link;
	struct net_device *dev;
};

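/*
 * Run the attached BPF programs in order; the first verdict other than
 * NETKIT_NEXT terminates the walk. If no program overrides it, the
 * device's default policy passed in via @ret is returned.
 */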
static __always_inline int
netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
	   enum netkit_action ret)
{
	const struct bpf_mprog_fp *fp;
	const struct bpf_prog *prog;

	bpf_mprog_foreach_prog(entry, fp, prog) {
		bpf_compute_data_pointers(skb);
		ret = bpf_prog_run(prog, skb);
		if (ret != NETKIT_NEXT)
			break;
	}
	return ret;
}

static void netkit_xnet(struct sk_buff *skb)
{
	skb->priority = 0;
	skb->mark = 0;
}

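/*
 * Prepare the skb for handover to the peer: scrub packet metadata (dst,
 * conntrack state, etc.), mark the skb to skip the netfilter egress hook,
 * and reset the mac header. When the pair crosses network namespaces, also
 * clear the timestamp and, if scrubbing is enabled, priority and mark via
 * netkit_xnet().
 */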
static void netkit_prep_forward(struct sk_buff *skb,
				bool xnet, bool xnet_scrub)
{
	skb_scrub_packet(skb, false);
	nf_skip_egress(skb, true);
	skb_reset_mac_header(skb);
	if (!xnet)
		return;
	skb_clear_tstamp(skb);
	if (xnet_scrub)
		netkit_xnet(skb);
}

static struct netkit *netkit_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}

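/*
 * TX fast path: packets sent on one device are run through the attached
 * BPF programs and then delivered directly into the peer's RX path via
 * __netif_rx(), redirected elsewhere, or dropped according to the verdict.
 */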
static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	struct netkit *nk = netkit_priv(dev);
	enum netkit_action ret = READ_ONCE(nk->policy);
	netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
	const struct bpf_mprog_entry *entry;
	struct net_device *peer;
	int len = skb->len;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (unlikely(!peer || !(peer->flags & IFF_UP) ||
		     !pskb_may_pull(skb, ETH_HLEN) ||
		     skb_orphan_frags(skb, GFP_ATOMIC)))
		goto drop;
	netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)),
			    nk->scrub);
	eth_skb_pkt_type(skb, peer);
	skb->dev = peer;
	entry = rcu_dereference(nk->active);
	if (entry)
		ret = netkit_run(entry, skb, ret);
	switch (ret) {
	case NETKIT_NEXT:
	case NETKIT_PASS:
		eth_skb_pull_mac(skb);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
			dev_sw_netstats_tx_add(dev, 1, len);
			dev_sw_netstats_rx_add(peer, len);
		} else {
			goto drop_stats;
		}
		break;
	case NETKIT_REDIRECT:
		dev_sw_netstats_tx_add(dev, 1, len);
		skb_do_redirect(skb);
		break;
	case NETKIT_DROP:
	default:
drop:
		kfree_skb(skb);
drop_stats:
		dev_core_stats_tx_dropped_inc(dev);
		ret_dev = NET_XMIT_DROP;
		break;
	}
	rcu_read_unlock();
	bpf_net_ctx_clear(bpf_net_ctx);
	return ret_dev;
}

static int netkit_open(struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	if (!peer)
		return -ENOTCONN;
	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int netkit_close(struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);
	return 0;
}

static int netkit_get_iflink(const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer;
	int iflink = 0;

	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (peer)
		iflink = READ_ONCE(peer->ifindex);
	rcu_read_unlock();
	return iflink;
}

static void netkit_set_multicast(struct net_device *dev)
{
	/* Nothing to do, we receive whatever gets pushed to us! */
}

static int netkit_set_macaddr(struct net_device *dev, void *sa)
{
	struct netkit *nk = netkit_priv(dev);

	if (nk->mode != NETKIT_L2)
		return -EOPNOTSUPP;

	return eth_mac_addr(dev, sa);
}

static void netkit_set_headroom(struct net_device *dev, int headroom)
{
	struct netkit *nk = netkit_priv(dev), *nk2;
	struct net_device *peer;

	if (headroom < 0)
		headroom = NET_SKB_PAD;

	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (unlikely(!peer))
		goto out;

	nk2 = netkit_priv(peer);
	nk->headroom = headroom;
	headroom = max(nk->headroom, nk2->headroom);

	peer->needed_headroom = headroom;
	dev->needed_headroom = headroom;
out:
	rcu_read_unlock();
}

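/* Resolve the peer device under RCU; exported to the stack via ndo_get_peer_dev. */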
INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
{
	return rcu_dereference(netkit_priv(dev)->peer);
}

static void netkit_get_stats(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	dev_fetch_sw_netstats(stats, dev->tstats);
	stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}

static void netkit_uninit(struct net_device *dev);

static const struct net_device_ops netkit_netdev_ops = {
	.ndo_open		= netkit_open,
	.ndo_stop		= netkit_close,
	.ndo_start_xmit		= netkit_xmit,
	.ndo_set_rx_mode	= netkit_set_multicast,
	.ndo_set_rx_headroom	= netkit_set_headroom,
	.ndo_set_mac_address	= netkit_set_macaddr,
	.ndo_get_iflink		= netkit_get_iflink,
	.ndo_get_peer_dev	= netkit_peer_dev,
	.ndo_get_stats64	= netkit_get_stats,
	.ndo_uninit		= netkit_uninit,
	.ndo_features_check	= passthru_features_check,
};

static void netkit_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}

static const struct ethtool_ops netkit_ethtool_ops = {
	.get_drvinfo		= netkit_get_drvinfo,
};

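/*
 * Base device setup: Ethernet-style device without a qdisc, without ARP
 * by default (L2 mode re-enables it at link creation time), and with a
 * broad set of software offload features.
 */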
static void netkit_setup(struct net_device *dev)
{
	static const netdev_features_t netkit_features_hw_vlan =
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_STAG_TX |
		NETIF_F_HW_VLAN_STAG_RX;
	static const netdev_features_t netkit_features =
		netkit_features_hw_vlan |
		NETIF_F_SG |
		NETIF_F_FRAGLIST |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM |
		NETIF_F_SCTP_CRC |
		NETIF_F_HIGHDMA |
		NETIF_F_GSO_SOFTWARE |
		NETIF_F_GSO_ENCAP_ALL;

	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	dev->flags |= IFF_NOARP;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_DISABLE_NETPOLL;
	dev->lltx = true;

	dev->ethtool_ops = &netkit_ethtool_ops;
	dev->netdev_ops = &netkit_netdev_ops;

	dev->features |= netkit_features;
	dev->hw_features = netkit_features;
	dev->hw_enc_features = netkit_features;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
	dev->vlan_features = dev->features & ~netkit_features_hw_vlan;

	dev->needs_free_netdev = true;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}

static struct net *netkit_get_link_net(const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static int netkit_check_policy(int policy, struct nlattr *tb,
			       struct netlink_ext_ack *extack)
{
	switch (policy) {
	case NETKIT_PASS:
	case NETKIT_DROP:
		return 0;
	default:
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "Provided default xmit policy not supported");
		return -EINVAL;
	}
}

static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct nlattr *attr = tb[IFLA_ADDRESS];

	if (!attr)
		return 0;
	if (nla_len(attr) != ETH_ALEN)
		return -EINVAL;
	if (!is_valid_ether_addr(nla_data(attr)))
		return -EADDRNOTAVAIL;
	return 0;
}

static struct rtnl_link_ops netkit_link_ops;

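/*
 * Create a netkit pair: parse the netlink parameters, register the peer
 * device first (possibly in another netns via IFLA_NETKIT_PEER_INFO),
 * then the primary, and only wire up the RCU peer pointers once both
 * registrations have succeeded.
 */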
static int netkit_new_link(struct net_device *dev,
			   struct rtnl_newlink_params *params,
			   struct netlink_ext_ack *extack)
{
	struct net *peer_net = rtnl_newlink_peer_net(params);
	enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
	enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp, *attr;
	enum netkit_action policy_prim = NETKIT_PASS;
	enum netkit_action policy_peer = NETKIT_PASS;
	struct nlattr **data = params->data;
	enum netkit_mode mode = NETKIT_L3;
	unsigned char ifname_assign_type;
	struct nlattr **tb = params->tb;
	u16 headroom = 0, tailroom = 0;
	struct ifinfomsg *ifmp = NULL;
	struct net_device *peer;
	char ifname[IFNAMSIZ];
	struct netkit *nk;
	int err;

	tbp = tb;
	if (data) {
		if (data[IFLA_NETKIT_MODE])
			mode = nla_get_u32(data[IFLA_NETKIT_MODE]);
		if (data[IFLA_NETKIT_PEER_INFO]) {
			attr = data[IFLA_NETKIT_PEER_INFO];
			ifmp = nla_data(attr);
			rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack);
			tbp = peer_tb;
		}
		if (data[IFLA_NETKIT_SCRUB])
			scrub_prim = nla_get_u32(data[IFLA_NETKIT_SCRUB]);
		if (data[IFLA_NETKIT_PEER_SCRUB])
			scrub_peer = nla_get_u32(data[IFLA_NETKIT_PEER_SCRUB]);
		if (data[IFLA_NETKIT_POLICY]) {
			attr = data[IFLA_NETKIT_POLICY];
			policy_prim = nla_get_u32(attr);
			err = netkit_check_policy(policy_prim, attr, extack);
			if (err < 0)
				return err;
		}
		if (data[IFLA_NETKIT_PEER_POLICY]) {
			attr = data[IFLA_NETKIT_PEER_POLICY];
			policy_peer = nla_get_u32(attr);
			err = netkit_check_policy(policy_peer, attr, extack);
			if (err < 0)
				return err;
		}
		if (data[IFLA_NETKIT_HEADROOM])
			headroom = nla_get_u16(data[IFLA_NETKIT_HEADROOM]);
		if (data[IFLA_NETKIT_TAILROOM])
			tailroom = nla_get_u16(data[IFLA_NETKIT_TAILROOM]);
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		ifname_assign_type = NET_NAME_USER;
	} else {
		strscpy(ifname, "nk%d", IFNAMSIZ);
		ifname_assign_type = NET_NAME_ENUM;
	}
	if (mode != NETKIT_L2 &&
	    (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
		return -EOPNOTSUPP;

	peer = rtnl_create_link(peer_net, ifname, ifname_assign_type,
				&netkit_link_ops, tbp, extack);
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	netif_inherit_tso_max(peer, dev);
	if (headroom) {
		peer->needed_headroom = headroom;
		dev->needed_headroom = headroom;
	}
	if (tailroom) {
		peer->needed_tailroom = tailroom;
		dev->needed_tailroom = tailroom;
	}

	if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
		eth_hw_addr_random(peer);
	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	nk = netkit_priv(peer);
	nk->primary = false;
	nk->policy = policy_peer;
	nk->scrub = scrub_peer;
	nk->mode = mode;
	nk->headroom = headroom;
	bpf_mprog_bundle_init(&nk->bundle);

	err = register_netdevice(peer);
	if (err < 0)
		goto err_register_peer;
	netif_carrier_off(peer);
	if (mode == NETKIT_L2)
		dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL);

	err = rtnl_configure_link(peer, NULL, 0, NULL);
	if (err < 0)
		goto err_configure_peer;

	if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		strscpy(dev->name, "nk%d", IFNAMSIZ);

	nk = netkit_priv(dev);
	nk->primary = true;
	nk->policy = policy_prim;
	nk->scrub = scrub_prim;
	nk->mode = mode;
	nk->headroom = headroom;
	bpf_mprog_bundle_init(&nk->bundle);

	err = register_netdevice(dev);
	if (err < 0)
		goto err_configure_peer;
	netif_carrier_off(dev);
	if (mode == NETKIT_L2)
		dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL);

	rcu_assign_pointer(netkit_priv(dev)->peer, peer);
	rcu_assign_pointer(netkit_priv(peer)->peer, dev);
	return 0;
err_configure_peer:
	unregister_netdevice(peer);
	return err;
err_register_peer:
	free_netdev(peer);
	return err;
}

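/*
 * bpf_mprog entry management: all updates happen under RTNL, while the
 * TX fast path dereferences nk->active under RCU. netkit_entry_sync()
 * ensures readers are done with the old entry before it gets reused.
 */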
static struct bpf_mprog_entry *netkit_entry_fetch(struct net_device *dev,
						  bool bundle_fallback)
{
	struct netkit *nk = netkit_priv(dev);
	struct bpf_mprog_entry *entry;

	ASSERT_RTNL();
	entry = rcu_dereference_rtnl(nk->active);
	if (entry)
		return entry;
	if (bundle_fallback)
		return &nk->bundle.a;
	return NULL;
}

static void netkit_entry_update(struct net_device *dev,
				struct bpf_mprog_entry *entry)
{
	struct netkit *nk = netkit_priv(dev);

	ASSERT_RTNL();
	rcu_assign_pointer(nk->active, entry);
}

static void netkit_entry_sync(void)
{
	synchronize_rcu();
}

static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 which)
{
	struct net_device *dev;
	struct netkit *nk;

	ASSERT_RTNL();

	switch (which) {
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);
	if (dev->netdev_ops != &netkit_netdev_ops)
		return ERR_PTR(-ENXIO);

	nk = netkit_priv(dev);
	if (!nk->primary)
		return ERR_PTR(-EACCES);
	if (which == BPF_NETKIT_PEER) {
		dev = rcu_dereference_rtnl(nk->peer);
		if (!dev)
			return ERR_PTR(-ENODEV);
	}
	return dev;
}

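/*
 * BPF syscall entry points for attaching, detaching and querying programs.
 * Attachments are always issued against the primary device; the
 * BPF_NETKIT_PEER attach type selects the peer's program list from there.
 */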
int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_mprog_entry *entry, *entry_new;
	struct bpf_prog *replace_prog = NULL;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
			       attr->attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	entry = netkit_entry_fetch(dev, true);
	if (attr->attach_flags & BPF_F_REPLACE) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd,
						 prog->type);
		if (IS_ERR(replace_prog)) {
			ret = PTR_ERR(replace_prog);
			replace_prog = NULL;
			goto out;
		}
	}
	ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog,
			       attr->attach_flags, attr->relative_fd,
			       attr->expected_revision);
	if (!ret) {
		if (entry != entry_new) {
			netkit_entry_update(dev, entry_new);
			netkit_entry_sync();
		}
		bpf_mprog_commit(entry);
	}
out:
	if (replace_prog)
		bpf_prog_put(replace_prog);
	rtnl_unlock();
	return ret;
}

int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
			       attr->attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags,
			       attr->relative_fd, attr->expected_revision);
	if (!ret) {
		if (!bpf_mprog_total(entry_new))
			entry_new = NULL;
		netkit_entry_update(dev, entry_new);
		netkit_entry_sync();
		bpf_mprog_commit(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns,
			       attr->query.target_ifindex,
			       attr->query.attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false));
out:
	rtnl_unlock();
	return ret;
}

static struct netkit_link *netkit_link(const struct bpf_link *link)
{
	return container_of(link, struct netkit_link, link);
}

static int netkit_link_prog_attach(struct bpf_link *link, u32 flags,
				   u32 id_or_fd, u64 revision)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev = nkl->dev;
	int ret;

	ASSERT_RTNL();
	entry = netkit_entry_fetch(dev, true);
	ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags,
			       id_or_fd, revision);
	if (!ret) {
		if (entry != entry_new) {
			netkit_entry_update(dev, entry_new);
			netkit_entry_sync();
		}
		bpf_mprog_commit(entry);
	}
	return ret;
}

static void netkit_link_release(struct bpf_link *link)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = nkl->dev;
	if (!dev)
		goto out;
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0);
	if (!ret) {
		if (!bpf_mprog_total(entry_new))
			entry_new = NULL;
		netkit_entry_update(dev, entry_new);
		netkit_entry_sync();
		bpf_mprog_commit(entry);
		nkl->dev = NULL;
	}
out:
	WARN_ON_ONCE(ret);
	rtnl_unlock();
}

static int netkit_link_update(struct bpf_link *link, struct bpf_prog *nprog,
			      struct bpf_prog *oprog)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = nkl->dev;
	if (!dev) {
		ret = -ENOLINK;
		goto out;
	}
	if (oprog && link->prog != oprog) {
		ret = -EPERM;
		goto out;
	}
	oprog = link->prog;
	if (oprog == nprog) {
		bpf_prog_put(nprog);
		goto out;
	}
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog,
			       BPF_F_REPLACE | BPF_F_ID,
			       link->prog->aux->id, 0);
	if (!ret) {
		WARN_ON_ONCE(entry != entry_new);
		oprog = xchg(&link->prog, nprog);
		bpf_prog_put(oprog);
		bpf_mprog_commit(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

static void netkit_link_dealloc(struct bpf_link *link)
{
	kfree(netkit_link(link));
}

static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq)
{
	const struct netkit_link *nkl = netkit_link(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (nkl->dev)
		ifindex = nkl->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
	seq_printf(seq, "attach_type:\t%u (%s)\n",
		   link->attach_type,
		   link->attach_type == BPF_NETKIT_PRIMARY ? "primary" : "peer");
}

static int netkit_link_fill_info(const struct bpf_link *link,
				 struct bpf_link_info *info)
{
	const struct netkit_link *nkl = netkit_link(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (nkl->dev)
		ifindex = nkl->dev->ifindex;
	rtnl_unlock();

	info->netkit.ifindex = ifindex;
	info->netkit.attach_type = link->attach_type;
	return 0;
}

static int netkit_link_detach(struct bpf_link *link)
{
	netkit_link_release(link);
	return 0;
}

static const struct bpf_link_ops netkit_link_lops = {
	.release		= netkit_link_release,
	.detach			= netkit_link_detach,
	.dealloc		= netkit_link_dealloc,
	.update_prog		= netkit_link_update,
	.show_fdinfo		= netkit_link_fdinfo,
	.fill_link_info		= netkit_link_fill_info,
};

static int netkit_link_init(struct netkit_link *nkl,
			    struct bpf_link_primer *link_primer,
			    const union bpf_attr *attr,
			    struct net_device *dev,
			    struct bpf_prog *prog)
{
	bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT,
		      &netkit_link_lops, prog, attr->link_create.attach_type);
	nkl->dev = dev;
	return bpf_link_prime(&nkl->link, link_primer);
}

int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct netkit_link *nkl;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns,
			       attr->link_create.target_ifindex,
			       attr->link_create.attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	nkl = kzalloc(sizeof(*nkl), GFP_KERNEL_ACCOUNT);
	if (!nkl) {
		ret = -ENOMEM;
		goto out;
	}
	ret = netkit_link_init(nkl, &link_primer, attr, dev, prog);
	if (ret) {
		kfree(nkl);
		goto out;
	}
	ret = netkit_link_prog_attach(&nkl->link,
				      attr->link_create.flags,
				      attr->link_create.netkit.relative_fd,
				      attr->link_create.netkit.expected_revision);
	if (ret) {
		nkl->dev = NULL;
		bpf_link_cleanup(&link_primer);
		goto out;
	}
	ret = bpf_link_settle(&link_primer);
out:
	rtnl_unlock();
	return ret;
}

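/*
 * Device teardown: unpublish the active entry, wait for fast-path readers,
 * then release all remaining programs and detach any still-open BPF links
 * by clearing their device back-pointer.
 */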
static void netkit_release_all(struct net_device *dev)
{
	struct bpf_mprog_entry *entry;
	struct bpf_tuple tuple = {};
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;

	entry = netkit_entry_fetch(dev, false);
	if (!entry)
		return;
	netkit_entry_update(dev, NULL);
	netkit_entry_sync();
	bpf_mprog_foreach_tuple(entry, fp, cp, tuple) {
		if (tuple.link)
			netkit_link(tuple.link)->dev = NULL;
		else
			bpf_prog_put(tuple.prog);
	}
}

static void netkit_uninit(struct net_device *dev)
{
	netkit_release_all(dev);
}

static void netkit_del_link(struct net_device *dev, struct list_head *head)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	RCU_INIT_POINTER(nk->peer, NULL);
	unregister_netdevice_queue(dev, head);
	if (peer) {
		nk = netkit_priv(peer);
		RCU_INIT_POINTER(nk->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

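/*
 * Only the default xmit policies may be changed after creation, and only
 * through the primary device; mode, scrubbing, peer info, headroom and
 * tailroom are fixed for the lifetime of the pair.
 */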
static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);
	enum netkit_action policy;
	struct nlattr *attr;
	int err, i;
	static const struct {
		u32 attr;
		char *name;
	} fixed_params[] = {
		{ IFLA_NETKIT_MODE, "operating mode" },
		{ IFLA_NETKIT_SCRUB, "scrubbing" },
		{ IFLA_NETKIT_PEER_SCRUB, "peer scrubbing" },
		{ IFLA_NETKIT_PEER_INFO, "peer info" },
		{ IFLA_NETKIT_HEADROOM, "headroom" },
		{ IFLA_NETKIT_TAILROOM, "tailroom" },
	};

	if (!nk->primary) {
		NL_SET_ERR_MSG(extack,
			       "netkit link settings can be changed only through the primary device");
		return -EACCES;
	}

	for (i = 0; i < ARRAY_SIZE(fixed_params); i++) {
		attr = data[fixed_params[i].attr];
		if (attr) {
			NL_SET_ERR_MSG_ATTR_FMT(extack, attr,
						"netkit link %s cannot be changed after device creation",
						fixed_params[i].name);
			return -EACCES;
		}
	}

	if (data[IFLA_NETKIT_POLICY]) {
		attr = data[IFLA_NETKIT_POLICY];
		policy = nla_get_u32(attr);
		err = netkit_check_policy(policy, attr, extack);
		if (err)
			return err;
		WRITE_ONCE(nk->policy, policy);
	}

	if (data[IFLA_NETKIT_PEER_POLICY]) {
		err = -EOPNOTSUPP;
		attr = data[IFLA_NETKIT_PEER_POLICY];
		policy = nla_get_u32(attr);
		if (peer)
			err = netkit_check_policy(policy, attr, extack);
		if (err)
			return err;
		nk = netkit_priv(peer);
		WRITE_ONCE(nk->policy, policy);
	}

	return 0;
}

static size_t netkit_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_SCRUB */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
	       nla_total_size(sizeof(u8)) +  /* IFLA_NETKIT_PRIMARY */
	       nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_HEADROOM */
	       nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_TAILROOM */
	       0;
}

static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	if (nla_put_u8(skb, IFLA_NETKIT_PRIMARY, nk->primary))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_POLICY, nk->policy))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
		return -EMSGSIZE;
	if (nla_put_u16(skb, IFLA_NETKIT_HEADROOM, dev->needed_headroom))
		return -EMSGSIZE;
	if (nla_put_u16(skb, IFLA_NETKIT_TAILROOM, dev->needed_tailroom))
		return -EMSGSIZE;

	if (peer) {
		nk = netkit_priv(peer);
		if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy))
			return -EMSGSIZE;
		if (nla_put_u32(skb, IFLA_NETKIT_PEER_SCRUB, nk->scrub))
			return -EMSGSIZE;
	}

	return 0;
}

static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
	[IFLA_NETKIT_PEER_INFO]		= { .len = sizeof(struct ifinfomsg) },
	[IFLA_NETKIT_MODE]		= NLA_POLICY_MAX(NLA_U32, NETKIT_L3),
	[IFLA_NETKIT_POLICY]		= { .type = NLA_U32 },
	[IFLA_NETKIT_PEER_POLICY]	= { .type = NLA_U32 },
	[IFLA_NETKIT_HEADROOM]		= { .type = NLA_U16 },
	[IFLA_NETKIT_TAILROOM]		= { .type = NLA_U16 },
	[IFLA_NETKIT_SCRUB]		= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
	[IFLA_NETKIT_PEER_SCRUB]	= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
	[IFLA_NETKIT_PRIMARY]		= { .type = NLA_REJECT,
					    .reject_message = "Primary attribute is read-only" },
};

static struct rtnl_link_ops netkit_link_ops = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct netkit),
	.setup			= netkit_setup,
	.newlink		= netkit_new_link,
	.dellink		= netkit_del_link,
	.changelink		= netkit_change_link,
	.get_link_net		= netkit_get_link_net,
	.get_size		= netkit_get_size,
	.fill_info		= netkit_fill_info,
	.policy			= netkit_policy,
	.validate		= netkit_validate,
	.peer_type		= IFLA_NETKIT_PEER_INFO,
	.maxtype		= IFLA_NETKIT_MAX,
};

static __init int netkit_init(void)
{
	BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT ||
		     (int)NETKIT_PASS != (int)TCX_PASS ||
		     (int)NETKIT_DROP != (int)TCX_DROP ||
		     (int)NETKIT_REDIRECT != (int)TCX_REDIRECT);

	return rtnl_link_register(&netkit_link_ops);
}

static __exit void netkit_exit(void)
{
	rtnl_link_unregister(&netkit_link_ops);
}

module_init(netkit_init);
module_exit(netkit_exit);

MODULE_DESCRIPTION("BPF-programmable network device");
MODULE_AUTHOR("Daniel Borkmann <daniel@iogearbox.net>");
MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);