// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
#include <net/flow.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/mld.h>
#include <net/amt.h>
#include <uapi/linux/amt.h>
#include <linux/security.h>
#include <net/gro_cells.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/inet_dscp.h>
#include <net/ip6_checksum.h>

static struct workqueue_struct *amt_wq;

static HLIST_HEAD(source_gc_list);
/* Lock for source_gc_list */
static spinlock_t source_gc_lock;
static struct delayed_work source_gc_wq;
static char *status_str[] = {
	"AMT_STATUS_INIT",
	"AMT_STATUS_SENT_DISCOVERY",
	"AMT_STATUS_RECEIVED_DISCOVERY",
	"AMT_STATUS_SENT_ADVERTISEMENT",
	"AMT_STATUS_RECEIVED_ADVERTISEMENT",
	"AMT_STATUS_SENT_REQUEST",
	"AMT_STATUS_RECEIVED_REQUEST",
	"AMT_STATUS_SENT_QUERY",
	"AMT_STATUS_RECEIVED_QUERY",
	"AMT_STATUS_SENT_UPDATE",
	"AMT_STATUS_RECEIVED_UPDATE",
};

static char *type_str[] = {
	"", /* Type 0 is not defined */
	"AMT_MSG_DISCOVERY",
	"AMT_MSG_ADVERTISEMENT",
	"AMT_MSG_REQUEST",
	"AMT_MSG_MEMBERSHIP_QUERY",
	"AMT_MSG_MEMBERSHIP_UPDATE",
	"AMT_MSG_MULTICAST_DATA",
	"AMT_MSG_TEARDOWN",
};

static char *action_str[] = {
	"AMT_ACT_GMI",
	"AMT_ACT_GMI_ZERO",
	"AMT_ACT_GT",
	"AMT_ACT_STATUS_FWD_NEW",
	"AMT_ACT_STATUS_D_FWD_NEW",
	"AMT_ACT_STATUS_NONE_NEW",
};

static struct igmpv3_grec igmpv3_zero_grec;

#if IS_ENABLED(CONFIG_IPV6)
#define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
static struct mld2_grec mldv2_zero_grec;
#endif

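/* AMT metadata is stored in skb->cb behind the qdisc layer's tc_skb_cb,
 * so that it survives dev_queue_xmit(); the BUILD_BUG_ON() below keeps
 * both structures within the cb area.
 */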
static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	return (struct amt_skb_cb *)((void *)skb->cb +
				     sizeof(struct tc_skb_cb));
}

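/* Free the source nodes that amt_destroy_source() parked on source_gc_list.
 * The nodes are reclaimed via kfree_rcu(), so RCU readers still walking the
 * source hash lists remain safe.
 */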
static void __amt_source_gc_work(void)
{
	struct amt_source_node *snode;
	struct hlist_head gc_list;
	struct hlist_node *t;

	spin_lock_bh(&source_gc_lock);
	hlist_move_list(&source_gc_list, &gc_list);
	spin_unlock_bh(&source_gc_lock);

	hlist_for_each_entry_safe(snode, t, &gc_list, node) {
		hlist_del_rcu(&snode->node);
		kfree_rcu(snode, rcu);
	}
}

static void amt_source_gc_work(struct work_struct *work)
{
	__amt_source_gc_work();

	spin_lock_bh(&source_gc_lock);
	mod_delayed_work(amt_wq, &source_gc_wq,
			 msecs_to_jiffies(AMT_GC_INTERVAL));
	spin_unlock_bh(&source_gc_lock);
}

static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
{
	return !memcmp(a, b, sizeof(union amt_addr));
}

static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
{
	u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);

	return reciprocal_scale(hash, tunnel->amt->hash_buckets);
}

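/* Source entry states: FWD entries are forwarded (INCLUDE-style state),
 * D_FWD entries are blocked (EXCLUDE-style state) and NONE entries carry
 * no forwarding decision yet.  The NEW/OLD flag separates sources touched
 * by the report currently being processed from pre-existing state; the
 * split is resolved by amt_cleanup_srcs() after the report is applied.
 */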
static bool amt_status_filter(struct amt_source_node *snode,
			      enum amt_filter filter)
{
	bool rc = false;

	switch (filter) {
	case AMT_FILTER_FWD:
		if (snode->status == AMT_SOURCE_STATUS_FWD &&
		    snode->flags == AMT_SOURCE_OLD)
			rc = true;
		break;
	case AMT_FILTER_D_FWD:
		if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
		    snode->flags == AMT_SOURCE_OLD)
			rc = true;
		break;
	case AMT_FILTER_FWD_NEW:
		if (snode->status == AMT_SOURCE_STATUS_FWD &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	case AMT_FILTER_D_FWD_NEW:
		if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	case AMT_FILTER_ALL:
		rc = true;
		break;
	case AMT_FILTER_NONE_NEW:
		if (snode->status == AMT_SOURCE_STATUS_NONE &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	case AMT_FILTER_BOTH:
		if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
		     snode->status == AMT_SOURCE_STATUS_FWD) &&
		    snode->flags == AMT_SOURCE_OLD)
			rc = true;
		break;
	case AMT_FILTER_BOTH_NEW:
		if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
		     snode->status == AMT_SOURCE_STATUS_FWD) &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return rc;
}

static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
					      struct amt_group_node *gnode,
					      enum amt_filter filter,
					      union amt_addr *src)
{
	u32 hash = amt_source_hash(tunnel, src);
	struct amt_source_node *snode;

	hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
		if (amt_status_filter(snode, filter) &&
		    amt_addr_equal(&snode->source_addr, src))
			return snode;

	return NULL;
}

static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
{
	u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);

	return reciprocal_scale(hash, tunnel->amt->hash_buckets);
}

static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
					       union amt_addr *group,
					       union amt_addr *host,
					       bool v6)
{
	u32 hash = amt_group_hash(tunnel, group);
	struct amt_group_node *gnode;

	hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
		if (amt_addr_equal(&gnode->group_addr, group) &&
		    amt_addr_equal(&gnode->host_addr, host) &&
		    gnode->v6 == v6)
			return gnode;
	}

	return NULL;
}

static void amt_destroy_source(struct amt_source_node *snode)
{
	struct amt_group_node *gnode = snode->gnode;
	struct amt_tunnel_list *tunnel;

	tunnel = gnode->tunnel_list;

	if (!gnode->v6) {
		netdev_dbg(snode->gnode->amt->dev,
			   "Delete source %pI4 from %pI4\n",
			   &snode->source_addr.ip4,
			   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		netdev_dbg(snode->gnode->amt->dev,
			   "Delete source %pI6 from %pI6\n",
			   &snode->source_addr.ip6,
			   &gnode->group_addr.ip6);
#endif
	}

	cancel_delayed_work(&snode->source_timer);
	hlist_del_init_rcu(&snode->node);
	tunnel->nr_sources--;
	gnode->nr_sources--;
	spin_lock_bh(&source_gc_lock);
	hlist_add_head_rcu(&snode->node, &source_gc_list);
	spin_unlock_bh(&source_gc_lock);
}

static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
{
	struct amt_source_node *snode;
	struct hlist_node *t;
	int i;

	if (cancel_delayed_work(&gnode->group_timer))
		dev_put(amt->dev);
	hlist_del_rcu(&gnode->node);
	gnode->tunnel_list->nr_groups--;

	if (!gnode->v6)
		netdev_dbg(amt->dev, "Leave group %pI4\n",
			   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	else
		netdev_dbg(amt->dev, "Leave group %pI6\n",
			   &gnode->group_addr.ip6);
#endif
	for (i = 0; i < amt->hash_buckets; i++)
		hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
			amt_destroy_source(snode);

	/* tunnel->lock is acquired by the caller of amt_del_group(),
	 * but rcu_read_lock() is also held, so it's safe.
	 */
	kfree_rcu(gnode, rcu);
}

/* If a source timer expires with a router filter-mode for the group of
 * INCLUDE, the router concludes that traffic from this particular
 * source is no longer desired on the attached network, and deletes the
 * associated source record.
 */
static void amt_source_work(struct work_struct *work)
{
	struct amt_source_node *snode = container_of(to_delayed_work(work),
						     struct amt_source_node,
						     source_timer);
	struct amt_group_node *gnode = snode->gnode;
	struct amt_dev *amt = gnode->amt;
	struct amt_tunnel_list *tunnel;

	tunnel = gnode->tunnel_list;
	spin_lock_bh(&tunnel->lock);
	rcu_read_lock();
	if (gnode->filter_mode == MCAST_INCLUDE) {
		amt_destroy_source(snode);
		if (!gnode->nr_sources)
			amt_del_group(amt, gnode);
	} else {
		/* When a router filter-mode for a group is EXCLUDE,
		 * source records are only deleted when the group timer expires.
		 */
		snode->status = AMT_SOURCE_STATUS_D_FWD;
	}
	rcu_read_unlock();
	spin_unlock_bh(&tunnel->lock);
}

static void amt_act_src(struct amt_tunnel_list *tunnel,
			struct amt_group_node *gnode,
			struct amt_source_node *snode,
			enum amt_act act)
{
	struct amt_dev *amt = tunnel->amt;

	switch (act) {
	case AMT_ACT_GMI:
		mod_delayed_work(amt_wq, &snode->source_timer,
				 msecs_to_jiffies(amt_gmi(amt)));
		break;
	case AMT_ACT_GMI_ZERO:
		cancel_delayed_work(&snode->source_timer);
		break;
	case AMT_ACT_GT:
		mod_delayed_work(amt_wq, &snode->source_timer,
				 gnode->group_timer.timer.expires);
		break;
	case AMT_ACT_STATUS_FWD_NEW:
		snode->status = AMT_SOURCE_STATUS_FWD;
		snode->flags = AMT_SOURCE_NEW;
		break;
	case AMT_ACT_STATUS_D_FWD_NEW:
		snode->status = AMT_SOURCE_STATUS_D_FWD;
		snode->flags = AMT_SOURCE_NEW;
		break;
	case AMT_ACT_STATUS_NONE_NEW:
		cancel_delayed_work(&snode->source_timer);
		snode->status = AMT_SOURCE_STATUS_NONE;
		snode->flags = AMT_SOURCE_NEW;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (!gnode->v6)
		netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
			   &snode->source_addr.ip4,
			   &gnode->group_addr.ip4,
			   action_str[act]);
#if IS_ENABLED(CONFIG_IPV6)
	else
		netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
			   &snode->source_addr.ip6,
			   &gnode->group_addr.ip6,
			   action_str[act]);
#endif
}

static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
					       union amt_addr *src)
{
	struct amt_source_node *snode;

	snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
	if (!snode)
		return NULL;

	memcpy(&snode->source_addr, src, sizeof(union amt_addr));
	snode->gnode = gnode;
	snode->status = AMT_SOURCE_STATUS_NONE;
	snode->flags = AMT_SOURCE_NEW;
	INIT_HLIST_NODE(&snode->node);
	INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);

	return snode;
}

/* RFC 3810 - 7.2.2. Definition of Filter Timers
 *
 *  Router Mode          Filter Timer         Actions/Comments
 *  -----------       -----------------       ----------------
 *
 *    INCLUDE             Not Used            All listeners in
 *                                            INCLUDE mode.
 *
 *    EXCLUDE             Timer > 0           At least one listener
 *                                            in EXCLUDE mode.
 *
 *    EXCLUDE             Timer == 0          No more listeners in
 *                                            EXCLUDE mode for the
 *                                            multicast address.
 *                                            If the Requested List
 *                                            is empty, delete
 *                                            Multicast Address
 *                                            Record.  If not, switch
 *                                            to INCLUDE filter mode;
 *                                            the sources in the
 *                                            Requested List are
 *                                            moved to the Include
 *                                            List, and the Exclude
 *                                            List is deleted.
 */
static void amt_group_work(struct work_struct *work)
{
	struct amt_group_node *gnode = container_of(to_delayed_work(work),
						    struct amt_group_node,
						    group_timer);
	struct amt_tunnel_list *tunnel = gnode->tunnel_list;
	struct amt_dev *amt = gnode->amt;
	struct amt_source_node *snode;
	bool delete_group = true;
	struct hlist_node *t;
	int i, buckets;

	buckets = amt->hash_buckets;

	spin_lock_bh(&tunnel->lock);
	if (gnode->filter_mode == MCAST_INCLUDE) {
		/* Not Used */
		spin_unlock_bh(&tunnel->lock);
		goto out;
	}

	rcu_read_lock();
	for (i = 0; i < buckets; i++) {
		hlist_for_each_entry_safe(snode, t,
					  &gnode->sources[i], node) {
			if (!delayed_work_pending(&snode->source_timer) ||
			    snode->status == AMT_SOURCE_STATUS_D_FWD) {
				amt_destroy_source(snode);
			} else {
				delete_group = false;
				snode->status = AMT_SOURCE_STATUS_FWD;
			}
		}
	}
	if (delete_group)
		amt_del_group(amt, gnode);
	else
		gnode->filter_mode = MCAST_INCLUDE;
	rcu_read_unlock();
	spin_unlock_bh(&tunnel->lock);
out:
	dev_put(amt->dev);
}

/* Non-existent group is created as INCLUDE {empty}:
 *
 * RFC 3376 - 5.1. Action on Change of Interface State
 *
 * If no interface state existed for that multicast address before
 * the change (i.e., the change consisted of creating a new
 * per-interface record), or if no state exists after the change
 * (i.e., the change consisted of deleting a per-interface record),
 * then the "non-existent" state is considered to have a filter mode
 * of INCLUDE and an empty source list.
 */
static struct amt_group_node *amt_add_group(struct amt_dev *amt,
					    struct amt_tunnel_list *tunnel,
					    union amt_addr *group,
					    union amt_addr *host,
					    bool v6)
{
	struct amt_group_node *gnode;
	u32 hash;
	int i;

	if (tunnel->nr_groups >= amt->max_groups)
		return ERR_PTR(-ENOSPC);

	gnode = kzalloc(sizeof(*gnode) +
			(sizeof(struct hlist_head) * amt->hash_buckets),
			GFP_ATOMIC);
	if (unlikely(!gnode))
		return ERR_PTR(-ENOMEM);

	gnode->amt = amt;
	gnode->group_addr = *group;
	gnode->host_addr = *host;
	gnode->v6 = v6;
	gnode->tunnel_list = tunnel;
	gnode->filter_mode = MCAST_INCLUDE;
	INIT_HLIST_NODE(&gnode->node);
	INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
	for (i = 0; i < amt->hash_buckets; i++)
		INIT_HLIST_HEAD(&gnode->sources[i]);

	hash = amt_group_hash(tunnel, group);
	hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
	tunnel->nr_groups++;

	if (!gnode->v6)
		netdev_dbg(amt->dev, "Join group %pI4\n",
			   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	else
		netdev_dbg(amt->dev, "Join group %pI6\n",
			   &gnode->group_addr.ip6);
#endif

	return gnode;
}

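/* Build a self-generated IGMPv3 general query (destination 224.0.0.1, with
 * the Router Alert option).  amt_send_igmp_gq() loops the frame back through
 * dev_queue_xmit(), so the IP and IGMP checksums are computed here by hand.
 */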
static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
{
	u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
	int hlen = LL_RESERVED_SPACE(amt->dev);
	int tlen = amt->dev->needed_tailroom;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct ethhdr *eth;
	struct iphdr *iph;
	unsigned int len;
	int offset;

	len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);
	skb_push(skb, sizeof(*eth));
	skb->protocol = htons(ETH_P_IP);
	skb_reset_mac_header(skb);
	skb->priority = TC_PRIO_CONTROL;
	skb_put(skb, sizeof(*iph));
	skb_put_data(skb, ra, sizeof(ra));
	skb_put(skb, sizeof(*ihv3));
	skb_pull(skb, sizeof(*eth));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
	iph->tos = AMT_TOS;
	iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->id = 0;
	iph->protocol = IPPROTO_IGMP;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	iph->saddr = htonl(INADDR_ANY);
	ip_send_check(iph);

	eth = eth_hdr(skb);
	ether_addr_copy(eth->h_source, amt->dev->dev_addr);
	ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);

	ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
	skb_reset_transport_header(skb);
	ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ihv3->code = 1;
	ihv3->group = 0;
	ihv3->qqic = amt->qi;
	ihv3->nsrcs = 0;
	ihv3->resv = 0;
	ihv3->suppress = false;
	ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
	ihv3->csum = 0;
	csum = &ihv3->csum;
	csum_start = (void *)ihv3;
	*csum = ip_compute_csum(csum_start, sizeof(*ihv3));
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	skb->ip_summed = CHECKSUM_NONE;

	skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);

	return skb;
}

static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
				 bool validate)
{
	if (validate && amt->status >= status)
		return;
	netdev_dbg(amt->dev, "Update GW status %s -> %s",
		   status_str[amt->status], status_str[status]);
	WRITE_ONCE(amt->status, status);
}

static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
				      enum amt_status status,
				      bool validate)
{
	if (validate && tunnel->status >= status)
		return;
	netdev_dbg(tunnel->amt->dev,
		   "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
		   &tunnel->ip4, ntohs(tunnel->source_port),
		   status_str[tunnel->status], status_str[status]);
	tunnel->status = status;
}

static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
				    enum amt_status status, bool validate)
{
	spin_lock_bh(&tunnel->lock);
	__amt_update_relay_status(tunnel, status, validate);
	spin_unlock_bh(&tunnel->lock);
}

static void amt_send_discovery(struct amt_dev *amt)
{
	struct amt_header_discovery *amtd;
	int hlen, tlen, offset;
	struct socket *sock;
	struct udphdr *udph;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock)
		goto out;

	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   amt->discovery_ip, amt->local_ip,
				   amt->gw_port, amt->relay_port,
				   IPPROTO_UDP, 0,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	hlen = LL_RESERVED_SPACE(amt->dev);
	tlen = amt->dev->needed_tailroom;
	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		ip_rt_put(rt);
		amt->dev->stats.tx_errors++;
		goto out;
	}

	skb->priority = TC_PRIO_CONTROL;
	skb_dst_set(skb, &rt->dst);

	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
	amtd->version = 0;
	amtd->type = AMT_MSG_DISCOVERY;
	amtd->reserved = 0;
	amtd->nonce = amt->nonce;
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = amt->gw_port;
	udph->dest = amt->relay_port;
	udph->len = htons(sizeof(*udph) + sizeof(*amtd));
	udph->check = 0;
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
					sizeof(*udph) + sizeof(*amtd),
					IPPROTO_UDP, skb->csum);

	skb_push(skb, sizeof(*iph));
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)) >> 2;
	iph->tos = AMT_TOS;
	iph->frag_off = 0;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->daddr = amt->discovery_ip;
	iph->saddr = amt->local_ip;
	iph->protocol = IPPROTO_UDP;
	iph->tot_len = htons(len);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(amt->net, skb, NULL);
	ip_send_check(iph);
	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;

	amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
out:
	rcu_read_unlock();
}

static void amt_send_request(struct amt_dev *amt, bool v6)
{
	struct amt_header_request *amtrh;
	int hlen, tlen, offset;
	struct socket *sock;
	struct udphdr *udph;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock)
		goto out;

	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   amt->remote_ip, amt->local_ip,
				   amt->gw_port, amt->relay_port,
				   IPPROTO_UDP, 0,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	hlen = LL_RESERVED_SPACE(amt->dev);
	tlen = amt->dev->needed_tailroom;
	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		ip_rt_put(rt);
		amt->dev->stats.tx_errors++;
		goto out;
	}

	skb->priority = TC_PRIO_CONTROL;
	skb_dst_set(skb, &rt->dst);

	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
	amtrh->version = 0;
	amtrh->type = AMT_MSG_REQUEST;
	amtrh->reserved1 = 0;
	amtrh->p = v6;
	amtrh->reserved2 = 0;
	amtrh->nonce = amt->nonce;
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = amt->gw_port;
	udph->dest = amt->relay_port;
	udph->len = htons(sizeof(*amtrh) + sizeof(*udph));
	udph->check = 0;
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
					sizeof(*udph) + sizeof(*amtrh),
					IPPROTO_UDP, skb->csum);

	skb_push(skb, sizeof(*iph));
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)) >> 2;
	iph->tos = AMT_TOS;
	iph->frag_off = 0;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->daddr = amt->remote_ip;
	iph->saddr = amt->local_ip;
	iph->protocol = IPPROTO_UDP;
	iph->tot_len = htons(len);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(amt->net, skb, NULL);
	ip_send_check(iph);
	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;

out:
	rcu_read_unlock();
}

static void amt_send_igmp_gq(struct amt_dev *amt,
			     struct amt_tunnel_list *tunnel)
{
	struct sk_buff *skb;

	skb = amt_build_igmp_gq(amt);
	if (!skb)
		return;

	amt_skb_cb(skb)->tunnel = tunnel;
	dev_queue_xmit(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
{
	u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
				   2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
	int hlen = LL_RESERVED_SPACE(amt->dev);
	int tlen = amt->dev->needed_tailroom;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	struct ipv6hdr *ip6h;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u32 len;

	len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);
	skb_push(skb, sizeof(*eth));
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = htons(ETH_P_IPV6);
	skb_put_zero(skb, sizeof(*ip6h));
	skb_put_data(skb, ra, sizeof(ra));
	skb_put_zero(skb, sizeof(*mld2q));
	skb_pull(skb, sizeof(*eth));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q));
	ip6h->nexthdr = NEXTHDR_HOP;
	ip6h->hop_limit = 1;
	ip6h->daddr = mld2_all_node;
	ip6_flow_hdr(ip6h, 0, 0);

	if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		amt->dev->stats.tx_errors++;
		kfree_skb(skb);
		return NULL;
	}

	eth->h_proto = htons(ETH_P_IPV6);
	ether_addr_copy(eth->h_source, amt->dev->dev_addr);
	ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);

	skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
	skb_reset_transport_header(skb);
	mld2q = (struct mld2_query *)icmp6_hdr(skb);
	mld2q->mld2q_mrc = htons(1);
	mld2q->mld2q_type = ICMPV6_MGM_QUERY;
	mld2q->mld2q_code = 0;
	mld2q->mld2q_cksum = 0;
	mld2q->mld2q_resv1 = 0;
	mld2q->mld2q_resv2 = 0;
	mld2q->mld2q_suppress = 0;
	mld2q->mld2q_qrv = amt->qrv;
	mld2q->mld2q_nsrcs = 0;
	mld2q->mld2q_qqic = amt->qi;
	csum_start = (void *)mld2q;
	mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					     sizeof(*mld2q),
					     IPPROTO_ICMPV6,
					     csum_partial(csum_start,
							  sizeof(*mld2q), 0));

	skb->ip_summed = CHECKSUM_NONE;
	skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
	return skb;
}

static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
{
	struct sk_buff *skb;

	skb = amt_build_mld_gq(amt);
	if (!skb)
		return;

	amt_skb_cb(skb)->tunnel = tunnel;
	dev_queue_xmit(skb);
}
#else
static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
{
}
#endif

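/* Queue an event on the fixed-size per-device ring and kick the event
 * worker.  Returns true when the ring is full, i.e. the event was dropped
 * and the caller has to reschedule it.
 */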
static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
			    struct sk_buff *skb)
{
	int index;

	spin_lock_bh(&amt->lock);
	if (amt->nr_events >= AMT_MAX_EVENTS) {
		spin_unlock_bh(&amt->lock);
		return true;
	}

	index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
	amt->events[index].event = event;
	amt->events[index].skb = skb;
	amt->nr_events++;
	amt->event_idx %= AMT_MAX_EVENTS;
	queue_work(amt_wq, &amt->event_wq);
	spin_unlock_bh(&amt->lock);

	return false;
}

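/* Periodically refresh the SipHash secret so that response MACs derived
 * from an older key age out on their own.
 */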
static void amt_secret_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(to_delayed_work(work),
					   struct amt_dev,
					   secret_wq);

	spin_lock_bh(&amt->lock);
	get_random_bytes(&amt->key, sizeof(siphash_key_t));
	spin_unlock_bh(&amt->lock);
	mod_delayed_work(amt_wq, &amt->secret_wq,
			 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
}

static void amt_event_send_discovery(struct amt_dev *amt)
{
	if (amt->status > AMT_STATUS_SENT_DISCOVERY)
		goto out;
	get_random_bytes(&amt->nonce, sizeof(__be32));

	amt_send_discovery(amt);
out:
	mod_delayed_work(amt_wq, &amt->discovery_wq,
			 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
}

static void amt_discovery_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(to_delayed_work(work),
					   struct amt_dev,
					   discovery_wq);

	if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
		mod_delayed_work(amt_wq, &amt->discovery_wq,
				 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
}

static void amt_event_send_request(struct amt_dev *amt)
{
	u32 exp;

	if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
		goto out;

	if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
		netdev_dbg(amt->dev, "Gateway is not ready");
		amt->qi = AMT_INIT_REQ_TIMEOUT;
		WRITE_ONCE(amt->ready4, false);
		WRITE_ONCE(amt->ready6, false);
		amt->remote_ip = 0;
		amt_update_gw_status(amt, AMT_STATUS_INIT, false);
		amt->req_cnt = 0;
		amt->nonce = 0;
		goto out;
	}

	if (!amt->req_cnt) {
		WRITE_ONCE(amt->ready4, false);
		WRITE_ONCE(amt->ready6, false);
		get_random_bytes(&amt->nonce, sizeof(__be32));
	}

	amt_send_request(amt, false);
	amt_send_request(amt, true);
	amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
	amt->req_cnt++;
out:
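	/* Exponential backoff: retry after 2^req_cnt seconds, capped at
	 * AMT_MAX_REQ_TIMEOUT.
	 */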
	exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
	mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
}

static void amt_req_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(to_delayed_work(work),
					   struct amt_dev,
					   req_wq);

	if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
		mod_delayed_work(amt_wq, &amt->req_wq,
				 msecs_to_jiffies(100));
}

static bool amt_send_membership_update(struct amt_dev *amt,
				       struct sk_buff *skb,
				       bool v6)
{
	struct amt_header_membership_update *amtmu;
	struct socket *sock;
	struct iphdr *iph;
	struct flowi4 fl4;
	struct rtable *rt;
	int err;

	sock = rcu_dereference_bh(amt->sock);
	if (!sock)
		return true;

	err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
			   sizeof(*iph) + sizeof(struct udphdr));
	if (err)
		return true;

	skb_reset_inner_headers(skb);
	memset(&fl4, 0, sizeof(struct flowi4));
	fl4.flowi4_oif = amt->stream_dev->ifindex;
	fl4.daddr = amt->remote_ip;
	fl4.saddr = amt->local_ip;
	fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
	fl4.flowi4_proto = IPPROTO_UDP;
	rt = ip_route_output_key(amt->net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
		return true;
	}

	amtmu = skb_push(skb, sizeof(*amtmu));
	amtmu->version = 0;
	amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE;
	amtmu->reserved = 0;
	amtmu->nonce = amt->nonce;
	amtmu->response_mac = amt->mac;

	if (!v6)
		skb_set_inner_protocol(skb, htons(ETH_P_IP));
	else
		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
	udp_tunnel_xmit_skb(rt, sock->sk, skb,
			    fl4.saddr,
			    fl4.daddr,
			    AMT_TOS,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    amt->gw_port,
			    amt->relay_port,
			    false,
			    false,
			    0);
	amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
	return false;
}

static void amt_send_multicast_data(struct amt_dev *amt,
				    const struct sk_buff *oskb,
				    struct amt_tunnel_list *tunnel,
				    bool v6)
{
	struct amt_header_mcast_data *amtmd;
	struct socket *sock;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct flowi4 fl4;
	struct rtable *rt;

	sock = rcu_dereference_bh(amt->sock);
	if (!sock)
		return;

	skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
			      sizeof(struct udphdr), 0, GFP_ATOMIC);
	if (!skb)
		return;

	skb_reset_inner_headers(skb);
	memset(&fl4, 0, sizeof(struct flowi4));
	fl4.flowi4_oif = amt->stream_dev->ifindex;
	fl4.daddr = tunnel->ip4;
	fl4.saddr = amt->local_ip;
	fl4.flowi4_proto = IPPROTO_UDP;
	rt = ip_route_output_key(amt->net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
		kfree_skb(skb);
		return;
	}

	amtmd = skb_push(skb, sizeof(*amtmd));
	amtmd->version = 0;
	amtmd->reserved = 0;
	amtmd->type = AMT_MSG_MULTICAST_DATA;

	if (!v6)
		skb_set_inner_protocol(skb, htons(ETH_P_IP));
	else
		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
	udp_tunnel_xmit_skb(rt, sock->sk, skb,
			    fl4.saddr,
			    fl4.daddr,
			    AMT_TOS,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    amt->relay_port,
			    tunnel->source_port,
			    false,
			    false,
			    0);
}

static bool amt_send_membership_query(struct amt_dev *amt,
				      struct sk_buff *skb,
				      struct amt_tunnel_list *tunnel,
				      bool v6)
{
	struct amt_header_membership_query *amtmq;
	struct socket *sock;
	struct rtable *rt;
	struct flowi4 fl4;
	int err;

	sock = rcu_dereference_bh(amt->sock);
	if (!sock)
		return true;

	err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
			   sizeof(struct iphdr) + sizeof(struct udphdr));
	if (err)
		return true;

	skb_reset_inner_headers(skb);
	memset(&fl4, 0, sizeof(struct flowi4));
	fl4.flowi4_oif = amt->stream_dev->ifindex;
	fl4.daddr = tunnel->ip4;
	fl4.saddr = amt->local_ip;
	fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
	fl4.flowi4_proto = IPPROTO_UDP;
	rt = ip_route_output_key(amt->net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
		return true;
	}

	amtmq = skb_push(skb, sizeof(*amtmq));
	amtmq->version = 0;
	amtmq->type = AMT_MSG_MEMBERSHIP_QUERY;
	amtmq->reserved = 0;
	amtmq->l = 0;
	amtmq->g = 0;
	amtmq->nonce = tunnel->nonce;
	amtmq->response_mac = tunnel->mac;

	if (!v6)
		skb_set_inner_protocol(skb, htons(ETH_P_IP));
	else
		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
	udp_tunnel_xmit_skb(rt, sock->sk, skb,
			    fl4.saddr,
			    fl4.daddr,
			    AMT_TOS,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    amt->relay_port,
			    tunnel->source_port,
			    false,
			    false,
			    0);
	amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
	return false;
}

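/* ndo_start_xmit handler.  In gateway mode, locally generated IGMP/MLD
 * reports are encapsulated toward the relay.  In relay mode, looped-back
 * queries are encapsulated to the gateway tunnel recorded in the skb cb,
 * and multicast data is replicated to every tunnel that joined the group.
 */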
static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	struct amt_tunnel_list *tunnel;
	struct amt_group_node *gnode;
	union amt_addr group = {0,};
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *ip6h;
	struct mld_msg *mld;
#endif
	bool report = false;
	struct igmphdr *ih;
	bool query = false;
	struct iphdr *iph;
	bool data = false;
	bool v6 = false;
	u32 hash;

	iph = ip_hdr(skb);
	if (iph->version == 4) {
		if (!ipv4_is_multicast(iph->daddr))
			goto free;

		if (!ip_mc_check_igmp(skb)) {
			ih = igmp_hdr(skb);
			switch (ih->type) {
			case IGMPV3_HOST_MEMBERSHIP_REPORT:
			case IGMP_HOST_MEMBERSHIP_REPORT:
				report = true;
				break;
			case IGMP_HOST_MEMBERSHIP_QUERY:
				query = true;
				break;
			default:
				goto free;
			}
		} else {
			data = true;
		}
		v6 = false;
		group.ip4 = iph->daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (iph->version == 6) {
		ip6h = ipv6_hdr(skb);
		if (!ipv6_addr_is_multicast(&ip6h->daddr))
			goto free;

		if (!ipv6_mc_check_mld(skb)) {
			mld = (struct mld_msg *)skb_transport_header(skb);
			switch (mld->mld_type) {
			case ICMPV6_MGM_REPORT:
			case ICMPV6_MLD2_REPORT:
				report = true;
				break;
			case ICMPV6_MGM_QUERY:
				query = true;
				break;
			default:
				goto free;
			}
		} else {
			data = true;
		}
		v6 = true;
		group.ip6 = ip6h->daddr;
#endif
	} else {
		dev->stats.tx_errors++;
		goto free;
	}

	if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
		goto free;

	skb_pull(skb, sizeof(struct ethhdr));

	if (amt->mode == AMT_MODE_GATEWAY) {
		/* Gateway only passes IGMP/MLD packets */
		if (!report)
			goto free;
		if ((!v6 && !READ_ONCE(amt->ready4)) ||
		    (v6 && !READ_ONCE(amt->ready6)))
			goto free;
		if (amt_send_membership_update(amt, skb, v6))
			goto free;
		goto unlock;
	} else if (amt->mode == AMT_MODE_RELAY) {
		if (query) {
			tunnel = amt_skb_cb(skb)->tunnel;
			if (!tunnel) {
				WARN_ON(1);
				goto free;
			}

			/* Do not forward unexpected query */
			if (amt_send_membership_query(amt, skb, tunnel, v6))
				goto free;
			goto unlock;
		}

		if (!data)
			goto free;
		list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
			hash = amt_group_hash(tunnel, &group);
			hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
						 node) {
				if (!v6) {
					if (gnode->group_addr.ip4 == iph->daddr)
						goto found;
#if IS_ENABLED(CONFIG_IPV6)
				} else {
					if (ipv6_addr_equal(&gnode->group_addr.ip6,
							    &ip6h->daddr))
						goto found;
#endif
				}
			}
			continue;
found:
			amt_send_multicast_data(amt, skb, tunnel, v6);
		}
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
free:
	dev_kfree_skb(skb);
unlock:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static int amt_parse_type(struct sk_buff *skb)
{
	struct amt_header *amth;

	if (!pskb_may_pull(skb, sizeof(struct udphdr) +
			   sizeof(struct amt_header)))
		return -1;

	amth = (struct amt_header *)(udp_hdr(skb) + 1);

	if (amth->version != 0)
		return -1;

	if (amth->type >= __AMT_MSG_MAX || !amth->type)
		return -1;
	return amth->type;
}

static void amt_clear_groups(struct amt_tunnel_list *tunnel)
{
	struct amt_dev *amt = tunnel->amt;
	struct amt_group_node *gnode;
	struct hlist_node *t;
	int i;

	spin_lock_bh(&tunnel->lock);
	rcu_read_lock();
	for (i = 0; i < amt->hash_buckets; i++)
		hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
			amt_del_group(amt, gnode);
	rcu_read_unlock();
	spin_unlock_bh(&tunnel->lock);
}

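/* Relay-side tunnel expiry: once a gateway stops refreshing its tunnel
 * entry, tear the tunnel down together with all of its group state.
 */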
static void amt_tunnel_expire(struct work_struct *work)
{
	struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
						      struct amt_tunnel_list,
						      gc_wq);
	struct amt_dev *amt = tunnel->amt;

	spin_lock_bh(&amt->lock);
	rcu_read_lock();
	list_del_rcu(&tunnel->list);
	amt->nr_tunnels--;
	amt_clear_groups(tunnel);
	rcu_read_unlock();
	spin_unlock_bh(&amt->lock);
	kfree_rcu(tunnel, rcu);
}

static void amt_cleanup_srcs(struct amt_dev *amt,
			     struct amt_tunnel_list *tunnel,
			     struct amt_group_node *gnode)
{
	struct amt_source_node *snode;
	struct hlist_node *t;
	int i;

	/* Delete old sources */
	for (i = 0; i < amt->hash_buckets; i++) {
		hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
			if (snode->flags == AMT_SOURCE_OLD)
				amt_destroy_source(snode);
		}
	}

	/* switch from new to old */
	for (i = 0; i < amt->hash_buckets; i++) {
		hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
			snode->flags = AMT_SOURCE_OLD;
			if (!gnode->v6)
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as OLD %pI4 from %pI4\n",
					   &snode->source_addr.ip4,
					   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
			else
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as OLD %pI6 from %pI6\n",
					   &snode->source_addr.ip6,
					   &gnode->group_addr.ip6);
#endif
		}
	}
}

static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
			 struct amt_group_node *gnode, void *grec,
			 bool v6)
{
	struct igmpv3_grec *igmp_grec;
	struct amt_source_node *snode;
#if IS_ENABLED(CONFIG_IPV6)
	struct mld2_grec *mld_grec;
#endif
	union amt_addr src = {0,};
	u16 nsrcs;
	u32 hash;
	int i;

	if (!v6) {
		igmp_grec = grec;
		nsrcs = ntohs(igmp_grec->grec_nsrcs);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		mld_grec = grec;
		nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
		return;
#endif
	}
	for (i = 0; i < nsrcs; i++) {
		if (tunnel->nr_sources >= amt->max_sources)
			return;
		if (!v6)
			src.ip4 = igmp_grec->grec_src[i];
#if IS_ENABLED(CONFIG_IPV6)
		else
			memcpy(&src.ip6, &mld_grec->grec_src[i],
			       sizeof(struct in6_addr));
#endif
		if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
			continue;

		snode = amt_alloc_snode(gnode, &src);
		if (snode) {
			hash = amt_source_hash(tunnel, &snode->source_addr);
			hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
			tunnel->nr_sources++;
			gnode->nr_sources++;

			if (!gnode->v6)
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as NEW %pI4 from %pI4\n",
					   &snode->source_addr.ip4,
					   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
			else
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as NEW %pI6 from %pI6\n",
					   &snode->source_addr.ip6,
					   &gnode->group_addr.ip6);
#endif
		}
	}
}

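/* The handlers below implement the RFC 3376 6.4.1/6.4.2 router state
 * transition tables.  Each transition is decomposed into calls to
 * amt_lookup_act_srcs(): ops selects the set operation between the current
 * source lists (X/Y) and the report's sources (A/B), filter selects which
 * current entries take part, and act applies the resulting status or timer.
 * The worked example below shows the EXCLUDE/IS_IN transition decomposed
 * this way.
 */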
1454 /* Router State Report Rec'd New Router State
1455 * ------------ ------------ ----------------
1456 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A)
1457 *
1458 * -----------+-----------+-----------+
1459 * | OLD | NEW |
1460 * -----------+-----------+-----------+
1461 * FWD | X | X+A |
1462 * -----------+-----------+-----------+
1463 * D_FWD | Y | Y-A |
1464 * -----------+-----------+-----------+
1465 * NONE | | A |
1466 * -----------+-----------+-----------+
1467 *
1468 * a) Received sources are NONE/NEW
1469 * b) All NONE will be deleted by amt_cleanup_srcs().
1470 * c) All OLD will be deleted by amt_cleanup_srcs().
1471 * d) After delete, NEW source will be switched to OLD.
1472 */
amt_lookup_act_srcs(struct amt_tunnel_list * tunnel,struct amt_group_node * gnode,void * grec,enum amt_ops ops,enum amt_filter filter,enum amt_act act,bool v6)1473 static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
1474 struct amt_group_node *gnode,
1475 void *grec,
1476 enum amt_ops ops,
1477 enum amt_filter filter,
1478 enum amt_act act,
1479 bool v6)
1480 {
1481 struct amt_dev *amt = tunnel->amt;
1482 struct amt_source_node *snode;
1483 struct igmpv3_grec *igmp_grec;
1484 #if IS_ENABLED(CONFIG_IPV6)
1485 struct mld2_grec *mld_grec;
1486 #endif
1487 union amt_addr src = {0,};
1488 struct hlist_node *t;
1489 u16 nsrcs;
1490 int i, j;
1491
1492 if (!v6) {
1493 igmp_grec = grec;
1494 nsrcs = ntohs(igmp_grec->grec_nsrcs);
1495 } else {
1496 #if IS_ENABLED(CONFIG_IPV6)
1497 mld_grec = grec;
1498 nsrcs = ntohs(mld_grec->grec_nsrcs);
1499 #else
1500 return;
1501 #endif
1502 }
1503
1504 memset(&src, 0, sizeof(union amt_addr));
1505 switch (ops) {
1506 case AMT_OPS_INT:
1507 /* A*B */
1508 for (i = 0; i < nsrcs; i++) {
1509 if (!v6)
1510 src.ip4 = igmp_grec->grec_src[i];
1511 #if IS_ENABLED(CONFIG_IPV6)
1512 else
1513 memcpy(&src.ip6, &mld_grec->grec_src[i],
1514 sizeof(struct in6_addr));
1515 #endif
1516 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1517 if (!snode)
1518 continue;
1519 amt_act_src(tunnel, gnode, snode, act);
1520 }
1521 break;
1522 case AMT_OPS_UNI:
1523 /* A+B */
1524 for (i = 0; i < amt->hash_buckets; i++) {
1525 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1526 node) {
1527 if (amt_status_filter(snode, filter))
1528 amt_act_src(tunnel, gnode, snode, act);
1529 }
1530 }
1531 for (i = 0; i < nsrcs; i++) {
1532 if (!v6)
1533 src.ip4 = igmp_grec->grec_src[i];
1534 #if IS_ENABLED(CONFIG_IPV6)
1535 else
1536 memcpy(&src.ip6, &mld_grec->grec_src[i],
1537 sizeof(struct in6_addr));
1538 #endif
1539 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1540 if (!snode)
1541 continue;
1542 amt_act_src(tunnel, gnode, snode, act);
1543 }
1544 break;
1545 case AMT_OPS_SUB:
1546 /* A-B */
1547 for (i = 0; i < amt->hash_buckets; i++) {
1548 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1549 node) {
1550 if (!amt_status_filter(snode, filter))
1551 continue;
1552 for (j = 0; j < nsrcs; j++) {
1553 if (!v6)
1554 src.ip4 = igmp_grec->grec_src[j];
1555 #if IS_ENABLED(CONFIG_IPV6)
1556 else
1557 memcpy(&src.ip6,
1558 &mld_grec->grec_src[j],
1559 sizeof(struct in6_addr));
1560 #endif
1561 if (amt_addr_equal(&snode->source_addr,
1562 &src))
1563 goto out_sub;
1564 }
1565 amt_act_src(tunnel, gnode, snode, act);
1566 continue;
1567 out_sub:;
1568 }
1569 }
1570 break;
1571 case AMT_OPS_SUB_REV:
1572 /* B-A */
1573 for (i = 0; i < nsrcs; i++) {
1574 if (!v6)
1575 src.ip4 = igmp_grec->grec_src[i];
1576 #if IS_ENABLED(CONFIG_IPV6)
1577 else
1578 memcpy(&src.ip6, &mld_grec->grec_src[i],
1579 sizeof(struct in6_addr));
1580 #endif
1581 snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
1582 &src);
1583 if (!snode) {
1584 snode = amt_lookup_src(tunnel, gnode,
1585 filter, &src);
1586 if (snode)
1587 amt_act_src(tunnel, gnode, snode, act);
1588 }
1589 }
1590 break;
1591 default:
1592 netdev_dbg(amt->dev, "Invalid type\n");
1593 return;
1594 }
1595 }
1596
amt_mcast_is_in_handler(struct amt_dev * amt,struct amt_tunnel_list * tunnel,struct amt_group_node * gnode,void * grec,void * zero_grec,bool v6)1597 static void amt_mcast_is_in_handler(struct amt_dev *amt,
1598 struct amt_tunnel_list *tunnel,
1599 struct amt_group_node *gnode,
1600 void *grec, void *zero_grec, bool v6)
1601 {
1602 if (gnode->filter_mode == MCAST_INCLUDE) {
1603 /* Router State Report Rec'd New Router State Actions
1604 * ------------ ------------ ---------------- -------
1605 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
1606 */
1607 /* Update IS_IN (B) as FWD/NEW */
1608 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1609 AMT_FILTER_NONE_NEW,
1610 AMT_ACT_STATUS_FWD_NEW,
1611 v6);
1612 /* Update INCLUDE (A) as NEW */
1613 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1614 AMT_FILTER_FWD,
1615 AMT_ACT_STATUS_FWD_NEW,
1616 v6);
1617 /* (B)=GMI */
1618 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1619 AMT_FILTER_FWD_NEW,
1620 AMT_ACT_GMI,
1621 v6);
1622 } else {
1623 /* State Actions
1624 * ------------ ------------ ---------------- -------
1625 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1626 */
1627 /* Update (A) in (X, Y) as NONE/NEW */
1628 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1629 AMT_FILTER_BOTH,
1630 AMT_ACT_STATUS_NONE_NEW,
1631 v6);
1632 /* Update FWD/OLD as FWD/NEW */
1633 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1634 AMT_FILTER_FWD,
1635 AMT_ACT_STATUS_FWD_NEW,
1636 v6);
1637 /* Update IS_IN (A) as FWD/NEW */
1638 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1639 AMT_FILTER_NONE_NEW,
1640 AMT_ACT_STATUS_FWD_NEW,
1641 v6);
1642 /* Update EXCLUDE (, Y-A) as D_FWD_NEW */
1643 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1644 AMT_FILTER_D_FWD,
1645 AMT_ACT_STATUS_D_FWD_NEW,
1646 v6);
1647 }
1648 }
1649
amt_mcast_is_ex_handler(struct amt_dev * amt,struct amt_tunnel_list * tunnel,struct amt_group_node * gnode,void * grec,void * zero_grec,bool v6)1650 static void amt_mcast_is_ex_handler(struct amt_dev *amt,
1651 struct amt_tunnel_list *tunnel,
1652 struct amt_group_node *gnode,
1653 void *grec, void *zero_grec, bool v6)
1654 {
1655 if (gnode->filter_mode == MCAST_INCLUDE) {
1656 /* Router State Report Rec'd New Router State Actions
1657 * ------------ ------------ ---------------- -------
1658 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1659 * Delete (A-B)
1660 * Group Timer=GMI
1661 */
1662 /* EXCLUDE(A*B, ) */
1663 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1664 AMT_FILTER_FWD,
1665 AMT_ACT_STATUS_FWD_NEW,
1666 v6);
1667 /* EXCLUDE(, B-A) */
1668 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1669 AMT_FILTER_FWD,
1670 AMT_ACT_STATUS_D_FWD_NEW,
1671 v6);
1672 /* (B-A)=0 */
1673 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1674 AMT_FILTER_D_FWD_NEW,
1675 AMT_ACT_GMI_ZERO,
1676 v6);
1677 /* Group Timer=GMI */
1678 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1679 msecs_to_jiffies(amt_gmi(amt))))
1680 dev_hold(amt->dev);
1681 gnode->filter_mode = MCAST_EXCLUDE;
1682 /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1683 } else {
1684 /* Router State Report Rec'd New Router State Actions
1685 * ------------ ------------ ---------------- -------
1686 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
1687 * Delete (X-A)
1688 * Delete (Y-A)
1689 * Group Timer=GMI
1690 */
1691 /* EXCLUDE (A-Y, ) */
1692 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1693 AMT_FILTER_D_FWD,
1694 AMT_ACT_STATUS_FWD_NEW,
1695 v6);
1696 /* EXCLUDE (, Y*A ) */
1697 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1698 AMT_FILTER_D_FWD,
1699 AMT_ACT_STATUS_D_FWD_NEW,
1700 v6);
1701 /* (A-X-Y)=GMI */
1702 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1703 AMT_FILTER_BOTH_NEW,
1704 AMT_ACT_GMI,
1705 v6);
1706 /* Group Timer=GMI */
1707 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1708 msecs_to_jiffies(amt_gmi(amt))))
1709 dev_hold(amt->dev);
1710 /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
1711 }
1712 }
1713
amt_mcast_to_in_handler(struct amt_dev * amt,struct amt_tunnel_list * tunnel,struct amt_group_node * gnode,void * grec,void * zero_grec,bool v6)1714 static void amt_mcast_to_in_handler(struct amt_dev *amt,
1715 struct amt_tunnel_list *tunnel,
1716 struct amt_group_node *gnode,
1717 void *grec, void *zero_grec, bool v6)
1718 {
1719 if (gnode->filter_mode == MCAST_INCLUDE) {
1720 /* Router State Report Rec'd New Router State Actions
1721 * ------------ ------------ ---------------- -------
1722 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
1723 * Send Q(G,A-B)
1724 */
1725 /* Update TO_IN (B) sources as FWD/NEW */
1726 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1727 AMT_FILTER_NONE_NEW,
1728 AMT_ACT_STATUS_FWD_NEW,
1729 v6);
1730 /* Update INCLUDE (A) sources as NEW */
1731 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1732 AMT_FILTER_FWD,
1733 AMT_ACT_STATUS_FWD_NEW,
1734 v6);
1735 /* (B)=GMI */
1736 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1737 AMT_FILTER_FWD_NEW,
1738 AMT_ACT_GMI,
1739 v6);
1740 } else {
1741 /* Router State Report Rec'd New Router State Actions
1742 * ------------ ------------ ---------------- -------
1743 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1744 * Send Q(G,X-A)
1745 * Send Q(G)
1746 */
1747 /* Update TO_IN (A) sources as FWD/NEW */
1748 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1749 AMT_FILTER_NONE_NEW,
1750 AMT_ACT_STATUS_FWD_NEW,
1751 v6);
1752 /* Update EXCLUDE(X,) sources as FWD/NEW */
1753 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1754 AMT_FILTER_FWD,
1755 AMT_ACT_STATUS_FWD_NEW,
1756 v6);
1757 /* EXCLUDE (, Y-A)
1758 * (A) are already switched to FWD_NEW.
1759 * So, D_FWD/OLD -> D_FWD/NEW is okay.
1760 */
1761 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1762 AMT_FILTER_D_FWD,
1763 AMT_ACT_STATUS_D_FWD_NEW,
1764 v6);
1765 /* (A)=GMI
1766 * Only FWD_NEW will have (A) sources.
1767 */
1768 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1769 AMT_FILTER_FWD_NEW,
1770 AMT_ACT_GMI,
1771 v6);
1772 }
1773 }
1774
amt_mcast_to_ex_handler(struct amt_dev * amt,struct amt_tunnel_list * tunnel,struct amt_group_node * gnode,void * grec,void * zero_grec,bool v6)1775 static void amt_mcast_to_ex_handler(struct amt_dev *amt,
1776 struct amt_tunnel_list *tunnel,
1777 struct amt_group_node *gnode,
1778 void *grec, void *zero_grec, bool v6)
1779 {
1780 if (gnode->filter_mode == MCAST_INCLUDE) {
1781 /* Router State Report Rec'd New Router State Actions
1782 * ------------ ------------ ---------------- -------
1783 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1784 * Delete (A-B)
1785 * Send Q(G,A*B)
1786 * Group Timer=GMI
1787 */
1788 /* EXCLUDE (A*B, ) */
1789 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1790 AMT_FILTER_FWD,
1791 AMT_ACT_STATUS_FWD_NEW,
1792 v6);
1793 /* EXCLUDE (, B-A) */
1794 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1795 AMT_FILTER_FWD,
1796 AMT_ACT_STATUS_D_FWD_NEW,
1797 v6);
1798 /* (B-A)=0 */
1799 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1800 AMT_FILTER_D_FWD_NEW,
1801 AMT_ACT_GMI_ZERO,
1802 v6);
1803 /* Group Timer=GMI */
1804 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1805 msecs_to_jiffies(amt_gmi(amt))))
1806 dev_hold(amt->dev);
1807 gnode->filter_mode = MCAST_EXCLUDE;
1808 /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1809 } else {
1810 /* Router State Report Rec'd New Router State Actions
1811 * ------------ ------------ ---------------- -------
1812 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
1813 * Delete (X-A)
1814 * Delete (Y-A)
1815 * Send Q(G,A-Y)
1816 * Group Timer=GMI
1817 */
1818 /* Update (A-X-Y) as NONE/OLD */
1819 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1820 AMT_FILTER_BOTH,
1821 AMT_ACT_GT,
1822 v6);
1823 /* EXCLUDE (A-Y, ) */
1824 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1825 AMT_FILTER_D_FWD,
1826 AMT_ACT_STATUS_FWD_NEW,
1827 v6);
1828 /* EXCLUDE (, Y*A) */
1829 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1830 AMT_FILTER_D_FWD,
1831 AMT_ACT_STATUS_D_FWD_NEW,
1832 v6);
1833 /* Group Timer=GMI */
1834 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1835 msecs_to_jiffies(amt_gmi(amt))))
1836 dev_hold(amt->dev);
1837 /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
1838 }
1839 }
1840
amt_mcast_allow_handler(struct amt_dev * amt,struct amt_tunnel_list * tunnel,struct amt_group_node * gnode,void * grec,void * zero_grec,bool v6)1841 static void amt_mcast_allow_handler(struct amt_dev *amt,
1842 struct amt_tunnel_list *tunnel,
1843 struct amt_group_node *gnode,
1844 void *grec, void *zero_grec, bool v6)
1845 {
1846 if (gnode->filter_mode == MCAST_INCLUDE) {
1847 /* Router State Report Rec'd New Router State Actions
1848 * ------------ ------------ ---------------- -------
1849 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
1850 */
1851 /* INCLUDE (A+B) */
1852 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1853 AMT_FILTER_FWD,
1854 AMT_ACT_STATUS_FWD_NEW,
1855 v6);
1856 /* (B)=GMI */
1857 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1858 AMT_FILTER_FWD_NEW,
1859 AMT_ACT_GMI,
1860 v6);
1861 } else {
1862 /* Router State Report Rec'd New Router State Actions
1863 * ------------ ------------ ---------------- -------
1864 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
1865 */
1866 /* EXCLUDE (X+A, ) */
1867 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1868 AMT_FILTER_FWD,
1869 AMT_ACT_STATUS_FWD_NEW,
1870 v6);
1871 /* EXCLUDE (, Y-A) */
1872 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1873 AMT_FILTER_D_FWD,
1874 AMT_ACT_STATUS_D_FWD_NEW,
1875 v6);
1876 /* (A)=GMI
1877 * All (A) source are now FWD/NEW status.
1878 */
1879 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1880 AMT_FILTER_FWD_NEW,
1881 AMT_ACT_GMI,
1882 v6);
1883 }
1884 }
1885
amt_mcast_block_handler(struct amt_dev * amt,struct amt_tunnel_list * tunnel,struct amt_group_node * gnode,void * grec,void * zero_grec,bool v6)1886 static void amt_mcast_block_handler(struct amt_dev *amt,
1887 struct amt_tunnel_list *tunnel,
1888 struct amt_group_node *gnode,
1889 void *grec, void *zero_grec, bool v6)
1890 {
1891 if (gnode->filter_mode == MCAST_INCLUDE) {
1892 /* Router State Report Rec'd New Router State Actions
1893 * ------------ ------------ ---------------- -------
1894 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
1895 */
1896 /* INCLUDE (A) */
1897 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1898 AMT_FILTER_FWD,
1899 AMT_ACT_STATUS_FWD_NEW,
1900 v6);
1901 } else {
1902 /* Router State Report Rec'd New Router State Actions
1903 * ------------ ------------ ---------------- -------
1904 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
1905 * Send Q(G,A-Y)
1906 */
1907 /* (A-X-Y)=Group Timer */
1908 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1909 AMT_FILTER_BOTH,
1910 AMT_ACT_GT,
1911 v6);
1912 /* EXCLUDE (X, ) */
1913 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1914 AMT_FILTER_FWD,
1915 AMT_ACT_STATUS_FWD_NEW,
1916 v6);
1917 /* EXCLUDE (X+(A-Y), ) */
1918 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1919 AMT_FILTER_D_FWD,
1920 AMT_ACT_STATUS_FWD_NEW,
1921 v6);
1922 /* EXCLUDE (, Y) */
1923 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1924 AMT_FILTER_D_FWD,
1925 AMT_ACT_STATUS_D_FWD_NEW,
1926 v6);
1927 }
1928 }
1929
1930 /* RFC 3376
1931 * 7.3.2. In the Presence of Older Version Group Members
1932 *
1933 * When Group Compatibility Mode is IGMPv2, a router internally
1934 * translates the following IGMPv2 messages for that group to their
1935 * IGMPv3 equivalents:
1936 *
1937 * IGMPv2 Message IGMPv3 Equivalent
1938 * -------------- -----------------
1939 * Report IS_EX( {} )
1940 * Leave TO_IN( {} )
1941 */
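/* IS_EX( {} ) is an EXCLUDE report with an empty source list, so an
 * IGMPv2 report reduces to creating (or refreshing) the group in
 * MCAST_EXCLUDE mode and arming its group timer with GMI.
 */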
1942 static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1943 struct amt_tunnel_list *tunnel)
1944 {
1945 struct igmphdr *ih = igmp_hdr(skb);
1946 struct iphdr *iph = ip_hdr(skb);
1947 struct amt_group_node *gnode;
1948 union amt_addr group, host;
1949
1950 memset(&group, 0, sizeof(union amt_addr));
1951 group.ip4 = ih->group;
1952 memset(&host, 0, sizeof(union amt_addr));
1953 host.ip4 = iph->saddr;
1954
1955 gnode = amt_lookup_group(tunnel, &group, &host, false);
1956 if (!gnode) {
1957 gnode = amt_add_group(amt, tunnel, &group, &host, false);
1958 if (!IS_ERR(gnode)) {
1959 gnode->filter_mode = MCAST_EXCLUDE;
1960 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1961 msecs_to_jiffies(amt_gmi(amt))))
1962 dev_hold(amt->dev);
1963 }
1964 }
1965 }
1966
1967 /* RFC 3376
1968 * 7.3.2. In the Presence of Older Version Group Members
1969 *
1970 * When Group Compatibility Mode is IGMPv2, a router internally
1971 * translates the following IGMPv2 messages for that group to their
1972 * IGMPv3 equivalents:
1973 *
1974 * IGMPv2 Message IGMPv3 Equivalent
1975 * -------------- -----------------
1976 * Report IS_EX( {} )
1977 * Leave TO_IN( {} )
1978 */
1979 static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
1980 struct amt_tunnel_list *tunnel)
1981 {
1982 struct igmphdr *ih = igmp_hdr(skb);
1983 struct iphdr *iph = ip_hdr(skb);
1984 struct amt_group_node *gnode;
1985 union amt_addr group, host;
1986
1987 memset(&group, 0, sizeof(union amt_addr));
1988 group.ip4 = ih->group;
1989 memset(&host, 0, sizeof(union amt_addr));
1990 host.ip4 = iph->saddr;
1991
1992 gnode = amt_lookup_group(tunnel, &group, &host, false);
1993 if (gnode)
1994 amt_del_group(amt, gnode);
1995 }
1996
1997 static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1998 struct amt_tunnel_list *tunnel)
1999 {
2000 struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
2001 int len = skb_transport_offset(skb) + sizeof(*ihrv3);
2002 void *zero_grec = (void *)&igmpv3_zero_grec;
2003 struct iphdr *iph = ip_hdr(skb);
2004 struct amt_group_node *gnode;
2005 union amt_addr group, host;
2006 struct igmpv3_grec *grec;
2007 u16 nsrcs;
2008 int i;
2009
2010 for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
2011 len += sizeof(*grec);
2012 if (!ip_mc_may_pull(skb, len))
2013 break;
2014
2015 grec = (void *)(skb->data + len - sizeof(*grec));
2016 nsrcs = ntohs(grec->grec_nsrcs);
2017
2018 len += nsrcs * sizeof(__be32);
2019 if (!ip_mc_may_pull(skb, len))
2020 break;
2021
2022 memset(&group, 0, sizeof(union amt_addr));
2023 group.ip4 = grec->grec_mca;
2024 memset(&host, 0, sizeof(union amt_addr));
2025 host.ip4 = iph->saddr;
2026 gnode = amt_lookup_group(tunnel, &group, &host, false);
2027 if (!gnode) {
2028 gnode = amt_add_group(amt, tunnel, &group, &host,
2029 false);
2030 if (IS_ERR(gnode))
2031 continue;
2032 }
2033
2034 amt_add_srcs(amt, tunnel, gnode, grec, false);
2035 switch (grec->grec_type) {
2036 case IGMPV3_MODE_IS_INCLUDE:
2037 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2038 zero_grec, false);
2039 break;
2040 case IGMPV3_MODE_IS_EXCLUDE:
2041 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2042 zero_grec, false);
2043 break;
2044 case IGMPV3_CHANGE_TO_INCLUDE:
2045 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2046 zero_grec, false);
2047 break;
2048 case IGMPV3_CHANGE_TO_EXCLUDE:
2049 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2050 zero_grec, false);
2051 break;
2052 case IGMPV3_ALLOW_NEW_SOURCES:
2053 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2054 zero_grec, false);
2055 break;
2056 case IGMPV3_BLOCK_OLD_SOURCES:
2057 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2058 zero_grec, false);
2059 break;
2060 default:
2061 break;
2062 }
2063 amt_cleanup_srcs(amt, tunnel, gnode);
2064 }
2065 }
2066
2067 /* caller must hold tunnel->lock */
2068 static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2069 struct amt_tunnel_list *tunnel)
2070 {
2071 struct igmphdr *ih = igmp_hdr(skb);
2072
2073 switch (ih->type) {
2074 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2075 amt_igmpv3_report_handler(amt, skb, tunnel);
2076 break;
2077 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2078 amt_igmpv2_report_handler(amt, skb, tunnel);
2079 break;
2080 case IGMP_HOST_LEAVE_MESSAGE:
2081 amt_igmpv2_leave_handler(amt, skb, tunnel);
2082 break;
2083 default:
2084 break;
2085 }
2086 }
2087
2088 #if IS_ENABLED(CONFIG_IPV6)
2089 /* RFC 3810
2090 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2091 *
2092 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2093 * using the MLDv2 protocol for that multicast address. When Multicast
2094 * Address Compatibility Mode is MLDv1, a router internally translates
2095 * the following MLDv1 messages for that multicast address to their
2096 * MLDv2 equivalents:
2097 *
2098 * MLDv1 Message MLDv2 Equivalent
2099 * -------------- -----------------
2100 * Report IS_EX( {} )
2101 * Done TO_IN( {} )
2102 */
2103 static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2104 struct amt_tunnel_list *tunnel)
2105 {
2106 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2107 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2108 struct amt_group_node *gnode;
2109 union amt_addr group, host;
2110
2111 memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2112 memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2113
2114 gnode = amt_lookup_group(tunnel, &group, &host, true);
2115 if (!gnode) {
2116 gnode = amt_add_group(amt, tunnel, &group, &host, true);
2117 if (!IS_ERR(gnode)) {
2118 gnode->filter_mode = MCAST_EXCLUDE;
2119 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
2120 msecs_to_jiffies(amt_gmi(amt))))
2121 dev_hold(amt->dev);
2122 }
2123 }
2124 }
2125
2126 /* RFC 3810
2127 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2128 *
2129 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2130 * using the MLDv2 protocol for that multicast address. When Multicast
2131 * Address Compatibility Mode is MLDv1, a router internally translates
2132 * the following MLDv1 messages for that multicast address to their
2133 * MLDv2 equivalents:
2134 *
2135 * MLDv1 Message MLDv2 Equivalent
2136 * -------------- -----------------
2137 * Report IS_EX( {} )
2138 * Done TO_IN( {} )
2139 */
2140 static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
2141 struct amt_tunnel_list *tunnel)
2142 {
2143 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2144 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2145 struct amt_group_node *gnode;
2146 union amt_addr group, host;
2147
2148 memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2149 memset(&host, 0, sizeof(union amt_addr));
2150 host.ip6 = ip6h->saddr;
2151
2152 gnode = amt_lookup_group(tunnel, &group, &host, true);
2153 if (gnode) {
2154 amt_del_group(amt, gnode);
2155 return;
2156 }
2157 }
2158
2159 static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2160 struct amt_tunnel_list *tunnel)
2161 {
2162 struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
2163 int len = skb_transport_offset(skb) + sizeof(*mld2r);
2164 void *zero_grec = (void *)&mldv2_zero_grec;
2165 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2166 struct amt_group_node *gnode;
2167 union amt_addr group, host;
2168 struct mld2_grec *grec;
2169 u16 nsrcs;
2170 int i;
2171
2172 for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
2173 len += sizeof(*grec);
2174 if (!ipv6_mc_may_pull(skb, len))
2175 break;
2176
2177 grec = (void *)(skb->data + len - sizeof(*grec));
2178 nsrcs = ntohs(grec->grec_nsrcs);
2179
2180 len += nsrcs * sizeof(struct in6_addr);
2181 if (!ipv6_mc_may_pull(skb, len))
2182 break;
2183
2184 memset(&group, 0, sizeof(union amt_addr));
2185 group.ip6 = grec->grec_mca;
2186 memset(&host, 0, sizeof(union amt_addr));
2187 host.ip6 = ip6h->saddr;
2188 gnode = amt_lookup_group(tunnel, &group, &host, true);
2189 if (!gnode) {
2190 gnode = amt_add_group(amt, tunnel, &group, &host,
2191 true);
2192 if (IS_ERR(gnode))
2193 continue;
2194 }
2195
2196 amt_add_srcs(amt, tunnel, gnode, grec, true);
2197 switch (grec->grec_type) {
2198 case MLD2_MODE_IS_INCLUDE:
2199 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2200 zero_grec, true);
2201 break;
2202 case MLD2_MODE_IS_EXCLUDE:
2203 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2204 zero_grec, true);
2205 break;
2206 case MLD2_CHANGE_TO_INCLUDE:
2207 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2208 zero_grec, true);
2209 break;
2210 case MLD2_CHANGE_TO_EXCLUDE:
2211 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2212 zero_grec, true);
2213 break;
2214 case MLD2_ALLOW_NEW_SOURCES:
2215 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2216 zero_grec, true);
2217 break;
2218 case MLD2_BLOCK_OLD_SOURCES:
2219 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2220 zero_grec, true);
2221 break;
2222 default:
2223 break;
2224 }
2225 amt_cleanup_srcs(amt, tunnel, gnode);
2226 }
2227 }
2228
2229 /* caller must hold tunnel->lock */
2230 static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2231 struct amt_tunnel_list *tunnel)
2232 {
2233 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2234
2235 switch (mld->mld_type) {
2236 case ICMPV6_MGM_REPORT:
2237 amt_mldv1_report_handler(amt, skb, tunnel);
2238 break;
2239 case ICMPV6_MLD2_REPORT:
2240 amt_mldv2_report_handler(amt, skb, tunnel);
2241 break;
2242 case ICMPV6_MGM_REDUCTION:
2243 amt_mldv1_leave_handler(amt, skb, tunnel);
2244 break;
2245 default:
2246 break;
2247 }
2248 }
2249 #endif
2250
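/* Gateway-side message handlers below return true when the packet is
 * invalid and should be dropped, false when it was accepted. An
 * Advertisement is accepted only while a Discovery is outstanding
 * (AMT_STATUS_SENT_DISCOVERY) and only if it echoes the gateway's nonce;
 * the advertised address then becomes the relay address (amt->remote_ip).
 */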
2251 static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
2252 {
2253 struct amt_header_advertisement *amta;
2254 int hdr_size;
2255
2256 hdr_size = sizeof(*amta) + sizeof(struct udphdr);
2257 if (!pskb_may_pull(skb, hdr_size))
2258 return true;
2259
2260 amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
2261 if (!amta->ip4)
2262 return true;
2263
2264 if (amta->reserved || amta->version)
2265 return true;
2266
2267 if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
2268 ipv4_is_zeronet(amta->ip4))
2269 return true;
2270
2271 if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
2272 amt->nonce != amta->nonce)
2273 return true;
2274
2275 amt->remote_ip = amta->ip4;
2276 netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
2277 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2278
2279 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
2280 return false;
2281 }
2282
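/* Multicast Data messages are accepted only after the gateway has sent a
 * Membership Update (AMT_STATUS_SENT_UPDATE). The outer headers are
 * stripped, an Ethernet multicast header is synthesized from the inner
 * IPv4/IPv6 destination address, and the packet is delivered through the
 * device's GRO cells.
 */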
2283 static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
2284 {
2285 struct amt_header_mcast_data *amtmd;
2286 int hdr_size, len, err;
2287 struct ethhdr *eth;
2288 struct iphdr *iph;
2289
2290 if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
2291 return true;
2292
2293 hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
2294 if (!pskb_may_pull(skb, hdr_size))
2295 return true;
2296
2297 amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
2298 if (amtmd->reserved || amtmd->version)
2299 return true;
2300
2301 if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
2302 return true;
2303
2304 skb_reset_network_header(skb);
2305 skb_push(skb, sizeof(*eth));
2306 skb_reset_mac_header(skb);
2307 skb_pull(skb, sizeof(*eth));
2308 eth = eth_hdr(skb);
2309
2310 if (!pskb_may_pull(skb, sizeof(*iph)))
2311 return true;
2312 iph = ip_hdr(skb);
2313
2314 if (iph->version == 4) {
2315 if (!ipv4_is_multicast(iph->daddr))
2316 return true;
2317 skb->protocol = htons(ETH_P_IP);
2318 eth->h_proto = htons(ETH_P_IP);
2319 ip_eth_mc_map(iph->daddr, eth->h_dest);
2320 #if IS_ENABLED(CONFIG_IPV6)
2321 } else if (iph->version == 6) {
2322 struct ipv6hdr *ip6h;
2323
2324 if (!pskb_may_pull(skb, sizeof(*ip6h)))
2325 return true;
2326
2327 ip6h = ipv6_hdr(skb);
2328 if (!ipv6_addr_is_multicast(&ip6h->daddr))
2329 return true;
2330 skb->protocol = htons(ETH_P_IPV6);
2331 eth->h_proto = htons(ETH_P_IPV6);
2332 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2333 #endif
2334 } else {
2335 return true;
2336 }
2337
2338 skb->pkt_type = PACKET_MULTICAST;
2339 skb->ip_summed = CHECKSUM_NONE;
2340 len = skb->len;
2341 err = gro_cells_receive(&amt->gro_cells, skb);
2342 if (likely(err == NET_RX_SUCCESS))
2343 dev_sw_netstats_rx_add(amt->dev, len);
2344 else
2345 amt->dev->stats.rx_dropped++;
2346
2347 return false;
2348 }
2349
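/* A relay's Membership Query carries an encapsulated IGMPv3/MLDv2
 * general query. The query is re-injected with __netif_rx() so the local
 * IGMP/MLD stack answers it with a report, which the transmit path
 * (amt_dev_xmit) encapsulates as a Membership Update; the query's
 * response MAC and QQIC are cached for that exchange.
 */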
2350 static bool amt_membership_query_handler(struct amt_dev *amt,
2351 struct sk_buff *skb)
2352 {
2353 struct amt_header_membership_query *amtmq;
2354 struct igmpv3_query *ihv3;
2355 struct ethhdr *eth, *oeth;
2356 struct iphdr *iph;
2357 int hdr_size, len;
2358
2359 hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
2360 if (!pskb_may_pull(skb, hdr_size))
2361 return true;
2362
2363 amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
2364 if (amtmq->reserved || amtmq->version)
2365 return true;
2366
2367 if (amtmq->nonce != amt->nonce)
2368 return true;
2369
2370 hdr_size -= sizeof(*eth);
2371 if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
2372 return true;
2373
2374 oeth = eth_hdr(skb);
2375 skb_reset_mac_header(skb);
2376 skb_pull(skb, sizeof(*eth));
2377 skb_reset_network_header(skb);
2378 eth = eth_hdr(skb);
2379 if (!pskb_may_pull(skb, sizeof(*iph)))
2380 return true;
2381
2382 iph = ip_hdr(skb);
2383 if (iph->version == 4) {
2384 if (READ_ONCE(amt->ready4))
2385 return true;
2386
2387 if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
2388 sizeof(*ihv3)))
2389 return true;
2390
2391 if (!ipv4_is_multicast(iph->daddr))
2392 return true;
2393
2394 ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2395 skb_reset_transport_header(skb);
2396 skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2397 WRITE_ONCE(amt->ready4, true);
2398 amt->mac = amtmq->response_mac;
2399 amt->req_cnt = 0;
2400 amt->qi = ihv3->qqic;
2401 skb->protocol = htons(ETH_P_IP);
2402 eth->h_proto = htons(ETH_P_IP);
2403 ip_eth_mc_map(iph->daddr, eth->h_dest);
2404 #if IS_ENABLED(CONFIG_IPV6)
2405 } else if (iph->version == 6) {
2406 struct mld2_query *mld2q;
2407 struct ipv6hdr *ip6h;
2408
2409 if (READ_ONCE(amt->ready6))
2410 return true;
2411
2412 if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
2413 sizeof(*mld2q)))
2414 return true;
2415
2416 ip6h = ipv6_hdr(skb);
2417 if (!ipv6_addr_is_multicast(&ip6h->daddr))
2418 return true;
2419
2420 mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2421 skb_reset_transport_header(skb);
2422 skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2423 WRITE_ONCE(amt->ready6, true);
2424 amt->mac = amtmq->response_mac;
2425 amt->req_cnt = 0;
2426 amt->qi = mld2q->mld2q_qqic;
2427 skb->protocol = htons(ETH_P_IPV6);
2428 eth->h_proto = htons(ETH_P_IPV6);
2429 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2430 #endif
2431 } else {
2432 return true;
2433 }
2434
2435 ether_addr_copy(eth->h_source, oeth->h_source);
2436 skb->pkt_type = PACKET_MULTICAST;
2437 skb->ip_summed = CHECKSUM_NONE;
2438 len = skb->len;
2439 local_bh_disable();
2440 if (__netif_rx(skb) == NET_RX_SUCCESS) {
2441 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
2442 dev_sw_netstats_rx_add(amt->dev, len);
2443 } else {
2444 amt->dev->stats.rx_dropped++;
2445 }
2446 local_bh_enable();
2447
2448 return false;
2449 }
2450
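/* Relay side: a Membership Update is matched to a tunnel by its outer
 * source address, then authenticated by comparing the nonce and response
 * MAC against the values derived in amt_request_handler(). On success the
 * tunnel's GC timer is pushed out to 3x GMI and the inner IGMP/MLD report
 * is processed and re-injected locally.
 */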
2451 static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
2452 {
2453 struct amt_header_membership_update *amtmu;
2454 struct amt_tunnel_list *tunnel;
2455 struct ethhdr *eth;
2456 struct iphdr *iph;
2457 int len, hdr_size;
2458
2459 iph = ip_hdr(skb);
2460
2461 hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
2462 if (!pskb_may_pull(skb, hdr_size))
2463 return true;
2464
2465 amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
2466 if (amtmu->reserved || amtmu->version)
2467 return true;
2468
2469 if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
2470 return true;
2471
2472 skb_reset_network_header(skb);
2473
2474 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
2475 if (tunnel->ip4 == iph->saddr) {
2476 if (amtmu->nonce == tunnel->nonce &&
2477 amtmu->response_mac == tunnel->mac) {
2478 mod_delayed_work(amt_wq, &tunnel->gc_wq,
2479 msecs_to_jiffies(amt_gmi(amt))
2480 * 3);
2481 goto report;
2482 } else {
2483 netdev_dbg(amt->dev, "Invalid MAC\n");
2484 return true;
2485 }
2486 }
2487 }
2488
2489 return true;
2490
2491 report:
2492 if (!pskb_may_pull(skb, sizeof(*iph)))
2493 return true;
2494
2495 iph = ip_hdr(skb);
2496 if (iph->version == 4) {
2497 if (ip_mc_check_igmp(skb)) {
2498 netdev_dbg(amt->dev, "Invalid IGMP\n");
2499 return true;
2500 }
2501
2502 spin_lock_bh(&tunnel->lock);
2503 amt_igmp_report_handler(amt, skb, tunnel);
2504 spin_unlock_bh(&tunnel->lock);
2505
2506 skb_push(skb, sizeof(struct ethhdr));
2507 skb_reset_mac_header(skb);
2508 eth = eth_hdr(skb);
2509 skb->protocol = htons(ETH_P_IP);
2510 eth->h_proto = htons(ETH_P_IP);
2511 ip_eth_mc_map(iph->daddr, eth->h_dest);
2512 #if IS_ENABLED(CONFIG_IPV6)
2513 } else if (iph->version == 6) {
2514 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2515
2516 if (ipv6_mc_check_mld(skb)) {
2517 netdev_dbg(amt->dev, "Invalid MLD\n");
2518 return true;
2519 }
2520
2521 spin_lock_bh(&tunnel->lock);
2522 amt_mld_report_handler(amt, skb, tunnel);
2523 spin_unlock_bh(&tunnel->lock);
2524
2525 skb_push(skb, sizeof(struct ethhdr));
2526 skb_reset_mac_header(skb);
2527 eth = eth_hdr(skb);
2528 skb->protocol = htons(ETH_P_IPV6);
2529 eth->h_proto = htons(ETH_P_IPV6);
2530 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2531 #endif
2532 } else {
2533 netdev_dbg(amt->dev, "Unsupported Protocol\n");
2534 return true;
2535 }
2536
2537 skb_pull(skb, sizeof(struct ethhdr));
2538 skb->pkt_type = PACKET_MULTICAST;
2539 skb->ip_summed = CHECKSUM_NONE;
2540 len = skb->len;
2541 if (__netif_rx(skb) == NET_RX_SUCCESS) {
2542 amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
2543 true);
2544 dev_sw_netstats_rx_add(amt->dev, len);
2545 } else {
2546 amt->dev->stats.rx_dropped++;
2547 }
2548
2549 return false;
2550 }
2551
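/* Build and send an Advertisement in reply to a Discovery: the relay
 * echoes the gateway's nonce and advertises its unicast address
 * (amt->local_ip). The IPv4 and UDP headers are constructed by hand, so
 * the UDP checksum over the pseudo-header is computed explicitly.
 */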
2552 static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
2553 __be32 daddr, __be16 dport)
2554 {
2555 struct amt_header_advertisement *amta;
2556 int hlen, tlen, offset;
2557 struct socket *sock;
2558 struct udphdr *udph;
2559 struct sk_buff *skb;
2560 struct iphdr *iph;
2561 struct rtable *rt;
2562 struct flowi4 fl4;
2563 u32 len;
2564 int err;
2565
2566 rcu_read_lock();
2567 sock = rcu_dereference(amt->sock);
2568 if (!sock)
2569 goto out;
2570
2571 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
2572 goto out;
2573
2574 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
2575 daddr, amt->local_ip,
2576 dport, amt->relay_port,
2577 IPPROTO_UDP, 0,
2578 amt->stream_dev->ifindex);
2579 if (IS_ERR(rt)) {
2580 amt->dev->stats.tx_errors++;
2581 goto out;
2582 }
2583
2584 hlen = LL_RESERVED_SPACE(amt->dev);
2585 tlen = amt->dev->needed_tailroom;
2586 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2587 skb = netdev_alloc_skb_ip_align(amt->dev, len);
2588 if (!skb) {
2589 ip_rt_put(rt);
2590 amt->dev->stats.tx_errors++;
2591 goto out;
2592 }
2593
2594 skb->priority = TC_PRIO_CONTROL;
2595 skb_dst_set(skb, &rt->dst);
2596
2597 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
2598 skb_reset_network_header(skb);
2599 skb_put(skb, len);
2600 amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
2601 amta->version = 0;
2602 amta->type = AMT_MSG_ADVERTISEMENT;
2603 amta->reserved = 0;
2604 amta->nonce = nonce;
2605 amta->ip4 = amt->local_ip;
2606 skb_push(skb, sizeof(*udph));
2607 skb_reset_transport_header(skb);
2608 udph = udp_hdr(skb);
2609 udph->source = amt->relay_port;
2610 udph->dest = dport;
2611 udph->len = htons(sizeof(*amta) + sizeof(*udph));
2612 udph->check = 0;
2613 offset = skb_transport_offset(skb);
2614 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
2615 udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
2616 sizeof(*udph) + sizeof(*amta),
2617 IPPROTO_UDP, skb->csum);
2618
2619 skb_push(skb, sizeof(*iph));
2620 iph = ip_hdr(skb);
2621 iph->version = 4;
2622 iph->ihl = (sizeof(struct iphdr)) >> 2;
2623 iph->tos = AMT_TOS;
2624 iph->frag_off = 0;
2625 iph->ttl = ip4_dst_hoplimit(&rt->dst);
2626 iph->daddr = daddr;
2627 iph->saddr = amt->local_ip;
2628 iph->protocol = IPPROTO_UDP;
2629 iph->tot_len = htons(len);
2630
2631 skb->ip_summed = CHECKSUM_NONE;
2632 ip_select_ident(amt->net, skb, NULL);
2633 ip_send_check(iph);
2634 err = ip_local_out(amt->net, sock->sk, skb);
2635 if (unlikely(net_xmit_eval(err)))
2636 amt->dev->stats.tx_errors++;
2637
2638 out:
2639 rcu_read_unlock();
2640 }
2641
2642 static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
2643 {
2644 struct amt_header_discovery *amtd;
2645 struct udphdr *udph;
2646 struct iphdr *iph;
2647
2648 if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
2649 return true;
2650
2651 iph = ip_hdr(skb);
2652 udph = udp_hdr(skb);
2653 amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
2654
2655 if (amtd->reserved || amtd->version)
2656 return true;
2657
2658 amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
2659
2660 return false;
2661 }
2662
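/* Relay side: the first Request from a given source creates a tunnel
 * entry, bounded by amt->max_tunnels (over the limit an ICMP
 * host-unreachable is returned). The response MAC carried in the Query is
 * the upper 48 bits of siphash(source ip, source port, nonce, key), and
 * amtrh->p selects an MLD instead of an IGMP general query.
 */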
2663 static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
2664 {
2665 struct amt_header_request *amtrh;
2666 struct amt_tunnel_list *tunnel;
2667 unsigned long long key;
2668 struct udphdr *udph;
2669 struct iphdr *iph;
2670 u64 mac;
2671 int i;
2672
2673 if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
2674 return true;
2675
2676 iph = ip_hdr(skb);
2677 udph = udp_hdr(skb);
2678 amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
2679
2680 if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
2681 return true;
2682
2683 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
2684 if (tunnel->ip4 == iph->saddr)
2685 goto send;
2686
2687 spin_lock_bh(&amt->lock);
2688 if (amt->nr_tunnels >= amt->max_tunnels) {
2689 spin_unlock_bh(&amt->lock);
2690 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
2691 return true;
2692 }
2693
2694 tunnel = kzalloc(sizeof(*tunnel) +
2695 (sizeof(struct hlist_head) * amt->hash_buckets),
2696 GFP_ATOMIC);
2697 if (!tunnel) {
2698 spin_unlock_bh(&amt->lock);
2699 return true;
2700 }
2701
2702 tunnel->source_port = udph->source;
2703 tunnel->ip4 = iph->saddr;
2704
2705 memcpy(&key, &tunnel->key, sizeof(unsigned long long));
2706 tunnel->amt = amt;
2707 spin_lock_init(&tunnel->lock);
2708 for (i = 0; i < amt->hash_buckets; i++)
2709 INIT_HLIST_HEAD(&tunnel->groups[i]);
2710
2711 INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
2712
2713 list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
2714 tunnel->key = amt->key;
2715 __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
2716 amt->nr_tunnels++;
2717 mod_delayed_work(amt_wq, &tunnel->gc_wq,
2718 msecs_to_jiffies(amt_gmi(amt)));
2719 spin_unlock_bh(&amt->lock);
2720
2721 send:
2722 tunnel->nonce = amtrh->nonce;
2723 mac = siphash_3u32((__force u32)tunnel->ip4,
2724 (__force u32)tunnel->source_port,
2725 (__force u32)tunnel->nonce,
2726 &tunnel->key);
2727 tunnel->mac = mac >> 16;
2728
2729 if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
2730 return true;
2731
2732 if (!amtrh->p)
2733 amt_send_igmp_gq(amt, tunnel);
2734 else
2735 amt_send_mld_gq(amt, tunnel);
2736
2737 return false;
2738 }
2739
2740 static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
2741 {
2742 int type = amt_parse_type(skb);
2743 int err = 1;
2744
2745 if (type == -1)
2746 goto drop;
2747
2748 if (amt->mode == AMT_MODE_GATEWAY) {
2749 switch (type) {
2750 case AMT_MSG_ADVERTISEMENT:
2751 err = amt_advertisement_handler(amt, skb);
2752 break;
2753 case AMT_MSG_MEMBERSHIP_QUERY:
2754 err = amt_membership_query_handler(amt, skb);
2755 if (!err)
2756 return;
2757 break;
2758 default:
2759 netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2760 break;
2761 }
2762 }
2763 drop:
2764 if (err) {
2765 amt->dev->stats.rx_dropped++;
2766 kfree_skb(skb);
2767 } else {
2768 consume_skb(skb);
2769 }
2770 }
2771
2772 static int amt_rcv(struct sock *sk, struct sk_buff *skb)
2773 {
2774 struct amt_dev *amt;
2775 struct iphdr *iph;
2776 int type;
2777 bool err;
2778
2779 rcu_read_lock_bh();
2780 amt = rcu_dereference_sk_user_data(sk);
2781 if (!amt) {
2782 err = true;
2783 kfree_skb(skb);
2784 goto out;
2785 }
2786
2787 skb->dev = amt->dev;
2788 iph = ip_hdr(skb);
2789 type = amt_parse_type(skb);
2790 if (type == -1) {
2791 err = true;
2792 goto drop;
2793 }
2794
2795 if (amt->mode == AMT_MODE_GATEWAY) {
2796 switch (type) {
2797 case AMT_MSG_ADVERTISEMENT:
2798 if (iph->saddr != amt->discovery_ip) {
2799 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2800 err = true;
2801 goto drop;
2802 }
2803 if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
2804 netdev_dbg(amt->dev, "AMT Event queue full\n");
2805 err = true;
2806 goto drop;
2807 }
2808 goto out;
2809 case AMT_MSG_MULTICAST_DATA:
2810 if (iph->saddr != amt->remote_ip) {
2811 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2812 err = true;
2813 goto drop;
2814 }
2815 err = amt_multicast_data_handler(amt, skb);
2816 if (err)
2817 goto drop;
2818 else
2819 goto out;
2820 case AMT_MSG_MEMBERSHIP_QUERY:
2821 if (iph->saddr != amt->remote_ip) {
2822 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2823 err = true;
2824 goto drop;
2825 }
2826 if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
2827 netdev_dbg(amt->dev, "AMT Event queue full\n");
2828 err = true;
2829 goto drop;
2830 }
2831 goto out;
2832 default:
2833 err = true;
2834 netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2835 break;
2836 }
2837 } else {
2838 switch (type) {
2839 case AMT_MSG_DISCOVERY:
2840 err = amt_discovery_handler(amt, skb);
2841 break;
2842 case AMT_MSG_REQUEST:
2843 err = amt_request_handler(amt, skb);
2844 break;
2845 case AMT_MSG_MEMBERSHIP_UPDATE:
2846 err = amt_update_handler(amt, skb);
2847 if (err)
2848 goto drop;
2849 else
2850 goto out;
2851 default:
2852 err = true;
2853 netdev_dbg(amt->dev, "Invalid type of relay\n");
2854 break;
2855 }
2856 }
2857 drop:
2858 if (err) {
2859 amt->dev->stats.rx_dropped++;
2860 kfree_skb(skb);
2861 } else {
2862 consume_skb(skb);
2863 }
2864 out:
2865 rcu_read_unlock_bh();
2866 return 0;
2867 }
2868
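/* Drain the per-device event ring in process context. Events are queued
 * from softirq context by amt_queue_event(), stored in a fixed ring of
 * AMT_MAX_EVENTS entries indexed by event_idx and protected by amt->lock.
 */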
2869 static void amt_event_work(struct work_struct *work)
2870 {
2871 struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
2872 struct sk_buff *skb;
2873 u8 event;
2874 int i;
2875
2876 for (i = 0; i < AMT_MAX_EVENTS; i++) {
2877 spin_lock_bh(&amt->lock);
2878 if (amt->nr_events == 0) {
2879 spin_unlock_bh(&amt->lock);
2880 return;
2881 }
2882 event = amt->events[amt->event_idx].event;
2883 skb = amt->events[amt->event_idx].skb;
2884 amt->events[amt->event_idx].event = AMT_EVENT_NONE;
2885 amt->events[amt->event_idx].skb = NULL;
2886 amt->nr_events--;
2887 amt->event_idx++;
2888 amt->event_idx %= AMT_MAX_EVENTS;
2889 spin_unlock_bh(&amt->lock);
2890
2891 switch (event) {
2892 case AMT_EVENT_RECEIVE:
2893 amt_gw_rcv(amt, skb);
2894 break;
2895 case AMT_EVENT_SEND_DISCOVERY:
2896 amt_event_send_discovery(amt);
2897 break;
2898 case AMT_EVENT_SEND_REQUEST:
2899 amt_event_send_request(amt);
2900 break;
2901 default:
2902 kfree_skb(skb);
2903 break;
2904 }
2905 }
2906 }
2907
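/* encap_err_lookup callback, invoked when an ICMP error arrives for the
 * encapsulation socket. In gateway mode, an unreachable for a sent
 * Request or Membership Update restarts the handshake by re-arming
 * req_wq.
 */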
2908 static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
2909 {
2910 struct amt_dev *amt;
2911 int type;
2912
2913 rcu_read_lock_bh();
2914 amt = rcu_dereference_sk_user_data(sk);
2915 if (!amt)
2916 goto out;
2917
2918 if (amt->mode != AMT_MODE_GATEWAY)
2919 goto drop;
2920
2921 type = amt_parse_type(skb);
2922 if (type == -1)
2923 goto drop;
2924
2925 netdev_dbg(amt->dev, "Received ICMP Unreachable for %s\n",
2926 type_str[type]);
2927 switch (type) {
2928 case AMT_MSG_DISCOVERY:
2929 break;
2930 case AMT_MSG_REQUEST:
2931 case AMT_MSG_MEMBERSHIP_UPDATE:
2932 if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
2933 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2934 break;
2935 default:
2936 goto drop;
2937 }
2938 out:
2939 rcu_read_unlock_bh();
2940 return 0;
2941 drop:
2942 rcu_read_unlock_bh();
2943 amt->dev->stats.rx_dropped++;
2944 return 0;
2945 }
2946
2947 static struct socket *amt_create_sock(struct net *net, __be16 port)
2948 {
2949 struct udp_port_cfg udp_conf;
2950 struct socket *sock;
2951 int err;
2952
2953 memset(&udp_conf, 0, sizeof(udp_conf));
2954 udp_conf.family = AF_INET;
2955 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
2956
2957 udp_conf.local_udp_port = port;
2958
2959 err = udp_sock_create(net, &udp_conf, &sock);
2960 if (err < 0)
2961 return ERR_PTR(err);
2962
2963 return sock;
2964 }
2965
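/* Create the kernel UDP socket and register it for encapsulation:
 * amt_rcv() then sees every datagram received on the AMT port (and
 * returns 0 to consume it), while amt_err_lookup() handles ICMP errors.
 */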
2966 static int amt_socket_create(struct amt_dev *amt)
2967 {
2968 struct udp_tunnel_sock_cfg tunnel_cfg;
2969 struct socket *sock;
2970
2971 sock = amt_create_sock(amt->net, amt->relay_port);
2972 if (IS_ERR(sock))
2973 return PTR_ERR(sock);
2974
2975 /* Mark socket as an encapsulation socket */
2976 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2977 tunnel_cfg.sk_user_data = amt;
2978 tunnel_cfg.encap_type = 1;
2979 tunnel_cfg.encap_rcv = amt_rcv;
2980 tunnel_cfg.encap_err_lookup = amt_err_lookup;
2981 tunnel_cfg.encap_destroy = NULL;
2982 setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
2983
2984 rcu_assign_pointer(amt->sock, sock);
2985 return 0;
2986 }
2987
2988 static int amt_dev_open(struct net_device *dev)
2989 {
2990 struct amt_dev *amt = netdev_priv(dev);
2991 int err;
2992
2993 amt->ready4 = false;
2994 amt->ready6 = false;
2995 amt->event_idx = 0;
2996 amt->nr_events = 0;
2997
2998 err = amt_socket_create(amt);
2999 if (err)
3000 return err;
3001
3002 amt->req_cnt = 0;
3003 amt->remote_ip = 0;
3004 amt->nonce = 0;
3005 get_random_bytes(&amt->key, sizeof(siphash_key_t));
3006
3007 amt->status = AMT_STATUS_INIT;
3008 if (amt->mode == AMT_MODE_GATEWAY) {
3009 mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
3010 mod_delayed_work(amt_wq, &amt->req_wq, 0);
3011 } else if (amt->mode == AMT_MODE_RELAY) {
3012 mod_delayed_work(amt_wq, &amt->secret_wq,
3013 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
3014 }
3015 return err;
3016 }
3017
3018 static int amt_dev_stop(struct net_device *dev)
3019 {
3020 struct amt_dev *amt = netdev_priv(dev);
3021 struct amt_tunnel_list *tunnel, *tmp;
3022 struct socket *sock;
3023 struct sk_buff *skb;
3024 int i;
3025
3026 cancel_delayed_work_sync(&amt->req_wq);
3027 cancel_delayed_work_sync(&amt->discovery_wq);
3028 cancel_delayed_work_sync(&amt->secret_wq);
3029
3030 /* shutdown */
3031 sock = rtnl_dereference(amt->sock);
3032 RCU_INIT_POINTER(amt->sock, NULL);
3033 synchronize_net();
3034 if (sock)
3035 udp_tunnel_sock_release(sock);
3036
3037 cancel_work_sync(&amt->event_wq);
3038 for (i = 0; i < AMT_MAX_EVENTS; i++) {
3039 skb = amt->events[i].skb;
3040 kfree_skb(skb);
3041 amt->events[i].event = AMT_EVENT_NONE;
3042 amt->events[i].skb = NULL;
3043 }
3044
3045 amt->ready4 = false;
3046 amt->ready6 = false;
3047 amt->req_cnt = 0;
3048 amt->remote_ip = 0;
3049
3050 list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
3051 list_del_rcu(&tunnel->list);
3052 amt->nr_tunnels--;
3053 cancel_delayed_work_sync(&tunnel->gc_wq);
3054 amt_clear_groups(tunnel);
3055 kfree_rcu(tunnel, rcu);
3056 }
3057
3058 return 0;
3059 }
3060
3061 static const struct device_type amt_type = {
3062 .name = "amt",
3063 };
3064
3065 static int amt_dev_init(struct net_device *dev)
3066 {
3067 struct amt_dev *amt = netdev_priv(dev);
3068 int err;
3069
3070 amt->dev = dev;
3071
3072 err = gro_cells_init(&amt->gro_cells, dev);
3073 if (err)
3074 return err;
3075
3076 return 0;
3077 }
3078
3079 static void amt_dev_uninit(struct net_device *dev)
3080 {
3081 struct amt_dev *amt = netdev_priv(dev);
3082
3083 gro_cells_destroy(&amt->gro_cells);
3084 }
3085
3086 static const struct net_device_ops amt_netdev_ops = {
3087 .ndo_init = amt_dev_init,
3088 .ndo_uninit = amt_dev_uninit,
3089 .ndo_open = amt_dev_open,
3090 .ndo_stop = amt_dev_stop,
3091 .ndo_start_xmit = amt_dev_xmit,
3092 };
3093
3094 static void amt_link_setup(struct net_device *dev)
3095 {
3096 dev->netdev_ops = &amt_netdev_ops;
3097 dev->needs_free_netdev = true;
3098 SET_NETDEV_DEVTYPE(dev, &amt_type);
3099 dev->min_mtu = ETH_MIN_MTU;
3100 dev->max_mtu = ETH_MAX_MTU;
3101 dev->type = ARPHRD_NONE;
3102 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
3103 dev->hard_header_len = 0;
3104 dev->addr_len = 0;
3105 dev->priv_flags |= IFF_NO_QUEUE;
3106 dev->lltx = true;
3107 dev->netns_immutable = true;
3108 dev->features |= NETIF_F_GSO_SOFTWARE;
3109 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
3110 dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
3111 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
3112 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
3113 eth_hw_addr_random(dev);
3114 eth_zero_addr(dev->broadcast);
3115 ether_setup(dev);
3116 }
3117
3118 static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
3119 [IFLA_AMT_MODE] = { .type = NLA_U32 },
3120 [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 },
3121 [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 },
3122 [IFLA_AMT_LINK] = { .type = NLA_U32 },
3123 [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) },
3124 [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) },
3125 [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) },
3126 [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 },
3127 };
3128
3129 static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
3130 struct netlink_ext_ack *extack)
3131 {
3132 if (!data)
3133 return -EINVAL;
3134
3135 if (!data[IFLA_AMT_LINK]) {
3136 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
3137 "Link attribute is required");
3138 return -EINVAL;
3139 }
3140
3141 if (!data[IFLA_AMT_MODE]) {
3142 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
3143 "Mode attribute is required");
3144 return -EINVAL;
3145 }
3146
3147 if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
3148 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
3149 "Mode attribute is not valid");
3150 return -EINVAL;
3151 }
3152
3153 if (!data[IFLA_AMT_LOCAL_IP]) {
3154 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
3155 "Local attribute is required");
3156 return -EINVAL;
3157 }
3158
3159 if (!data[IFLA_AMT_DISCOVERY_IP] &&
3160 nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
3161 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
3162 "Discovery attribute is required");
3163 return -EINVAL;
3164 }
3165
3166 return 0;
3167 }
3168
3169 static int amt_newlink(struct net_device *dev,
3170 struct rtnl_newlink_params *params,
3171 struct netlink_ext_ack *extack)
3172 {
3173 struct net *link_net = rtnl_newlink_link_net(params);
3174 struct amt_dev *amt = netdev_priv(dev);
3175 struct nlattr **data = params->data;
3176 struct nlattr **tb = params->tb;
3177 int err = -EINVAL;
3178
3179 amt->net = link_net;
3180 amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
3181
3182 if (data[IFLA_AMT_MAX_TUNNELS] &&
3183 nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
3184 amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
3185 else
3186 amt->max_tunnels = AMT_MAX_TUNNELS;
3187
3188 spin_lock_init(&amt->lock);
3189 amt->max_groups = AMT_MAX_GROUP;
3190 amt->max_sources = AMT_MAX_SOURCE;
3191 amt->hash_buckets = AMT_HSIZE;
3192 amt->nr_tunnels = 0;
3193 get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
3194 amt->stream_dev = dev_get_by_index(link_net,
3195 nla_get_u32(data[IFLA_AMT_LINK]));
3196 if (!amt->stream_dev) {
3197 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3198 "Can't find stream device");
3199 return -ENODEV;
3200 }
3201
3202 if (amt->stream_dev->type != ARPHRD_ETHER) {
3203 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
3204 "Invalid stream device type");
3205 goto err;
3206 }
3207
3208 amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
3209 if (ipv4_is_loopback(amt->local_ip) ||
3210 ipv4_is_zeronet(amt->local_ip) ||
3211 ipv4_is_multicast(amt->local_ip)) {
3212 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
3213 "Invalid Local address");
3214 goto err;
3215 }
3216
3217 amt->relay_port = nla_get_be16_default(data[IFLA_AMT_RELAY_PORT],
3218 htons(IANA_AMT_UDP_PORT));
3219
3220 amt->gw_port = nla_get_be16_default(data[IFLA_AMT_GATEWAY_PORT],
3221 htons(IANA_AMT_UDP_PORT));
3222
3223 if (!amt->relay_port) {
3224 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_RELAY_PORT],
3225 "relay port must not be 0");
3226 goto err;
3227 }
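/* Reserve the AMT encapsulation overhead: needed_headroom grows and the
 * MTU shrinks by AMT_RELAY_HLEN (relay mode) or AMT_GW_HLEN (gateway
 * mode) relative to the underlying stream device.
 */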
3228 if (amt->mode == AMT_MODE_RELAY) {
3229 amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
3230 amt->qri = 10;
3231 dev->needed_headroom = amt->stream_dev->needed_headroom +
3232 AMT_RELAY_HLEN;
3233 dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
3234 dev->max_mtu = dev->mtu;
3235 dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
3236 } else {
3237 if (!data[IFLA_AMT_DISCOVERY_IP]) {
3238 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3239 "discovery must be set in gateway mode");
3240 goto err;
3241 }
3242 if (!amt->gw_port) {
3243 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_GATEWAY_PORT],
3244 "gateway port must not be 0");
3245 goto err;
3246 }
3247 amt->remote_ip = 0;
3248 amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
3249 if (ipv4_is_loopback(amt->discovery_ip) ||
3250 ipv4_is_zeronet(amt->discovery_ip) ||
3251 ipv4_is_multicast(amt->discovery_ip)) {
3252 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
3253 "discovery must be unicast");
3254 goto err;
3255 }
3256
3257 dev->needed_headroom = amt->stream_dev->needed_headroom +
3258 AMT_GW_HLEN;
3259 dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
3260 dev->max_mtu = dev->mtu;
3261 dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
3262 }
3263 amt->qi = AMT_INIT_QUERY_INTERVAL;
3264
3265 err = register_netdevice(dev);
3266 if (err < 0) {
3267 netdev_dbg(dev, "failed to register new netdev %d\n", err);
3268 goto err;
3269 }
3270
3271 err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
3272 if (err < 0) {
3273 unregister_netdevice(dev);
3274 goto err;
3275 }
3276
3277 INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
3278 INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
3279 INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
3280 INIT_WORK(&amt->event_wq, amt_event_work);
3281 INIT_LIST_HEAD(&amt->tunnel_list);
3282 return 0;
3283 err:
3284 dev_put(amt->stream_dev);
3285 return err;
3286 }
3287
3288 static void amt_dellink(struct net_device *dev, struct list_head *head)
3289 {
3290 struct amt_dev *amt = netdev_priv(dev);
3291
3292 unregister_netdevice_queue(dev, head);
3293 netdev_upper_dev_unlink(amt->stream_dev, dev);
3294 dev_put(amt->stream_dev);
3295 }
3296
3297 static size_t amt_get_size(const struct net_device *dev)
3298 {
3299 return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
3300 nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
3301 nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
3302 nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
3303 nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
3304 nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
3305 nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
3306 nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
3307 }
3308
3309 static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
3310 {
3311 struct amt_dev *amt = netdev_priv(dev);
3312
3313 if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
3314 goto nla_put_failure;
3315 if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
3316 goto nla_put_failure;
3317 if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
3318 goto nla_put_failure;
3319 if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
3320 goto nla_put_failure;
3321 if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
3322 goto nla_put_failure;
3323 if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
3324 goto nla_put_failure;
3325 if (amt->remote_ip)
3326 if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
3327 goto nla_put_failure;
3328 if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
3329 goto nla_put_failure;
3330
3331 return 0;
3332
3333 nla_put_failure:
3334 return -EMSGSIZE;
3335 }
3336
3337 static struct rtnl_link_ops amt_link_ops __read_mostly = {
3338 .kind = "amt",
3339 .maxtype = IFLA_AMT_MAX,
3340 .policy = amt_policy,
3341 .priv_size = sizeof(struct amt_dev),
3342 .setup = amt_link_setup,
3343 .validate = amt_validate,
3344 .newlink = amt_newlink,
3345 .dellink = amt_dellink,
3346 .get_size = amt_get_size,
3347 .fill_info = amt_fill_info,
3348 };
3349
3350 static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
3351 {
3352 struct net_device *upper_dev;
3353 struct amt_dev *amt;
3354
3355 for_each_netdev(dev_net(dev), upper_dev) {
3356 if (netif_is_amt(upper_dev)) {
3357 amt = netdev_priv(upper_dev);
3358 if (amt->stream_dev == dev)
3359 return upper_dev;
3360 }
3361 }
3362
3363 return NULL;
3364 }
3365
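/* Netdev notifier for the underlying stream device: unregistering it
 * tears down the stacked AMT device, and its MTU changes propagate to
 * the AMT device minus the encapsulation overhead.
 */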
3366 static int amt_device_event(struct notifier_block *unused,
3367 unsigned long event, void *ptr)
3368 {
3369 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3370 struct net_device *upper_dev;
3371 struct amt_dev *amt;
3372 LIST_HEAD(list);
3373 int new_mtu;
3374
3375 upper_dev = amt_lookup_upper_dev(dev);
3376 if (!upper_dev)
3377 return NOTIFY_DONE;
3378 amt = netdev_priv(upper_dev);
3379
3380 switch (event) {
3381 case NETDEV_UNREGISTER:
3382 amt_dellink(amt->dev, &list);
3383 unregister_netdevice_many(&list);
3384 break;
3385 case NETDEV_CHANGEMTU:
3386 if (amt->mode == AMT_MODE_RELAY)
3387 new_mtu = dev->mtu - AMT_RELAY_HLEN;
3388 else
3389 new_mtu = dev->mtu - AMT_GW_HLEN;
3390
3391 dev_set_mtu(amt->dev, new_mtu);
3392 break;
3393 }
3394
3395 return NOTIFY_DONE;
3396 }
3397
3398 static struct notifier_block amt_notifier_block __read_mostly = {
3399 .notifier_call = amt_device_event,
3400 };
3401
3402 static int __init amt_init(void)
3403 {
3404 int err;
3405
3406 err = register_netdevice_notifier(&amt_notifier_block);
3407 if (err < 0)
3408 goto err;
3409
3410 err = rtnl_link_register(&amt_link_ops);
3411 if (err < 0)
3412 goto unregister_notifier;
3413
3414 amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
3415 if (!amt_wq) {
3416 err = -ENOMEM;
3417 goto rtnl_unregister;
3418 }
3419
3420 spin_lock_init(&source_gc_lock);
3421 spin_lock_bh(&source_gc_lock);
3422 INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
3423 mod_delayed_work(amt_wq, &source_gc_wq,
3424 msecs_to_jiffies(AMT_GC_INTERVAL));
3425 spin_unlock_bh(&source_gc_lock);
3426
3427 return 0;
3428
3429 rtnl_unregister:
3430 rtnl_link_unregister(&amt_link_ops);
3431 unregister_notifier:
3432 unregister_netdevice_notifier(&amt_notifier_block);
3433 err:
3434 pr_err("error loading AMT module\n");
3435 return err;
3436 }
3437 late_initcall(amt_init);
3438
3439 static void __exit amt_fini(void)
3440 {
3441 rtnl_link_unregister(&amt_link_ops);
3442 unregister_netdevice_notifier(&amt_notifier_block);
3443 cancel_delayed_work_sync(&source_gc_wq);
3444 __amt_source_gc_work();
3445 destroy_workqueue(amt_wq);
3446 }
3447 module_exit(amt_fini);
3448
3449 MODULE_LICENSE("GPL");
3450 MODULE_DESCRIPTION("Driver for Automatic Multicast Tunneling (AMT)");
3451 MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
3452 MODULE_ALIAS_RTNL_LINK("amt");
3453