// SPDX-License-Identifier: GPL-2.0
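/* Bridge multicast database (MDB) handling: netlink dump/get of MDB
 * entries and router ports, and RTM_NEWMDB/RTM_DELMDB add, delete and
 * bulk-delete request processing.
 */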
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

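/* Worst-case netlink attribute size of a single router port entry. */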
static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32));  /* MDBA_ROUTER_PATTR_VID */
}

size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}

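/* Dump the multicast router ports of @brmctx into an MDBA_ROUTER nest,
 * including the per-port IPv4/IPv6 router timers and the router type.
 */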
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
	if (flags & MDB_PG_FLAGS_OFFLOAD_FAILED)
		e->flags |= MDB_FLAGS_OFFLOAD_FAILED;
}

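/* Convert a UAPI br_mdb_entry (and an optional MDBE_ATTR_SOURCE
 * attribute) into the bridge's internal br_ip group representation.
 */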
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

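/* Dump the source list of a port group as a nested
 * MDBA_MDB_EATTR_SRC_LIST attribute. The list is walked either under
 * RCU (dump path) or with the bridge multicast_lock held, hence the
 * lockdep annotation below.
 */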
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

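/* Fill a single MDBA_MDB_ENTRY_INFO attribute, either for the
 * host-joined state (@p == NULL) or for one port group. The source list
 * and filter mode are only dumped when IGMPv3/MLDv2 is in use, since
 * older versions cannot represent source filtering.
 */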
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

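/* Walk the bridge MDB and fill an MDBA_MDB nest. cb->args[1] and
 * cb->args[2] carry the entry and port group indices at which an
 * interrupted dump is resumed.
 */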
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
		struct netlink_callback *cb)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->ifindex = dev->ifindex;

	rcu_read_lock();

	err = br_mdb_fill_info(skb, cb, dev);
	if (err)
		goto out;
	err = br_rports_fill_info(skb, &br->multicast_ctx);
	if (err)
		goto out;

out:
	rcu_read_unlock();
	nlmsg_end(skb, nlh);
	return err;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

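/* Worst-case netlink size of one port group entry, or of the host-joined
 * entry when @pg is NULL. The source list is only accounted for when the
 * configured IGMP/MLD version can actually dump one.
 */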
static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	size_t nlmsg_size, addr_size = 0;

		     /* MDBA_MDB_ENTRY_INFO */
	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
		     /* MDBA_MDB_EATTR_TIMER */
		     nla_total_size(sizeof(u32));

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
	       /* MDBA_MDB */
	       nla_total_size(0) +
	       /* MDBA_MDB_ENTRY */
	       nla_total_size(0) +
	       /* Port group entry */
	       rtnl_mdb_nlmsg_pg_size(pg);
}

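/* Send an RTM_NEWMDB/RTM_DELMDB notification to RTNLGRP_MDB listeners,
 * optionally notifying switchdev drivers first.
 */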
static void __br_mdb_notify(struct net_device *dev,
			    struct net_bridge_mdb_entry *mp,
			    struct net_bridge_port_group *pg,
			    int type, bool notify_switchdev)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (notify_switchdev)
		br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	__br_mdb_notify(dev, mp, pg, type, true);
}

void br_mdb_flag_change_notify(struct net_device *dev,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_port_group *pg)
{
	__br_mdb_notify(dev, mp, pg, RTM_NEWMDB, false);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

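/* Source addresses are received as NLA_BINARY so that one policy covers
 * both IPv4 (struct in_addr) and IPv6 (struct in6_addr); the exact
 * length is validated against the entry's protocol later, in
 * is_valid_mdb_source().
 */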
static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		timer_delete(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}

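/* Add a new (S, G) port group entry, or replace an existing one if
 * NLM_F_REPLACE was given. The list walk below also finds the insertion
 * point for the new entry.
 */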
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}

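/* Install the (S, G) forwarding entry that corresponds to one source of
 * a user-added (*, G) entry.
 */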
static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}

static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		timer_delete(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}

static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}

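/* Replace the source list of @pg: mark all existing entries for
 * deletion, add the new ones, then delete whatever is still marked.
 */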
static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}

static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		timer_delete(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}

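/* Add a new (*, G) port group entry, or replace an existing one if
 * NLM_F_REPLACE was given.
 */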
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}

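/* Resolve the multicast context and add the group either as a host join
 * or as a (*, G) / (S, G) port group entry. Called with the bridge
 * multicast_lock held.
 */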
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined && !(cfg->nlflags & NLM_F_REPLACE)) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}

static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}

static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}

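/* Parse and validate the common part of an RTM_NEWMDB/RTM_DELMDB request
 * into @cfg. Runs under RTNL, which keeps the port lookup stable.
 */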
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
			      struct nlattr *tb[], u16 nlmsg_flags,
			      struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlmsg_flags;

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
	    ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
		return -EINVAL;
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}

int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}

static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}

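/* Filter for bulk deletion; zeroed fields act as wildcards. */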
struct br_mdb_flush_desc {
	u32 port_ifindex;
	u16 vid;
	u8 rt_protocol;
	u8 state;
	u8 state_mask;
};

static const struct nla_policy br_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
	[MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
};

static int br_mdb_flush_desc_init(struct br_mdb_flush_desc *desc,
				  struct nlattr *tb[],
				  struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	desc->port_ifindex = entry->ifindex;
	desc->vid = entry->vid;
	desc->state = entry->state;

	if (!tb[MDBA_SET_ENTRY_ATTRS])
		return 0;

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_SET_ENTRY_ATTRS],
			       br_mdbe_attrs_del_bulk_pol, extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_STATE_MASK])
		desc->state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);

	if (mdbe_attrs[MDBE_ATTR_RTPROT])
		desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);

	return 0;
}

static void br_mdb_flush_host(struct net_bridge *br,
			      struct net_bridge_mdb_entry *mp,
			      const struct br_mdb_flush_desc *desc)
{
	u8 state;

	if (desc->port_ifindex && desc->port_ifindex != br->dev->ifindex)
		return;

	if (desc->rt_protocol)
		return;

	state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
	if (desc->state_mask && (state & desc->state_mask) != desc->state)
		return;

	br_multicast_host_leave(mp, true);
	if (!mp->ports && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

static void br_mdb_flush_pgs(struct net_bridge *br,
			     struct net_bridge_mdb_entry *mp,
			     const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
		u8 state;

		if (desc->port_ifindex &&
		    desc->port_ifindex != p->key.port->dev->ifindex) {
			pp = &p->next;
			continue;
		}

		if (desc->rt_protocol && desc->rt_protocol != p->rt_protocol) {
			pp = &p->next;
			continue;
		}

		state = p->flags & MDB_PG_FLAGS_PERMANENT ? MDB_PERMANENT : 0;
		if (desc->state_mask &&
		    (state & desc->state_mask) != desc->state) {
			pp = &p->next;
			continue;
		}

		br_multicast_del_pg(mp, p, pp);
	}
}

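/* Flush all MDB entries (host-joined state and port groups) matching
 * @desc.
 */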
static void br_mdb_flush(struct net_bridge *br,
			 const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_mdb_entry *mp;

	spin_lock_bh(&br->multicast_lock);

	/* Safe variant is not needed because entries are removed from the list
	 * upon group timer expiration or bridge deletion.
	 */
	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		if (desc->vid && desc->vid != mp->addr.vid)
			continue;

		br_mdb_flush_host(br, mp, desc);
		br_mdb_flush_pgs(br, mp, desc);
	}

	spin_unlock_bh(&br->multicast_lock);
}

int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
		    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_mdb_flush_desc desc = {};
	int err;

	err = br_mdb_flush_desc_init(&desc, tb, extack);
	if (err)
		return err;

	br_mdb_flush(br, &desc);

	return 0;
}

static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			    struct br_ip *group, struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		__mdb_entry_to_br_ip(entry, group, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
			       extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				 entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);

	return 0;
}

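/* Size the reply for the full MDB entry: the host-joined state plus
 * every port group, including their source lists.
 */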
static struct sk_buff *
br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
{
	struct net_bridge_port_group *pg;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);

	if (mp->host_joined)
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br))
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);

	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
}

static int br_mdb_get_reply_fill(struct sk_buff *skb,
				 struct net_bridge_mdb_entry *mp, u32 portid,
				 u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct net_bridge_port_group *pg;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = mp->br->dev->ifindex;
	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	if (mp->host_joined) {
		err = __mdb_fill_info(skb, mp, NULL);
		if (err)
			goto cancel;
	}

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br)) {
		err = __mdb_fill_info(skb, mp, pg);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

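/* RTM_GETMDB handler for a single entry lookup. The reply is sized and
 * filled under the multicast_lock so the entry cannot change in between.
 */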
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct sk_buff *skb;
	struct br_ip group;
	int err;

	err = br_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	/* Hold the multicast lock to ensure that the MDB entry does not change
	 * between the time the reply size is determined and when the reply is
	 * filled in.
	 */
	spin_lock_bh(&br->multicast_lock);

	mp = br_mdb_ip_get(br, &group);
	if (!mp || (!mp->ports && !mp->host_joined)) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		err = -ENOENT;
		goto unlock;
	}

	skb = br_mdb_get_reply_alloc(mp);
	if (!skb) {
		err = -ENOMEM;
		goto unlock;
	}

	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	spin_unlock_bh(&br->multicast_lock);

	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}