xref: /src/sys/netinet6/mld6.c (revision d19fd2f349226116f7effb281baa1eb32b8292e7)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2009 Bruce Simpson.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote
15  *    products derived from this software without specific prior written
16  *    permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	$KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
31  */
32 
33 /*-
34  * Copyright (c) 1988 Stephen Deering.
35  * Copyright (c) 1992, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  *
38  * This code is derived from software contributed to Berkeley by
39  * Stephen Deering of Stanford University.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  */
65 
66 #include "opt_inet.h"
67 #include "opt_inet6.h"
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/mbuf.h>
72 #include <sys/socket.h>
73 #include <sys/sysctl.h>
74 #include <sys/kernel.h>
75 #include <sys/callout.h>
76 #include <sys/malloc.h>
77 #include <sys/module.h>
78 #include <sys/ktr.h>
79 
80 #include <net/if.h>
81 #include <net/if_var.h>
82 #include <net/if_private.h>
83 #include <net/route.h>
84 #include <net/vnet.h>
85 
86 #include <netinet/in.h>
87 #include <netinet/in_var.h>
88 #include <netinet6/in6_var.h>
89 #include <netinet/ip6.h>
90 #include <netinet6/ip6_var.h>
91 #include <netinet6/scope6_var.h>
92 #include <netinet/icmp6.h>
93 #include <netinet6/ip6_mroute.h>
94 #include <netinet6/mld6.h>
95 #include <netinet6/mld6_var.h>
96 
97 #include <security/mac/mac_framework.h>
98 
99 #ifndef KTR_MLD
100 #define KTR_MLD KTR_INET6
101 #endif
102 
103 static void	mld_dispatch_packet(struct mbuf *);
104 static void	mld_dispatch_queue(struct mbufq *, int);
105 static void	mld_final_leave(struct in6_multi *, struct mld_ifsoftc *);
106 static void	mld_fasttimo_vnet(struct in6_multi_head *inmh);
107 static int	mld_handle_state_change(struct in6_multi *,
108 		    struct mld_ifsoftc *);
109 static int	mld_initial_join(struct in6_multi *, struct mld_ifsoftc *,
110 		    const int);
111 #ifdef KTR
112 static char *	mld_rec_type_to_str(const int);
113 #endif
114 static void	mld_set_version(struct mld_ifsoftc *, const int);
115 static void	mld_slowtimo_vnet(void);
116 static int	mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
117 		    /*const*/ struct mld_hdr *);
118 static int	mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
119 		    /*const*/ struct mld_hdr *);
120 static void	mld_v1_process_group_timer(struct in6_multi_head *,
121 		    struct in6_multi *);
122 static void	mld_v1_process_querier_timers(struct mld_ifsoftc *);
123 static int	mld_v1_transmit_report(struct in6_multi *, const int);
124 static void	mld_v1_update_group(struct in6_multi *, const int);
125 static void	mld_v2_cancel_link_timers(struct mld_ifsoftc *);
126 static void	mld_v2_dispatch_general_query(struct mld_ifsoftc *);
127 static struct mbuf *
128 		mld_v2_encap_report(struct ifnet *, struct mbuf *);
129 static int	mld_v2_enqueue_filter_change(struct mbufq *,
130 		    struct in6_multi *);
131 static int	mld_v2_enqueue_group_record(struct mbufq *,
132 		    struct in6_multi *, const int, const int, const int,
133 		    const int);
134 static int	mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
135 		    struct mbuf *, struct mldv2_query *, const int, const int);
136 static int	mld_v2_merge_state_changes(struct in6_multi *,
137 		    struct mbufq *);
138 static void	mld_v2_process_group_timers(struct in6_multi_head *,
139 		    struct mbufq *, struct mbufq *,
140 		    struct in6_multi *, const int);
141 static int	mld_v2_process_group_query(struct in6_multi *,
142 		    struct mld_ifsoftc *mli, int, struct mbuf *,
143 		    struct mldv2_query *, const int);
144 static int	sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
145 static int	sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);
146 
147 /*
148  * Normative references: RFC 2710, RFC 3590, RFC 3810.
149  *
150  * Locking:
151  *  * The MLD subsystem lock ends up being system-wide for the moment,
152  *    but could be per-VIMAGE later on.
153  *  * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
154  *    Any may be taken independently; if any are held at the same
155  *    time, the above lock order must be followed.
156  *  * IN6_MULTI_LOCK covers in6_multi.
157  *  * MLD_LOCK covers per-link state and any global variables in this file.
158  *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
159  *    per-link state iterators.
160  *
161  *  XXX LOR PREVENTION
162  *  A special case for IPv6 is the in6_setscope() routine. ip6_output()
163  *  will not accept an ifp; it wants an embedded scope ID, unlike
164  *  ip_output(), which happily takes the ifp given to it. The embedded
165  *  scope ID is only used by MLD to select the outgoing interface.
166  *
167  *  During interface attach and detach, MLD will take MLD_LOCK *after*
168  *  the LLTABLE_LOCK.
169  *  As in6_setscope() takes LLTABLE_LOCK then SCOPE_LOCK, we can't call
170  *  it with MLD_LOCK held without triggering an LOR. A netisr with indirect
171  *  dispatch could work around this, but we'd rather not do that, as it
172  *  can introduce other races.
173  *
174  *  As such, we exploit the fact that the scope ID is just the interface
175  *  index, and embed it in the IPv6 destination address accordingly.
176  *  This is potentially NOT VALID for MLDv1 reports, as they
177  *  are always sent to the multicast group itself; as MLDv2
178  *  reports are always sent to ff02::16, this is not an issue
179  *  when MLDv2 is in use.
180  *
181  *  This does not however eliminate the LOR when ip6_output() itself
182  *  calls in6_setscope() internally whilst MLD_LOCK is held. This will
183  *  trigger a LOR warning in WITNESS when the ifnet is detached.
184  *
185  *  The right answer is probably to make LLTABLE_LOCK an rwlock, given
186  *  how it's used across the network stack. Here we're simply exploiting
187  *  the fact that MLD runs at a similar layer in the stack to scope6.c.
188  *
189  * VIMAGE:
190  *  * Each in6_multi corresponds to an ifp, and each ifp corresponds
191  *    to a vnet in ifp->if_vnet.
192  */
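/*
 * Illustrative sketch (not from the original source): the permitted
 * order when all three locks are taken together, per the note above:
 *
 *	IN6_MULTI_LOCK();
 *	MLD_LOCK();
 *	IF_ADDR_WLOCK(ifp);
 *	... touch per-group and per-link state ...
 *	IF_ADDR_WUNLOCK(ifp);
 *	MLD_UNLOCK();
 *	IN6_MULTI_UNLOCK();
 */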
193 static struct mtx		 mld_mtx;
194 static MALLOC_DEFINE(M_MLD, "mld", "mld state");
195 
196 #define	MLD_EMBEDSCOPE(pin6, zoneid)					\
197 	if (IN6_IS_SCOPE_LINKLOCAL(pin6) ||				\
198 	    IN6_IS_ADDR_MC_INTFACELOCAL(pin6))				\
199 		(pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)		\
200 
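/*
 * Worked example of the KAME convention this macro implements: embedding
 * zone ID 2 in ff02::1 sets s6_addr16[1] to htons(2), giving the
 * kernel-internal form ff02:2::1; in6_clearscope() reverses this.
 */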
201 /*
202  * VIMAGE-wide globals.
203  */
204 VNET_DEFINE_STATIC(struct timeval, mld_gsrdelay) = {10, 0};
205 VNET_DEFINE_STATIC(LIST_HEAD(, mld_ifsoftc), mli_head);
206 VNET_DEFINE_STATIC(int, interface_timers_running6);
207 VNET_DEFINE_STATIC(int, state_change_timers_running6);
208 VNET_DEFINE_STATIC(int, current_state_timers_running6);
209 
210 #define	V_mld_gsrdelay			VNET(mld_gsrdelay)
211 #define	V_mli_head			VNET(mli_head)
212 #define	V_interface_timers_running6	VNET(interface_timers_running6)
213 #define	V_state_change_timers_running6	VNET(state_change_timers_running6)
214 #define	V_current_state_timers_running6	VNET(current_state_timers_running6)
215 
216 SYSCTL_DECL(_net_inet6);	/* Note: Not in any common header. */
217 
218 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
219     "IPv6 Multicast Listener Discovery");
220 
221 /*
222  * Virtualized sysctls.
223  */
224 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
225     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
226     &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
227     "Rate limit for MLDv2 Group-and-Source queries in seconds");
228 
229 /*
230  * Non-virtualized sysctls.
231  */
232 static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo,
233     CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo,
234     "Per-interface MLDv2 state");
235 
236 VNET_DEFINE_STATIC(bool, mld_v1enable) = true;
237 #define	V_mld_v1enable	VNET(mld_v1enable)
238 SYSCTL_BOOL(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RWTUN,
239     &VNET_NAME(mld_v1enable), 0, "Enable fallback to MLDv1");
240 
241 VNET_DEFINE_STATIC(bool, mld_v2enable) = true;
242 #define	V_mld_v2enable	VNET(mld_v2enable)
243 SYSCTL_BOOL(_net_inet6_mld, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RWTUN,
244     &VNET_NAME(mld_v2enable), 0, "Enable MLDv2");
245 
246 VNET_DEFINE_STATIC(bool, mld_use_allow) = true;
247 #define	V_mld_use_allow	VNET(mld_use_allow)
248 SYSCTL_BOOL(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_VNET | CTLFLAG_RWTUN,
249     &VNET_NAME(mld_use_allow), 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
250 
251 /*
252  * Packed Router Alert option structure declaration.
253  */
254 struct mld_raopt {
255 	struct ip6_hbh		hbh;
256 	struct ip6_opt		pad;
257 	struct ip6_opt_router	ra;
258 } __packed;
259 
260 /*
261  * Router Alert hop-by-hop option header.
262  */
263 static struct mld_raopt mld_ra = {
264 	.hbh = { 0, 0 },
265 	.pad = { .ip6o_type = IP6OPT_PADN, 0 },
266 	.ra = {
267 	    .ip6or_type = IP6OPT_ROUTER_ALERT,
268 	    .ip6or_len = IP6OPT_RTALERT_LEN - 2,
269 	    .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
270 	    .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
271 	}
272 };
273 static struct ip6_pktopts mld_po;
274 
275 static __inline void
276 mld_save_context(struct mbuf *m, struct ifnet *ifp)
277 {
278 
279 #ifdef VIMAGE
280 	m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
281 #endif /* VIMAGE */
282 	m->m_pkthdr.rcvif = ifp;
283 	m->m_pkthdr.flowid = ifp->if_index;
284 }
285 
286 static __inline void
287 mld_scrub_context(struct mbuf *m)
288 {
289 
290 	m->m_pkthdr.PH_loc.ptr = NULL;
291 	m->m_pkthdr.flowid = 0;
292 }
293 
294 /*
295  * Restore context from a queued output chain.
296  * Return saved ifindex.
297  *
298  * VIMAGE: The assertion is there to make sure that we
299  * actually called CURVNET_SET() with what's in the mbuf chain.
300  */
301 static __inline uint32_t
302 mld_restore_context(struct mbuf *m)
303 {
304 
305 #if defined(VIMAGE) && defined(INVARIANTS)
306 	KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr,
307 	    ("%s: called when curvnet was not restored: cuvnet %p m ptr %p",
308 	    __func__, curvnet, m->m_pkthdr.PH_loc.ptr));
309 #endif
310 	return (m->m_pkthdr.flowid);
311 }
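/*
 * A minimal usage sketch of the context helpers (the real consumers are
 * the enqueue paths and mld_dispatch_packet() below):
 *
 *	mld_save_context(m, ifp);		 before queueing
 *	...
 *	ifindex = mld_restore_context(m);	 at dispatch, curvnet set
 *	ifp = ifnet_byindex(ifindex);
 *	mld_scrub_context(m);			 before handing the mbuf off
 */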
312 
313 /*
314  * Retrieve or set threshold between group-source queries in seconds.
315  *
316  * VIMAGE: Assume curvnet set by caller.
317  * SMPng: NOTE: Serialized by MLD lock.
318  */
319 static int
320 sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
321 {
322 	int error;
323 	int i;
324 
325 	error = sysctl_wire_old_buffer(req, sizeof(int));
326 	if (error)
327 		return (error);
328 
329 	MLD_LOCK();
330 
331 	i = V_mld_gsrdelay.tv_sec;
332 
333 	error = sysctl_handle_int(oidp, &i, 0, req);
334 	if (error || !req->newptr)
335 		goto out_locked;
336 
337 	if (i < -1 || i >= 60) {
338 		error = EINVAL;
339 		goto out_locked;
340 	}
341 
342 	CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
343 	     V_mld_gsrdelay.tv_sec, i);
344 	V_mld_gsrdelay.tv_sec = i;
345 
346 out_locked:
347 	MLD_UNLOCK();
348 	return (error);
349 }
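/*
 * Example (illustrative): the knob above is exposed to userland as
 * net.inet6.mld.gsrdelay, e.g.
 *
 *	# sysctl net.inet6.mld.gsrdelay=20
 */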
350 
351 /*
352  * Expose struct mld_ifsoftc to userland, keyed by ifindex.
353  * For use by ifmcstat(8).
354  *
355  * VIMAGE: Assume curvnet set by caller. The node handler itself
356  * is not directly virtualized.
357  */
358 static int
359 sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
360 {
361 	struct epoch_tracker	 et;
362 	int			*name;
363 	int			 error;
364 	u_int			 namelen;
365 	struct ifnet		*ifp;
366 	struct mld_ifsoftc	*mli;
367 
368 	name = (int *)arg1;
369 	namelen = arg2;
370 
371 	if (req->newptr != NULL)
372 		return (EPERM);
373 
374 	if (namelen != 1)
375 		return (EINVAL);
376 
377 	error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
378 	if (error)
379 		return (error);
380 
381 	IN6_MULTI_LOCK();
382 	IN6_MULTI_LIST_LOCK();
383 	MLD_LOCK();
384 	NET_EPOCH_ENTER(et);
385 
386 	error = ENOENT;
387 	ifp = ifnet_byindex(name[0]);
388 	if (ifp == NULL)
389 		goto out_locked;
390 
391 	LIST_FOREACH(mli, &V_mli_head, mli_link) {
392 		if (ifp == mli->mli_ifp) {
393 			struct mld_ifinfo info;
394 
395 			info.mli_version = mli->mli_version;
396 			info.mli_v1_timer = mli->mli_v1_timer;
397 			info.mli_v2_timer = mli->mli_v2_timer;
398 			info.mli_flags = mli->mli_flags;
399 			info.mli_rv = mli->mli_rv;
400 			info.mli_qi = mli->mli_qi;
401 			info.mli_qri = mli->mli_qri;
402 			info.mli_uri = mli->mli_uri;
403 			error = SYSCTL_OUT(req, &info, sizeof(info));
404 			break;
405 		}
406 	}
407 
408 out_locked:
409 	NET_EPOCH_EXIT(et);
410 	MLD_UNLOCK();
411 	IN6_MULTI_LIST_UNLOCK();
412 	IN6_MULTI_UNLOCK();
413 	return (error);
414 }
415 
416 /*
417  * Dispatch an entire queue of pending packet chains.
418  * VIMAGE: Assumes the vnet pointer has been set.
419  */
420 static void
421 mld_dispatch_queue(struct mbufq *mq, int limit)
422 {
423 	struct mbuf *m;
424 
425 	while ((m = mbufq_dequeue(mq)) != NULL) {
426 		CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, m, mq);
427 		mld_dispatch_packet(m);
428 		if (--limit == 0)
429 			break;
430 	}
431 }
432 
433 /*
434  * Filter outgoing MLD report state by group.
435  *
436  * Reports are ALWAYS suppressed for the ALL-NODES group (ff02::1)
437  * and node-local addresses. However, kernel and socket consumers
438  * always embed the KAME scope ID in the address provided, so strip it
439  * when performing comparison.
440  * Note: This is not the same as the *multicast* scope.
441  *
442  * Return zero if the given group is one for which MLD reports
443  * should be suppressed, or non-zero if reports should be issued.
444  */
445 static __inline int
446 mld_is_addr_reported(const struct in6_addr *addr)
447 {
448 
449 	KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));
450 
451 	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
452 		return (0);
453 
454 	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
455 		struct in6_addr tmp = *addr;
456 		in6_clearscope(&tmp);
457 		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
458 			return (0);
459 	}
460 
461 	return (1);
462 }
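/*
 * For example: ff01::1 (node-local scope) and ff02::1 (link-local
 * all-nodes) are never reported, whereas a group such as ff02::101,
 * or any group of wider scope, is.
 */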
463 
464 /*
465  * Attach MLD when PF_INET6 is attached to an interface.  Assumes that the
466  * current VNET is set by the caller.
467  */
468 void
469 mld_domifattach(struct ifnet *ifp)
470 {
471 	struct mld_ifsoftc *mli = MLD_IFINFO(ifp);
472 
473 	CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp, if_name(ifp));
474 
475 	*mli = (struct mld_ifsoftc){
476 		.mli_ifp = ifp,
477 		.mli_version = MLD_VERSION_2,
478 		.mli_rv = MLD_RV_INIT,
479 		.mli_qi = MLD_QI_INIT,
480 		.mli_qri = MLD_QRI_INIT,
481 		.mli_uri = MLD_URI_INIT,
482 	};
483 	mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
484 	if ((ifp->if_flags & IFF_MULTICAST) == 0)
485 		mli->mli_flags |= MLIF_SILENT;
486 	if (V_mld_use_allow)
487 		mli->mli_flags |= MLIF_USEALLOW;
488 
489 	MLD_LOCK();
490 	LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
491 	MLD_UNLOCK();
492 }
493 
494 /*
495  * Hook for ifdetach.
496  *
497  * NOTE: Some finalization tasks need to run before the protocol domain
498  * is detached, but also before the link layer does its cleanup.
499  * Run before link-layer cleanup; clean up groups, but do not free MLD state.
500  *
501  * SMPng: Caller must hold IN6_MULTI_LOCK().
502  * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
503  * XXX This routine is also bitten by unlocked ifma_protospec access.
504  */
505 void
506 mld_ifdetach(struct ifnet *ifp, struct in6_multi_head *inmh)
507 {
508 	struct epoch_tracker     et;
509 	struct mld_ifsoftc	*mli;
510 	struct ifmultiaddr	*ifma;
511 	struct in6_multi	*inm;
512 
513 	CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
514 	    if_name(ifp));
515 
516 	IN6_MULTI_LIST_LOCK_ASSERT();
517 	MLD_LOCK();
518 
519 	mli = MLD_IFINFO(ifp);
520 	IF_ADDR_WLOCK(ifp);
521 	/*
522 	 * Extract list of in6_multi associated with the detaching ifp
523 	 * which the PF_INET6 layer is about to release.
524 	 */
525 	NET_EPOCH_ENTER(et);
526 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
527 		inm = in6m_ifmultiaddr_get_inm(ifma);
528 		if (inm == NULL)
529 			continue;
530 		in6m_disconnect_locked(inmh, inm);
531 
532 		if (mli->mli_version == MLD_VERSION_2) {
533 			in6m_clear_recorded(inm);
534 
535 			/*
536 			 * We need to release the final reference held
537 			 * for issuing the INCLUDE {}.
538 			 */
539 			if (inm->in6m_state == MLD_LEAVING_MEMBER) {
540 				inm->in6m_state = MLD_NOT_MEMBER;
541 				in6m_rele_locked(inmh, inm);
542 			}
543 		}
544 	}
545 	NET_EPOCH_EXIT(et);
546 	IF_ADDR_WUNLOCK(ifp);
547 	MLD_UNLOCK();
548 }
549 
550 /*
551  * Hook for domifdetach.
552  * Runs after link-layer cleanup; free MLD state.
553  */
554 void
555 mld_domifdetach(struct ifnet *ifp)
556 {
557 	struct mld_ifsoftc *mli = MLD_IFINFO(ifp);
558 
559 	CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
560 	    __func__, ifp, if_name(ifp));
561 
562 	MLD_LOCK();
563 	LIST_REMOVE(mli, mli_link);
564 	MLD_UNLOCK();
565 	mbufq_drain(&mli->mli_gq);
566 }
567 
568 /*
569  * Process a received MLDv1 general or address-specific query.
570  * Assumes that the query header has been pulled up to sizeof(struct mld_hdr).
571  *
572  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
573  * mld_addr. This is OK as we own the mbuf chain.
574  */
575 static int
576 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
577     /*const*/ struct mld_hdr *mld)
578 {
579 	struct ifmultiaddr	*ifma;
580 	struct mld_ifsoftc	*mli;
581 	struct in6_multi	*inm;
582 	int			 is_general_query;
583 	uint16_t		 timer;
584 #ifdef KTR
585 	char			 ip6tbuf[INET6_ADDRSTRLEN];
586 #endif
587 
588 	NET_EPOCH_ASSERT();
589 
590 	is_general_query = 0;
591 
592 	if (!V_mld_v1enable) {
593 		CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
594 		    ip6_sprintf(ip6tbuf, &mld->mld_addr),
595 		    ifp, if_name(ifp));
596 		return (0);
597 	}
598 
599 	/*
600 	 * RFC3810 Section 6.2: MLD queries must originate from
601 	 * a router's link-local address.
602 	 */
603 	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
604 		CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
605 		    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
606 		    ifp, if_name(ifp));
607 		return (0);
608 	}
609 
610 	/*
611 	 * Do address field validation upfront before we accept
612 	 * the query.
613 	 */
614 	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
615 		/*
616 		 * MLDv1 General Query.
617 		 * If this was not sent to the all-nodes group, ignore it.
618 		 */
619 		struct in6_addr		 dst;
620 
621 		dst = ip6->ip6_dst;
622 		in6_clearscope(&dst);
623 		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
624 			return (EINVAL);
625 		is_general_query = 1;
626 	} else {
627 		/*
628 		 * Embed scope ID of receiving interface in MLD query for
629 		 * lookup whilst we don't hold other locks.
630 		 */
631 		in6_setscope(&mld->mld_addr, ifp, NULL);
632 	}
633 
634 	IN6_MULTI_LIST_LOCK();
635 	MLD_LOCK();
636 
637 	/*
638 	 * Switch to MLDv1 host compatibility mode.
639 	 */
640 	mli = MLD_IFINFO(ifp);
641 	KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
642 	mld_set_version(mli, MLD_VERSION_1);
643 
644 	timer = (ntohs(mld->mld_maxdelay) * MLD_FASTHZ) / MLD_TIMER_SCALE;
645 	if (timer == 0)
646 		timer = 1;
647 
648 	if (is_general_query) {
649 		/*
650 		 * For each reporting group joined on this
651 		 * interface, kick the report timer.
652 		 */
653 		CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
654 			 ifp, if_name(ifp));
655 		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
656 			inm = in6m_ifmultiaddr_get_inm(ifma);
657 			if (inm == NULL)
658 				continue;
659 			mld_v1_update_group(inm, timer);
660 		}
661 	} else {
662 		/*
663 		 * MLDv1 Group-Specific Query.
664 		 * If this is a group-specific MLDv1 query, we need only
665 		 * look up the single group to process it.
666 		 */
667 		inm = in6m_lookup_locked(ifp, &mld->mld_addr);
668 		if (inm != NULL) {
669 			CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
670 			    ip6_sprintf(ip6tbuf, &mld->mld_addr),
671 			    ifp, if_name(ifp));
672 			mld_v1_update_group(inm, timer);
673 		}
674 		/* XXX Clear embedded scope ID as userland won't expect it. */
675 		in6_clearscope(&mld->mld_addr);
676 	}
677 
678 	MLD_UNLOCK();
679 	IN6_MULTI_LIST_UNLOCK();
680 
681 	return (0);
682 }
683 
684 /*
685  * Update the report timer on a group in response to an MLDv1 query.
686  *
687  * If we are becoming the reporting member for this group, start the timer.
688  * If we already are the reporting member for this group, and timer is
689  * below the threshold, reset it.
690  *
691  * We may be updating the group for the first time since we switched
692  * to MLDv2. If we are, then we must clear any recorded source lists,
693  * and transition to REPORTING state; the group timer is overloaded
694  * for group and group-source query responses.
695  *
696  * Unlike MLDv2, the delay per group should be jittered
697  * to avoid bursts of MLDv1 reports.
698  */
699 static void
700 mld_v1_update_group(struct in6_multi *inm, const int timer)
701 {
702 #ifdef KTR
703 	char			 ip6tbuf[INET6_ADDRSTRLEN];
704 #endif
705 
706 	CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
707 	    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
708 	    if_name(inm->in6m_ifp), timer);
709 
710 	IN6_MULTI_LIST_LOCK_ASSERT();
711 
712 	switch (inm->in6m_state) {
713 	case MLD_NOT_MEMBER:
714 	case MLD_SILENT_MEMBER:
715 		break;
716 	case MLD_REPORTING_MEMBER:
717 		if (inm->in6m_timer != 0 &&
718 		    inm->in6m_timer <= timer) {
719 			CTR1(KTR_MLD, "%s: REPORTING and timer running, "
720 			    "skipping.", __func__);
721 			break;
722 		}
723 		/* FALLTHROUGH */
724 	case MLD_SG_QUERY_PENDING_MEMBER:
725 	case MLD_G_QUERY_PENDING_MEMBER:
726 	case MLD_IDLE_MEMBER:
727 	case MLD_LAZY_MEMBER:
728 	case MLD_AWAKENING_MEMBER:
729 		CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
730 		inm->in6m_state = MLD_REPORTING_MEMBER;
731 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
732 		V_current_state_timers_running6 = 1;
733 		break;
734 	case MLD_SLEEPING_MEMBER:
735 		CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
736 		inm->in6m_state = MLD_AWAKENING_MEMBER;
737 		break;
738 	case MLD_LEAVING_MEMBER:
739 		break;
740 	}
741 }
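/*
 * Note: assuming the definition in mld6_var.h, MLD_RANDOM_DELAY(timer)
 * above yields a uniformly distributed value in [1, timer] fast-timeout
 * ticks, which provides the per-group jitter.
 */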
742 
743 /*
744  * Process a received MLDv2 general, group-specific or
745  * group-and-source-specific query.
746  *
747  * Assumes that mld points to a struct mldv2_query which is stored in
748  * contiguous memory.
749  *
750  * Return 0 if successful, otherwise an appropriate error code is returned.
751  */
752 static int
753 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
754     struct mbuf *m, struct mldv2_query *mld, const int off, const int icmp6len)
755 {
756 	struct mld_ifsoftc	*mli;
757 	struct in6_multi	*inm;
758 	uint32_t		 maxdelay, nsrc, qqi;
759 	int			 is_general_query;
760 	uint16_t		 timer;
761 	uint8_t			 qrv;
762 #ifdef KTR
763 	char			 ip6tbuf[INET6_ADDRSTRLEN];
764 #endif
765 
766 	NET_EPOCH_ASSERT();
767 
768 	if (!V_mld_v2enable) {
769 		CTR3(KTR_MLD, "ignore v2 query src %s on ifp %p(%s)",
770 		    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
771 		    ifp, if_name(ifp));
772 		return (0);
773 	}
774 
775 	/*
776 	 * RFC3810 Section 6.2: MLD queries must originate from
777 	 * a router's link-local address.
778 	 */
779 	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
780 		CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
781 		    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
782 		    ifp, if_name(ifp));
783 		return (0);
784 	}
785 
786 	is_general_query = 0;
787 
788 	CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, if_name(ifp));
789 
790 	maxdelay = ntohs(mld->mld_maxdelay);	/* in 1/10ths of a second */
791 	if (maxdelay >= 32768) {
792 		maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
793 			   (MLD_MRC_EXP(maxdelay) + 3);
794 	}
795 	timer = (maxdelay * MLD_FASTHZ) / MLD_TIMER_SCALE;
796 	if (timer == 0)
797 		timer = 1;
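	/*
	 * Worked example of the exponential encoding above: a maxdelay
	 * field of 0xffff has mant 0xfff and exp 7, so it decodes to
	 * (0xfff | 0x1000) << 10 = 8,387,584 tenths of a second.
	 */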
798 
799 	qrv = MLD_QRV(mld->mld_misc);
800 	if (qrv < 2) {
801 		CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
802 		    qrv, MLD_RV_INIT);
803 		qrv = MLD_RV_INIT;
804 	}
805 
806 	qqi = mld->mld_qqi;
807 	if (qqi >= 128) {
808 		qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
809 		     (MLD_QQIC_EXP(mld->mld_qqi) + 3);
810 	}
811 
812 	nsrc = ntohs(mld->mld_numsrc);
813 	if (nsrc > MLD_MAX_GS_SOURCES)
814 		return (EMSGSIZE);
815 	if (icmp6len < sizeof(struct mldv2_query) +
816 	    (nsrc * sizeof(struct in6_addr)))
817 		return (EMSGSIZE);
818 
819 	/*
820 	 * Do further input validation upfront to avoid resetting timers
821 	 * should we need to discard this query.
822 	 */
823 	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
824 		/*
825 		 * A general query with a source list has undefined
826 		 * behaviour; discard it.
827 		 */
828 		if (nsrc > 0)
829 			return (EINVAL);
830 		is_general_query = 1;
831 	} else {
832 		/*
833 		 * Embed scope ID of receiving interface in MLD query for
834 		 * lookup whilst we don't hold other locks (due to KAME
835 		 * locking lameness). We own this mbuf chain just now.
836 		 */
837 		in6_setscope(&mld->mld_addr, ifp, NULL);
838 	}
839 
840 	IN6_MULTI_LIST_LOCK();
841 	MLD_LOCK();
842 
843 	mli = MLD_IFINFO(ifp);
844 	KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
845 
846 	/*
847 	 * Discard the v2 query if we're in Compatibility Mode.
848 	 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
849 	 * until the Old Version Querier Present timer expires.
850 	 */
851 	if (mli->mli_version != MLD_VERSION_2)
852 		goto out_locked;
853 
854 	mld_set_version(mli, MLD_VERSION_2);
855 	mli->mli_rv = qrv;
856 	mli->mli_qi = qqi;
857 	mli->mli_qri = maxdelay;
858 
859 	CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
860 	    maxdelay);
861 
862 	if (is_general_query) {
863 		/*
864 		 * MLDv2 General Query.
865 		 *
866 		 * Schedule a current-state report on this ifp for
867 		 * all groups, possibly containing source lists.
868 		 *
869 		 * If there is a pending General Query response
870 		 * scheduled earlier than the selected delay, do
871 		 * not schedule any other reports.
872 		 * Otherwise, reset the interface timer.
873 		 */
874 		CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
875 		    ifp, if_name(ifp));
876 		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
877 			mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
878 			V_interface_timers_running6 = 1;
879 		}
880 	} else {
881 		/*
882 		 * MLDv2 Group-specific or Group-and-source-specific Query.
883 		 *
884 		 * Group-source-specific queries are throttled on
885 		 * a per-group basis to defeat denial-of-service attempts.
886 		 * Queries for groups we are not a member of on this
887 		 * link are simply ignored.
888 		 */
889 		inm = in6m_lookup_locked(ifp, &mld->mld_addr);
890 		if (inm == NULL)
891 			goto out_locked;
892 		if (nsrc > 0) {
893 			if (!ratecheck(&inm->in6m_lastgsrtv,
894 			    &V_mld_gsrdelay)) {
895 				CTR1(KTR_MLD, "%s: GS query throttled.",
896 				    __func__);
897 				goto out_locked;
898 			}
899 		}
900 		CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
901 		     ifp, if_name(ifp));
902 		/*
903 		 * If there is a pending General Query response
904 		 * scheduled sooner than the selected delay, no
905 		 * further report need be scheduled.
906 		 * Otherwise, prepare to respond to the
907 		 * group-specific or group-and-source query.
908 		 */
909 		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
910 			mld_v2_process_group_query(inm, mli, timer, m, mld, off);
911 
912 		/* XXX Clear embedded scope ID as userland won't expect it. */
913 		in6_clearscope(&mld->mld_addr);
914 	}
915 
916 out_locked:
917 	MLD_UNLOCK();
918 	IN6_MULTI_LIST_UNLOCK();
919 
920 	return (0);
921 }
922 
923 /*
924  * Process a received MLDv2 group-specific or group-and-source-specific
925  * query.
926  * Return <0 if any error occurred. Currently this is ignored.
927  */
928 static int
929 mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
930     int timer, struct mbuf *m0, struct mldv2_query *mld, const int off)
931 {
932 	int			 retval;
933 	uint16_t		 nsrc;
934 
935 	IN6_MULTI_LIST_LOCK_ASSERT();
936 	MLD_LOCK_ASSERT();
937 
938 	retval = 0;
939 
940 	switch (inm->in6m_state) {
941 	case MLD_NOT_MEMBER:
942 	case MLD_SILENT_MEMBER:
943 	case MLD_SLEEPING_MEMBER:
944 	case MLD_LAZY_MEMBER:
945 	case MLD_AWAKENING_MEMBER:
946 	case MLD_IDLE_MEMBER:
947 	case MLD_LEAVING_MEMBER:
948 		return (retval);
949 		break;
950 	case MLD_REPORTING_MEMBER:
951 	case MLD_G_QUERY_PENDING_MEMBER:
952 	case MLD_SG_QUERY_PENDING_MEMBER:
953 		break;
954 	}
955 
956 	nsrc = ntohs(mld->mld_numsrc);
957 
958 	/* Length should be checked by calling function. */
959 	KASSERT((m0->m_flags & M_PKTHDR) == 0 ||
960 	    m0->m_pkthdr.len >= off + sizeof(struct mldv2_query) +
961 	    nsrc * sizeof(struct in6_addr),
962 	    ("mldv2 packet is too short: (%d bytes < %zd bytes, m=%p)",
963 	    m0->m_pkthdr.len, off + sizeof(struct mldv2_query) +
964 	    nsrc * sizeof(struct in6_addr), m0));
965 
966 	/*
967 	 * Deal with group-specific queries upfront.
968 	 * If any group query is already pending, purge any recorded
969 	 * source-list state if it exists, and schedule a query response
970 	 * for this group-specific query.
971 	 */
972 	if (nsrc == 0) {
973 		if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
974 		    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
975 			in6m_clear_recorded(inm);
976 			timer = min(inm->in6m_timer, timer);
977 		}
978 		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
979 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
980 		V_current_state_timers_running6 = 1;
981 		return (retval);
982 	}
983 
984 	/*
985 	 * Deal with the case where a group-and-source-specific query has
986 	 * been received but a group-specific query is already pending.
987 	 */
988 	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
989 		timer = min(inm->in6m_timer, timer);
990 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
991 		V_current_state_timers_running6 = 1;
992 		return (retval);
993 	}
994 
995 	/*
996 	 * Finally, deal with the case where a group-and-source-specific
997 	 * query has been received, where a response to a previous g-s-r
998 	 * query exists, or none exists.
999 	 * In this case, we need to parse the source-list which the Querier
1000 	 * has provided us with and check if we have any source list filter
1001  * entries at T1 for these sources. If we do not, there is no need to
1002  * schedule a report and the query may be dropped.
1003 	 * If we do, we must record them and schedule a current-state
1004 	 * report for those sources.
1005 	 */
1006 	if (inm->in6m_nsrc > 0) {
1007 		struct in6_addr		 srcaddr;
1008 		int			 i, nrecorded;
1009 		int			 soff;
1010 
1011 		soff = off + sizeof(struct mldv2_query);
1012 		nrecorded = 0;
1013 		for (i = 0; i < nsrc; i++) {
1014 			m_copydata(m0, soff, sizeof(struct in6_addr),
1015 			    (caddr_t)&srcaddr);
1016 			retval = in6m_record_source(inm, &srcaddr);
1017 			if (retval < 0)
1018 				break;
1019 			nrecorded += retval;
1020 			soff += sizeof(struct in6_addr);
1021 		}
1022 		if (nrecorded > 0) {
1023 			CTR1(KTR_MLD,
1024 			    "%s: schedule response to SG query", __func__);
1025 			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
1026 			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1027 			V_current_state_timers_running6 = 1;
1028 		}
1029 	}
1030 
1031 	return (retval);
1032 }
1033 
1034 /*
1035  * Process a received MLDv1 host membership report.
1036  * Assumes mld points to mld_hdr in pulled up mbuf chain.
1037  *
1038  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1039  * mld_addr. This is OK as we own the mbuf chain.
1040  */
1041 static int
1042 mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
1043     /*const*/ struct mld_hdr *mld)
1044 {
1045 	struct in6_addr		 src, dst;
1046 	struct in6_ifaddr	*ia;
1047 	struct in6_multi	*inm;
1048 #ifdef KTR
1049 	char			 ip6tbuf[INET6_ADDRSTRLEN];
1050 #endif
1051 
1052 	NET_EPOCH_ASSERT();
1053 
1054 	if (!V_mld_v1enable) {
1055 		CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
1056 		    ip6_sprintf(ip6tbuf, &mld->mld_addr),
1057 		    ifp, if_name(ifp));
1058 		return (0);
1059 	}
1060 
1061 	if (ifp->if_flags & IFF_LOOPBACK)
1062 		return (0);
1063 
1064 	/*
1065 	 * MLDv1 reports must originate from a host's link-local address,
1066 	 * or the unspecified address (when booting).
1067 	 */
1068 	src = ip6->ip6_src;
1069 	in6_clearscope(&src);
1070 	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
1071 		CTR3(KTR_MLD, "ignore v1 report src %s on ifp %p(%s)",
1072 		    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
1073 		    ifp, if_name(ifp));
1074 		return (EINVAL);
1075 	}
1076 
1077 	/*
1078 	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1079 	 * group, and must be directed to the group itself.
1080 	 */
1081 	dst = ip6->ip6_dst;
1082 	in6_clearscope(&dst);
1083 	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1084 	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
1085 		CTR3(KTR_MLD, "ignore v1 report dst %s on ifp %p(%s)",
1086 		    ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
1087 		    ifp, if_name(ifp));
1088 		return (EINVAL);
1089 	}
1090 
1091 	/*
1092 	 * Make sure we don't hear our own membership report, as fast
1093 	 * leave requires knowing that we are the only member of a
1094 	 * group. Assume we used the link-local address if available,
1095 	 * otherwise look for ::.
1096 	 *
1097 	 * XXX Note that scope ID comparison is needed for the address
1098 	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1099 	 * performed for the on-wire address.
1100 	 */
1101 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1102 	if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
1103 	    (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
1104 		if (ia != NULL)
1105 			ifa_free(&ia->ia_ifa);
1106 		return (0);
1107 	}
1108 	if (ia != NULL)
1109 		ifa_free(&ia->ia_ifa);
1110 
1111 	CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
1112 	    ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp));
1113 
1114 	/*
1115 	 * Embed scope ID of receiving interface in MLD query for lookup
1116 	 * whilst we don't hold other locks (due to KAME locking lameness).
1117 	 */
1118 	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
1119 		in6_setscope(&mld->mld_addr, ifp, NULL);
1120 
1121 	IN6_MULTI_LIST_LOCK();
1122 	MLD_LOCK();
1123 
1124 	/*
1125 	 * MLDv1 report suppression.
1126 	 * If we are a member of this group, and our membership should be
1127 	 * reported, and our group timer is pending or about to be reset,
1128 	 * stop our group timer by transitioning to the 'lazy' state.
1129 	 */
1130 	inm = in6m_lookup_locked(ifp, &mld->mld_addr);
1131 	if (inm != NULL) {
1132 		struct mld_ifsoftc *mli;
1133 
1134 		mli = inm->in6m_mli;
1135 		KASSERT(mli != NULL,
1136 		    ("%s: no mli for ifp %p", __func__, ifp));
1137 
1138 		/*
1139 		 * If we are in MLDv2 host mode, do not allow the
1140 		 * other host's MLDv1 report to suppress our reports.
1141 		 */
1142 		if (mli->mli_version == MLD_VERSION_2)
1143 			goto out_locked;
1144 
1145 		inm->in6m_timer = 0;
1146 
1147 		switch (inm->in6m_state) {
1148 		case MLD_NOT_MEMBER:
1149 		case MLD_SILENT_MEMBER:
1150 		case MLD_SLEEPING_MEMBER:
1151 			break;
1152 		case MLD_REPORTING_MEMBER:
1153 		case MLD_IDLE_MEMBER:
1154 		case MLD_AWAKENING_MEMBER:
1155 			CTR3(KTR_MLD,
1156 			    "report suppressed for %s on ifp %p(%s)",
1157 			    ip6_sprintf(ip6tbuf, &mld->mld_addr),
1158 			    ifp, if_name(ifp));
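			/* FALLTHROUGH */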
1159 		case MLD_LAZY_MEMBER:
1160 			inm->in6m_state = MLD_LAZY_MEMBER;
1161 			break;
1162 		case MLD_G_QUERY_PENDING_MEMBER:
1163 		case MLD_SG_QUERY_PENDING_MEMBER:
1164 		case MLD_LEAVING_MEMBER:
1165 			break;
1166 		}
1167 	}
1168 
1169 out_locked:
1170 	MLD_UNLOCK();
1171 	IN6_MULTI_LIST_UNLOCK();
1172 
1173 	/* XXX Clear embedded scope ID as userland won't expect it. */
1174 	in6_clearscope(&mld->mld_addr);
1175 
1176 	return (0);
1177 }
1178 
1179 /*
1180  * MLD input path.
1181  *
1182  * Assume query messages which fit in a single ICMPv6 message header
1183  * have been pulled up.
1184  * Assume that userland will want to see the message, even if it
1185  * otherwise fails kernel input validation; do not free it.
1186  * Pullup may however free the mbuf chain m if it fails.
1187  *
1188  * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1189  */
1190 int
1191 mld_input(struct mbuf **mp, int off, int icmp6len)
1192 {
1193 	struct ifnet	*ifp;
1194 	struct ip6_hdr	*ip6;
1195 	struct mbuf	*m;
1196 	struct mld_hdr	*mld;
1197 	int		 mldlen;
1198 
1199 	m = *mp;
1200 	CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1201 
1202 	ifp = m->m_pkthdr.rcvif;
1203 
1204 	/* Pullup to appropriate size. */
1205 	if (m->m_len < off + sizeof(*mld)) {
1206 		m = m_pullup(m, off + sizeof(*mld));
1207 		if (m == NULL) {
1208 			ICMP6STAT_INC(icp6s_badlen);
1209 			return (IPPROTO_DONE);
1210 		}
1211 	}
1212 	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1213 	if (mld->mld_type == MLD_LISTENER_QUERY &&
1214 	    icmp6len >= sizeof(struct mldv2_query)) {
1215 		mldlen = sizeof(struct mldv2_query);
1216 	} else {
1217 		mldlen = sizeof(struct mld_hdr);
1218 	}
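	/*
	 * Note (RFC 3810 Section 8.1): a 24-octet query (sizeof(struct
	 * mld_hdr)) is an MLDv1 query, while one of 28 octets or more
	 * (sizeof(struct mldv2_query)) is MLDv2; the icmp6len tests here
	 * and in the dispatch below make exactly that distinction.
	 */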
1219 	if (m->m_len < off + mldlen) {
1220 		m = m_pullup(m, off + mldlen);
1221 		if (m == NULL) {
1222 			ICMP6STAT_INC(icp6s_badlen);
1223 			return (IPPROTO_DONE);
1224 		}
1225 	}
1226 	*mp = m;
1227 	ip6 = mtod(m, struct ip6_hdr *);
1228 	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1229 
1230 	/*
1231 	 * Userland needs to see all of this traffic for implementing
1232 	 * the endpoint discovery portion of multicast routing.
1233 	 */
1234 	switch (mld->mld_type) {
1235 	case MLD_LISTENER_QUERY:
1236 		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1237 		if (icmp6len == sizeof(struct mld_hdr)) {
1238 			if (mld_v1_input_query(ifp, ip6, mld) != 0)
1239 				return (0);
1240 		} else if (icmp6len >= sizeof(struct mldv2_query)) {
1241 			if (mld_v2_input_query(ifp, ip6, m,
1242 			    (struct mldv2_query *)mld, off, icmp6len) != 0)
1243 				return (0);
1244 		}
1245 		break;
1246 	case MLD_LISTENER_REPORT:
1247 		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1248 		if (mld_v1_input_report(ifp, ip6, mld) != 0)
1249 			return (0);
1250 		break;
1251 	case MLDV2_LISTENER_REPORT:
1252 		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1253 		break;
1254 	case MLD_LISTENER_DONE:
1255 		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1256 		break;
1257 	default:
1258 		break;
1259 	}
1260 
1261 	return (0);
1262 }
1263 
1264 /*
1265  * Fast timeout handler (global).
1266  * VIMAGE: Timeout handlers are expected to service all vimages.
1267  */
1268 static struct callout mldfast_callout;
1269 static void
1270 mld_fasttimo(void *arg __unused)
1271 {
1272 	struct epoch_tracker et;
1273 	struct in6_multi_head inmh;
1274 	VNET_ITERATOR_DECL(vnet_iter);
1275 
1276 	SLIST_INIT(&inmh);
1277 
1278 	NET_EPOCH_ENTER(et);
1279 	VNET_LIST_RLOCK_NOSLEEP();
1280 	VNET_FOREACH(vnet_iter) {
1281 		CURVNET_SET(vnet_iter);
1282 		mld_fasttimo_vnet(&inmh);
1283 		CURVNET_RESTORE();
1284 	}
1285 	VNET_LIST_RUNLOCK_NOSLEEP();
1286 	NET_EPOCH_EXIT(et);
1287 	in6m_release_list_deferred(&inmh);
1288 
1289 	callout_reset(&mldfast_callout, hz / MLD_FASTHZ, mld_fasttimo, NULL);
1290 }
1291 
1292 /*
1293  * Fast timeout handler (per-vnet).
1294  *
1295  * VIMAGE: Assume caller has set up our curvnet.
1296  */
1297 static void
1298 mld_fasttimo_vnet(struct in6_multi_head *inmh)
1299 {
1300 	struct mbufq		 scq;	/* State-change packets */
1301 	struct mbufq		 qrq;	/* Query response packets */
1302 	struct ifnet		*ifp;
1303 	struct mld_ifsoftc	*mli;
1304 	struct ifmultiaddr	*ifma;
1305 	struct in6_multi	*inm;
1306 	int			 uri_fasthz;
1307 
1308 	uri_fasthz = 0;
1309 
1310 	/*
1311 	 * Quick check to see if any work needs to be done, in order to
1312 	 * minimize the overhead of fasttimo processing.
1313 	 * SMPng: XXX Unlocked reads.
1314 	 */
1315 	if (!V_current_state_timers_running6 &&
1316 	    !V_interface_timers_running6 &&
1317 	    !V_state_change_timers_running6)
1318 		return;
1319 
1320 	IN6_MULTI_LIST_LOCK();
1321 	MLD_LOCK();
1322 
1323 	/*
1324 	 * MLDv2 General Query response timer processing.
1325 	 */
1326 	if (V_interface_timers_running6) {
1327 		CTR1(KTR_MLD, "%s: interface timers running", __func__);
1328 
1329 		V_interface_timers_running6 = 0;
1330 		LIST_FOREACH(mli, &V_mli_head, mli_link) {
1331 			if (mli->mli_v2_timer == 0) {
1332 				/* Do nothing. */
1333 			} else if (--mli->mli_v2_timer == 0) {
1334 				mld_v2_dispatch_general_query(mli);
1335 			} else {
1336 				V_interface_timers_running6 = 1;
1337 			}
1338 		}
1339 	}
1340 
1341 	if (!V_current_state_timers_running6 &&
1342 	    !V_state_change_timers_running6)
1343 		goto out_locked;
1344 
1345 	V_current_state_timers_running6 = 0;
1346 	V_state_change_timers_running6 = 0;
1347 
1348 	CTR1(KTR_MLD, "%s: state change timers running", __func__);
1349 
1350 	/*
1351 	 * MLD host report and state-change timer processing.
1352 	 * Note: Processing a v2 group timer may remove a node.
1353 	 */
1354 	LIST_FOREACH(mli, &V_mli_head, mli_link) {
1355 		ifp = mli->mli_ifp;
1356 
1357 		if (mli->mli_version == MLD_VERSION_2) {
1358 			uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
1359 			    MLD_FASTHZ);
1360 			mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS);
1361 			mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
1362 		}
1363 
1364 		IF_ADDR_WLOCK(ifp);
1365 		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1366 			inm = in6m_ifmultiaddr_get_inm(ifma);
1367 			if (inm == NULL)
1368 				continue;
1369 			switch (mli->mli_version) {
1370 			case MLD_VERSION_1:
1371 				mld_v1_process_group_timer(inmh, inm);
1372 				break;
1373 			case MLD_VERSION_2:
1374 				mld_v2_process_group_timers(inmh, &qrq,
1375 				    &scq, inm, uri_fasthz);
1376 				break;
1377 			}
1378 		}
1379 		IF_ADDR_WUNLOCK(ifp);
1380 
1381 		switch (mli->mli_version) {
1382 		case MLD_VERSION_1:
1383 			/*
1384 			 * Transmit reports for this lifecycle.  This
1385 			 * is done while not holding IF_ADDR_LOCK
1386 			 * since this can call
1387 			 * in6ifa_ifpforlinklocal() which locks
1388 			 * IF_ADDR_LOCK internally as well as
1389 			 * ip6_output() to transmit a packet.
1390 			 */
1391 			while ((inm = SLIST_FIRST(inmh)) != NULL) {
1392 				SLIST_REMOVE_HEAD(inmh, in6m_defer);
1393 				(void)mld_v1_transmit_report(inm,
1394 				    MLD_LISTENER_REPORT);
1395 			}
1396 			break;
1397 		case MLD_VERSION_2:
1398 			mld_dispatch_queue(&qrq, 0);
1399 			mld_dispatch_queue(&scq, 0);
1400 			break;
1401 		}
1402 	}
1403 
1404 out_locked:
1405 	MLD_UNLOCK();
1406 	IN6_MULTI_LIST_UNLOCK();
1407 }
1408 
1409 /*
1410  * Update host report group timer.
1411  * Will update the global pending timer flags.
1412  */
1413 static void
1414 mld_v1_process_group_timer(struct in6_multi_head *inmh, struct in6_multi *inm)
1415 {
1416 	int report_timer_expired;
1417 
1418 	IN6_MULTI_LIST_LOCK_ASSERT();
1419 	MLD_LOCK_ASSERT();
1420 
1421 	if (inm->in6m_timer == 0) {
1422 		report_timer_expired = 0;
1423 	} else if (--inm->in6m_timer == 0) {
1424 		report_timer_expired = 1;
1425 	} else {
1426 		V_current_state_timers_running6 = 1;
1427 		return;
1428 	}
1429 
1430 	switch (inm->in6m_state) {
1431 	case MLD_NOT_MEMBER:
1432 	case MLD_SILENT_MEMBER:
1433 	case MLD_IDLE_MEMBER:
1434 	case MLD_LAZY_MEMBER:
1435 	case MLD_SLEEPING_MEMBER:
1436 	case MLD_AWAKENING_MEMBER:
1437 		break;
1438 	case MLD_REPORTING_MEMBER:
1439 		if (report_timer_expired) {
1440 			inm->in6m_state = MLD_IDLE_MEMBER;
1441 			SLIST_INSERT_HEAD(inmh, inm, in6m_defer);
1442 		}
1443 		break;
1444 	case MLD_G_QUERY_PENDING_MEMBER:
1445 	case MLD_SG_QUERY_PENDING_MEMBER:
1446 	case MLD_LEAVING_MEMBER:
1447 		break;
1448 	}
1449 }
1450 
1451 /*
1452  * Update a group's timers for MLDv2.
1453  * Will update the global pending timer flags.
1454  * Note: Unlocked read from mli.
1455  */
1456 static void
1457 mld_v2_process_group_timers(struct in6_multi_head *inmh,
1458     struct mbufq *qrq, struct mbufq *scq,
1459     struct in6_multi *inm, const int uri_fasthz)
1460 {
1461 	int query_response_timer_expired;
1462 	int state_change_retransmit_timer_expired;
1463 #ifdef KTR
1464 	char ip6tbuf[INET6_ADDRSTRLEN];
1465 #endif
1466 
1467 	IN6_MULTI_LIST_LOCK_ASSERT();
1468 	MLD_LOCK_ASSERT();
1469 
1470 	query_response_timer_expired = 0;
1471 	state_change_retransmit_timer_expired = 0;
1472 
1473 	/*
1474 	 * During a transition from compatibility mode back to MLDv2,
1475 	 * a group record in REPORTING state may still have its group
1476 	 * timer active. This is a no-op in this function; it is easier
1477 	 * to deal with it here than to complicate the slow-timeout path.
1478 	 */
1479 	if (inm->in6m_timer == 0) {
1480 		query_response_timer_expired = 0;
1481 	} else if (--inm->in6m_timer == 0) {
1482 		query_response_timer_expired = 1;
1483 	} else {
1484 		V_current_state_timers_running6 = 1;
1485 	}
1486 
1487 	if (inm->in6m_sctimer == 0) {
1488 		state_change_retransmit_timer_expired = 0;
1489 	} else if (--inm->in6m_sctimer == 0) {
1490 		state_change_retransmit_timer_expired = 1;
1491 	} else {
1492 		V_state_change_timers_running6 = 1;
1493 	}
1494 
1495 	/* We are in fasttimo, so be quick about it. */
1496 	if (!state_change_retransmit_timer_expired &&
1497 	    !query_response_timer_expired)
1498 		return;
1499 
1500 	switch (inm->in6m_state) {
1501 	case MLD_NOT_MEMBER:
1502 	case MLD_SILENT_MEMBER:
1503 	case MLD_SLEEPING_MEMBER:
1504 	case MLD_LAZY_MEMBER:
1505 	case MLD_AWAKENING_MEMBER:
1506 	case MLD_IDLE_MEMBER:
1507 		break;
1508 	case MLD_G_QUERY_PENDING_MEMBER:
1509 	case MLD_SG_QUERY_PENDING_MEMBER:
1510 		/*
1511 		 * Respond to a previously pending Group-Specific
1512 		 * or Group-and-Source-Specific query by enqueueing
1513 		 * the appropriate Current-State report for
1514 		 * immediate transmission.
1515 		 */
1516 		if (query_response_timer_expired) {
1517 			int retval __unused;
1518 
1519 			retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
1520 			    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
1521 			    0);
1522 			CTR2(KTR_MLD, "%s: enqueue record = %d",
1523 			    __func__, retval);
1524 			inm->in6m_state = MLD_REPORTING_MEMBER;
1525 			in6m_clear_recorded(inm);
1526 		}
1527 		/* FALLTHROUGH */
1528 	case MLD_REPORTING_MEMBER:
1529 	case MLD_LEAVING_MEMBER:
1530 		if (state_change_retransmit_timer_expired) {
1531 			/*
1532 			 * State-change retransmission timer fired.
1533 			 * If there are any further pending retransmissions,
1534 			 * set the global pending state-change flag, and
1535 			 * reset the timer.
1536 			 */
1537 			if (--inm->in6m_scrv > 0) {
1538 				inm->in6m_sctimer = uri_fasthz;
1539 				V_state_change_timers_running6 = 1;
1540 			}
1541 			/*
1542 			 * Retransmit the previously computed state-change
1543 			 * report. If there are no further pending
1544 			 * retransmissions, the mbuf queue will be consumed.
1545 			 * Update T0 state to T1 as we have now sent
1546 			 * a state-change.
1547 			 */
1548 			(void)mld_v2_merge_state_changes(inm, scq);
1549 
1550 			in6m_commit(inm);
1551 			CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
1552 			    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1553 			    if_name(inm->in6m_ifp));
1554 
1555 			/*
1556 			 * If we are leaving the group for good, make sure
1557 			 * we release MLD's reference to it.
1558 			 * This release must be deferred using a SLIST,
1559 			 * as we are called from a loop which traverses
1560 			 * the if_multiaddrs CK_STAILQ.
1561 			 */
1562 			if (inm->in6m_state == MLD_LEAVING_MEMBER &&
1563 			    inm->in6m_scrv == 0) {
1564 				inm->in6m_state = MLD_NOT_MEMBER;
1565 				in6m_disconnect_locked(inmh, inm);
1566 				in6m_rele_locked(inmh, inm);
1567 			}
1568 		}
1569 		break;
1570 	}
1571 }
1572 
1573 /*
1574  * Switch to a different version on the given interface,
1575  * as per RFC 3810 Section 9.12.
1576  */
1577 static void
1578 mld_set_version(struct mld_ifsoftc *mli, const int version)
1579 {
1580 	int old_version_timer;
1581 
1582 	MLD_LOCK_ASSERT();
1583 
1584 	CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
1585 	    version, mli->mli_ifp, if_name(mli->mli_ifp));
1586 
1587 	if (version == MLD_VERSION_1) {
1588 		/*
1589 		 * Compute the "Older Version Querier Present" timer as per
1590 		 * Section 9.12.
1591 		 */
1592 		old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
1593 		old_version_timer *= MLD_SLOWHZ;
1594 		mli->mli_v1_timer = old_version_timer;
1595 	}
1596 
1597 	if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
1598 		mli->mli_version = MLD_VERSION_1;
1599 		mld_v2_cancel_link_timers(mli);
1600 	}
1601 }
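/*
 * Worked example, assuming the defaults from mld6_var.h (MLD_RV_INIT 2,
 * MLD_QI_INIT 125s, MLD_QRI_INIT 10s): the timer above computes to
 * (2 * 125) + 10 = 260 seconds, i.e. 260 * MLD_SLOWHZ slowtimo ticks.
 */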
1602 
1603 /*
1604  * Cancel pending MLDv2 timers for the given link and all groups
1605  * joined on it; state-change, general-query, and group-query timers.
1606  */
1607 static void
1608 mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
1609 {
1610 	struct epoch_tracker	 et;
1611 	struct in6_multi_head	 inmh;
1612 	struct ifmultiaddr	*ifma;
1613 	struct ifnet		*ifp;
1614 	struct in6_multi	*inm;
1615 
1616 	CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
1617 	    mli->mli_ifp, if_name(mli->mli_ifp));
1618 
1619 	SLIST_INIT(&inmh);
1620 	IN6_MULTI_LIST_LOCK_ASSERT();
1621 	MLD_LOCK_ASSERT();
1622 
1623 	/*
1624 	 * Fast-track this potentially expensive operation
1625 	 * by checking all the global 'timer pending' flags.
1626 	 */
1627 	if (!V_interface_timers_running6 &&
1628 	    !V_state_change_timers_running6 &&
1629 	    !V_current_state_timers_running6)
1630 		return;
1631 
1632 	mli->mli_v2_timer = 0;
1633 
1634 	ifp = mli->mli_ifp;
1635 
1636 	IF_ADDR_WLOCK(ifp);
1637 	NET_EPOCH_ENTER(et);
1638 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1639 		inm = in6m_ifmultiaddr_get_inm(ifma);
1640 		if (inm == NULL)
1641 			continue;
1642 		switch (inm->in6m_state) {
1643 		case MLD_NOT_MEMBER:
1644 		case MLD_SILENT_MEMBER:
1645 		case MLD_IDLE_MEMBER:
1646 		case MLD_LAZY_MEMBER:
1647 		case MLD_SLEEPING_MEMBER:
1648 		case MLD_AWAKENING_MEMBER:
1649 			break;
1650 		case MLD_LEAVING_MEMBER:
1651 			/*
1652 			 * If we are leaving the group and switching
1653 			 * version, we need to release the final
1654 			 * reference held for issuing the INCLUDE {}.
1655 			 */
1656 			if (inm->in6m_refcount == 1)
1657 				in6m_disconnect_locked(&inmh, inm);
1658 			in6m_rele_locked(&inmh, inm);
1659 			/* FALLTHROUGH */
1660 		case MLD_G_QUERY_PENDING_MEMBER:
1661 		case MLD_SG_QUERY_PENDING_MEMBER:
1662 			in6m_clear_recorded(inm);
1663 			/* FALLTHROUGH */
1664 		case MLD_REPORTING_MEMBER:
1665 			inm->in6m_sctimer = 0;
1666 			inm->in6m_timer = 0;
1667 			inm->in6m_state = MLD_REPORTING_MEMBER;
1668 			/*
1669 			 * Free any pending MLDv2 state-change records.
1670 			 */
1671 			mbufq_drain(&inm->in6m_scq);
1672 			break;
1673 		}
1674 	}
1675 	NET_EPOCH_EXIT(et);
1676 	IF_ADDR_WUNLOCK(ifp);
1677 	in6m_release_list_deferred(&inmh);
1678 }
1679 
1680 /*
1681  * Global slowtimo handler.
1682  * VIMAGE: Timeout handlers are expected to service all vimages.
1683  */
1684 static struct callout mldslow_callout;
1685 static void
1686 mld_slowtimo(void *arg __unused)
1687 {
1688 	VNET_ITERATOR_DECL(vnet_iter);
1689 
1690 	VNET_LIST_RLOCK_NOSLEEP();
1691 	VNET_FOREACH(vnet_iter) {
1692 		CURVNET_SET(vnet_iter);
1693 		mld_slowtimo_vnet();
1694 		CURVNET_RESTORE();
1695 	}
1696 	VNET_LIST_RUNLOCK_NOSLEEP();
1697 
1698 	callout_reset(&mldslow_callout, hz / MLD_SLOWHZ, mld_slowtimo, NULL);
1699 }
1700 
1701 /*
1702  * Per-vnet slowtimo handler.
1703  */
1704 static void
1705 mld_slowtimo_vnet(void)
1706 {
1707 	struct mld_ifsoftc *mli;
1708 
1709 	MLD_LOCK();
1710 
1711 	LIST_FOREACH(mli, &V_mli_head, mli_link) {
1712 		mld_v1_process_querier_timers(mli);
1713 	}
1714 
1715 	MLD_UNLOCK();
1716 }
1717 
1718 /*
1719  * Update the Older Version Querier Present timers for a link.
1720  * See Section 9.12 of RFC 3810.
1721  */
1722 static void
1723 mld_v1_process_querier_timers(struct mld_ifsoftc *mli)
1724 {
1725 
1726 	MLD_LOCK_ASSERT();
1727 
1728 	if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
1729 		/*
1730 		 * MLDv1 Querier Present timer expired; revert to MLDv2.
1731 		 */
1732 		CTR5(KTR_MLD,
1733 		    "%s: transition from v%d -> v%d on %p(%s)",
1734 		    __func__, mli->mli_version, MLD_VERSION_2,
1735 		    mli->mli_ifp, if_name(mli->mli_ifp));
1736 		mli->mli_version = MLD_VERSION_2;
1737 	}
1738 }
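
/*
 * A worked example of the countdown above, assuming the RFC 3810
 * defaults (Robustness Variable RV = 2, Query Interval QI = 125s,
 * Query Response Interval QRI = 10s) and MLD_SLOWHZ = 2 ticks/s;
 * the constants are assumptions taken from mld6_var.h, not restated
 * here.  RFC 3810 Section 9.12 defines:
 *
 *	Older Version Querier Present timeout = RV * QI + QRI
 *					      = 2 * 125 + 10 = 260s
 *
 * so mli_v1_timer would start at roughly 260 * MLD_SLOWHZ = 520
 * slowtimo ticks when an MLDv1 query is heard, and the link reverts
 * to MLDv2 only after that many calls to this function.
 */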
1739 
1740 /*
1741  * Transmit an MLDv1 report immediately.
1742  */
1743 static int
1744 mld_v1_transmit_report(struct in6_multi *in6m, const int type)
1745 {
1746 	struct ifnet		*ifp;
1747 	struct in6_ifaddr	*ia;
1748 	struct ip6_hdr		*ip6;
1749 	struct mbuf		*mh, *md;
1750 	struct mld_hdr		*mld;
1751 
1752 	NET_EPOCH_ASSERT();
1753 	IN6_MULTI_LIST_LOCK_ASSERT();
1754 	MLD_LOCK_ASSERT();
1755 
1756 	ifp = in6m->in6m_ifp;
1757 	/* in process of being freed */
1758 	if (ifp == NULL)
1759 		return (0);
1760 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1761 	/* ia may be NULL if link-local address is tentative. */
1762 
1763 	mh = m_gethdr(M_NOWAIT, MT_DATA);
1764 	if (mh == NULL) {
1765 		if (ia != NULL)
1766 			ifa_free(&ia->ia_ifa);
1767 		return (ENOMEM);
1768 	}
1769 	md = m_get(M_NOWAIT, MT_DATA);
1770 	if (md == NULL) {
1771 		m_free(mh);
1772 		if (ia != NULL)
1773 			ifa_free(&ia->ia_ifa);
1774 		return (ENOMEM);
1775 	}
1776 	mh->m_next = md;
1777 
1778 	/*
1779 	 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
1780 	 * that ether_output() does not need to allocate another mbuf
1781 	 * for the header in the most common case.
1782 	 */
1783 	M_ALIGN(mh, sizeof(struct ip6_hdr));
1784 	mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
1785 	mh->m_len = sizeof(struct ip6_hdr);
1786 
1787 	ip6 = mtod(mh, struct ip6_hdr *);
1788 	ip6->ip6_flow = 0;
1789 	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
1790 	ip6->ip6_vfc |= IPV6_VERSION;
1791 	ip6->ip6_nxt = IPPROTO_ICMPV6;
1792 	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
1793 	ip6->ip6_dst = in6m->in6m_addr;
1794 
1795 	md->m_len = sizeof(struct mld_hdr);
1796 	mld = mtod(md, struct mld_hdr *);
1797 	mld->mld_type = type;
1798 	mld->mld_code = 0;
1799 	mld->mld_cksum = 0;
1800 	mld->mld_maxdelay = 0;
1801 	mld->mld_reserved = 0;
1802 	mld->mld_addr = in6m->in6m_addr;
1803 	in6_clearscope(&mld->mld_addr);
1804 	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
1805 	    sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
1806 
1807 	mld_save_context(mh, ifp);
1808 	mh->m_flags |= M_MLDV1;
1809 
1810 	mld_dispatch_packet(mh);
1811 
1812 	if (ia != NULL)
1813 		ifa_free(&ia->ia_ifa);
1814 	return (0);
1815 }
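
/*
 * For reference, the MLDv1 report built above is a two-mbuf chain
 * (sizes follow from the structure definitions):
 *
 *	mh: struct ip6_hdr	40 bytes  (src = link-local or ::)
 *	md: struct mld_hdr	24 bytes  (type/code/cksum/maxdelay/
 *					   reserved + 16-byte group)
 *
 * The ICMPv6 checksum covers the MLD header plus the IPv6
 * pseudo-header, so the embedded scope ID is cleared from mld_addr
 * before the checksum is computed; the hop-by-hop Router Alert
 * option is prepended later by ip6_output() via mld_po.
 */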
1816 
1817 /*
1818  * Process a state change from the upper layer for the given IPv6 group.
1819  *
1820  * Each socket holds a reference on the in6_multi in its own ip6_moptions.
1821  * The socket layer will have made the necessary updates to the group
1822  * state; it is now up to MLD to issue a state change report if there
1823  * has been any change between T0 (when the last state-change was issued)
1824  * and T1 (now).
1825  *
1826  * We use the MLDv2 state machine at the group level. The MLD module,
1827  * however, makes the decision as to which MLD protocol version to speak.
1828  * A state change *from* INCLUDE {} always means an initial join.
1829  * A state change *to* INCLUDE {} always means a final leave.
1830  *
1831  * If delay is non-zero, and the state change is an initial multicast
1832  * join, the state change report will be delayed by 'delay' ticks
1833  * in units of MLD_FASTHZ if MLDv1 is active on the link; otherwise
1834  * the initial MLDv2 state change report will be delayed by whichever
1835  * is sooner, a pending state-change timer or delay itself.
1836  *
1837  * VIMAGE: curvnet should have been set by caller, as this routine
1838  * is called from the socket option handlers.
1839  */
1840 int
1841 mld_change_state(struct in6_multi *inm, const int delay)
1842 {
1843 	struct mld_ifsoftc *mli;
1844 	struct ifnet *ifp;
1845 	int error;
1846 
1847 	IN6_MULTI_LIST_LOCK_ASSERT();
1848 
1849 	error = 0;
1850 
1851 	/*
1852 	 * Check if the in6_multi has already been disconnected.
1853 	 */
1854 	if (inm->in6m_ifp == NULL) {
1855 		CTR1(KTR_MLD, "%s: inm is disconnected", __func__);
1856 		return (0);
1857 	}
1858 
1859 	/*
1860 	 * Try to detect if the upper layer just asked us to change state
1861 	 * for an interface which has now gone away.
1862 	 */
1863 	KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
1864 	ifp = inm->in6m_ifma->ifma_ifp;
1865 	if (ifp == NULL)
1866 		return (0);
1867 	/*
1868 	 * Sanity check that netinet6's notion of ifp is the
1869 	 * same as net's.
1870 	 */
1871 	KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
1872 
1873 	MLD_LOCK();
1874 	mli = MLD_IFINFO(ifp);
1875 	KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
1876 
1877 	/*
1878 	 * If we detect a state transition to or from MCAST_UNDEFINED
1879 	 * for this group, then we are starting or finishing an MLD
1880 	 * life cycle for this group.
1881 	 */
1882 	if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
1883 		CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
1884 		    inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
1885 		if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
1886 			CTR1(KTR_MLD, "%s: initial join", __func__);
1887 			error = mld_initial_join(inm, mli, delay);
1888 			goto out_locked;
1889 		} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
1890 			CTR1(KTR_MLD, "%s: final leave", __func__);
1891 			mld_final_leave(inm, mli);
1892 			goto out_locked;
1893 		}
1894 	} else {
1895 		CTR1(KTR_MLD, "%s: filter set change", __func__);
1896 	}
1897 
1898 	error = mld_handle_state_change(inm, mli);
1899 
1900 out_locked:
1901 	MLD_UNLOCK();
1902 	return (error);
1903 }
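
/*
 * Illustrative only: a typical caller in the socket-option path is
 * expected to update the group's T1 state first and then invoke this
 * function with the multicast list lock held, roughly as below
 * (details and error handling elided; in6m_merge() is the
 * in6_mcast.c helper that rewrites T1 from a socket's filter):
 *
 *	IN6_MULTI_LIST_LOCK();
 *	error = in6m_merge(inm, imf);
 *	if (error == 0)
 *		error = mld_change_state(inm, 0);
 *	IN6_MULTI_LIST_UNLOCK();
 */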
1904 
1905 /*
1906  * Perform the initial join for an MLD group.
1907  *
1908  * When joining a group:
1909  *  If the group should have its MLD traffic suppressed, do nothing.
1910  *  MLDv1 starts sending MLDv1 host membership reports.
1911  *  MLDv2 will schedule an MLDv2 state-change report containing the
1912  *  initial state of the membership.
1913  *
1914  * If the delay argument is non-zero, then we must delay sending the
1915  * initial state change for delay ticks (in units of MLD_FASTHZ).
1916  */
1917 static int
1918 mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
1919     const int delay)
1920 {
1921 	struct epoch_tracker     et;
1922 	struct ifnet		*ifp;
1923 	struct mbufq		*mq;
1924 	int			 error, retval, syncstates;
1925 	int			 odelay;
1926 #ifdef KTR
1927 	char			 ip6tbuf[INET6_ADDRSTRLEN];
1928 #endif
1929 
1930 	CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
1931 	    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1932 	    inm->in6m_ifp, if_name(inm->in6m_ifp));
1933 
1934 	error = 0;
1935 	syncstates = 1;
1936 
1937 	ifp = inm->in6m_ifp;
1938 
1939 	IN6_MULTI_LIST_LOCK_ASSERT();
1940 	MLD_LOCK_ASSERT();
1941 
1942 	KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
1943 
1944 	/*
1945 	 * Groups joined on loopback or marked as 'not reported'
1946 	 * enter the MLD_SILENT_MEMBER state and
1947 	 * are never reported in any protocol exchanges.
1948 	 * All other groups enter the appropriate state machine
1949 	 * for the version in use on this link.
1950 	 * A link marked as MLIF_SILENT causes MLD to be completely
1951 	 * disabled for the link.
1952 	 */
1953 	if ((ifp->if_flags & IFF_LOOPBACK) ||
1954 	    (mli->mli_flags & MLIF_SILENT) ||
1955 	    !mld_is_addr_reported(&inm->in6m_addr)) {
1956 		CTR1(KTR_MLD,
1957 "%s: not kicking state machine for silent group", __func__);
1958 		inm->in6m_state = MLD_SILENT_MEMBER;
1959 		inm->in6m_timer = 0;
1960 	} else {
1961 		/*
1962 		 * Deal with overlapping in_multi lifecycle.
1963 		 * If this group was LEAVING, then make sure
1964 		 * we drop the reference we picked up to keep the
1965 		 * group around for the final INCLUDE {} enqueue.
1966 		 */
1967 		if (mli->mli_version == MLD_VERSION_2 &&
1968 		    inm->in6m_state == MLD_LEAVING_MEMBER) {
1969 			inm->in6m_refcount--;
1970 			MPASS(inm->in6m_refcount > 0);
1971 		}
1972 		inm->in6m_state = MLD_REPORTING_MEMBER;
1973 
1974 		switch (mli->mli_version) {
1975 		case MLD_VERSION_1:
1976 			/*
1977 			 * If a delay was provided, only use it if
1978 			 * it is greater than the delay normally
1979 			 * used for an MLDv1 state change report,
1980 			 * and delay sending the initial MLDv1 report
1981 			 * by not transitioning to the IDLE state.
1982 			 */
1983 			odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * MLD_FASTHZ);
1984 			if (delay) {
1985 				inm->in6m_timer = max(delay, odelay);
1986 				V_current_state_timers_running6 = 1;
1987 			} else {
1988 				inm->in6m_state = MLD_IDLE_MEMBER;
1989 				NET_EPOCH_ENTER(et);
1990 				error = mld_v1_transmit_report(inm,
1991 				     MLD_LISTENER_REPORT);
1992 				NET_EPOCH_EXIT(et);
1993 				if (error == 0) {
1994 					inm->in6m_timer = odelay;
1995 					V_current_state_timers_running6 = 1;
1996 				}
1997 			}
1998 			break;
1999 
2000 		case MLD_VERSION_2:
2001 			/*
2002 			 * Defer update of T0 to T1, until the first copy
2003 			 * of the state change has been transmitted.
2004 			 */
2005 			syncstates = 0;
2006 
2007 			/*
2008 			 * Immediately enqueue a State-Change Report for
2009 			 * this interface, freeing any previous reports.
2010 			 * Don't kick the timers if there is nothing to do,
2011 			 * or if an error occurred.
2012 			 */
2013 			mq = &inm->in6m_scq;
2014 			mbufq_drain(mq);
2015 			retval = mld_v2_enqueue_group_record(mq, inm, 1,
2016 			    0, 0, (mli->mli_flags & MLIF_USEALLOW));
2017 			CTR2(KTR_MLD, "%s: enqueue record = %d",
2018 			    __func__, retval);
2019 			if (retval <= 0) {
2020 				error = retval * -1;
2021 				break;
2022 			}
2023 
2024 			/*
2025 			 * Schedule transmission of pending state-change
2026 			 * report up to RV times for this link. The timer
2027 			 * will fire at the next mld_fasttimo (~200ms),
2028 			 * giving us an opportunity to merge the reports.
2029 			 *
2030 			 * If a delay was provided to this function, only
2031 			 * use this delay if sooner than the existing one.
2032 			 */
2033 			KASSERT(mli->mli_rv > 1,
2034 			   ("%s: invalid robustness %d", __func__,
2035 			    mli->mli_rv));
2036 			inm->in6m_scrv = mli->mli_rv;
2037 			if (delay) {
2038 				if (inm->in6m_sctimer > 1) {
2039 					inm->in6m_sctimer =
2040 					    min(inm->in6m_sctimer, delay);
2041 				} else
2042 					inm->in6m_sctimer = delay;
2043 			} else
2044 				inm->in6m_sctimer = 1;
2045 			V_state_change_timers_running6 = 1;
2046 
2047 			error = 0;
2048 			break;
2049 		}
2050 	}
2051 
2052 	/*
2053 	 * Only update the T0 state if state change is atomic,
2054 	 * i.e. we don't need to wait for a timer to fire before we
2055 	 * can consider the state change to have been communicated.
2056 	 */
2057 	if (syncstates) {
2058 		in6m_commit(inm);
2059 		CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2060 		    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2061 		    if_name(inm->in6m_ifp));
2062 	}
2063 
2064 	return (error);
2065 }
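
/*
 * A worked example of the MLDv1 delay computed above, assuming
 * MLD_V1_MAX_RI = 10 (seconds), MLD_FASTHZ = 5 (fast timeout ticks
 * per second), and MLD_RANDOM_DELAY(X) yielding 1..X, all taken as
 * assumptions from mld6_var.h:
 *
 *	odelay = MLD_RANDOM_DELAY(10 * 5)
 *
 * gives a uniformly random value of 1..50 fasttimo ticks, i.e. the
 * unsolicited report is scheduled up to roughly 10s out, mirroring
 * the randomized report delays MLDv1 (RFC 2710) uses to avoid
 * report storms on a link.
 */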
2066 
2067 /*
2068  * Issue an intermediate state change during the life-cycle.
2069  */
2070 static int
2071 mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli)
2072 {
2073 	struct ifnet		*ifp;
2074 	int			 retval;
2075 #ifdef KTR
2076 	char			 ip6tbuf[INET6_ADDRSTRLEN];
2077 #endif
2078 
2079 	CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
2080 	    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2081 	    inm->in6m_ifp, if_name(inm->in6m_ifp));
2082 
2083 	ifp = inm->in6m_ifp;
2084 
2085 	IN6_MULTI_LIST_LOCK_ASSERT();
2086 	MLD_LOCK_ASSERT();
2087 
2088 	KASSERT(mli && mli->mli_ifp == ifp,
2089 	    ("%s: inconsistent ifp", __func__));
2090 
2091 	if ((ifp->if_flags & IFF_LOOPBACK) ||
2092 	    (mli->mli_flags & MLIF_SILENT) ||
2093 	    !mld_is_addr_reported(&inm->in6m_addr) ||
2094 	    (mli->mli_version != MLD_VERSION_2)) {
2095 		if (!mld_is_addr_reported(&inm->in6m_addr)) {
2096 			CTR1(KTR_MLD,
2097 "%s: not kicking state machine for silent group", __func__);
2098 		}
2099 		CTR1(KTR_MLD, "%s: nothing to do", __func__);
2100 		in6m_commit(inm);
2101 		CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2102 		    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2103 		    if_name(inm->in6m_ifp));
2104 		return (0);
2105 	}
2106 
2107 	mbufq_drain(&inm->in6m_scq);
2108 
2109 	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2110 	    (mli->mli_flags & MLIF_USEALLOW));
2111 	CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
2112 	if (retval <= 0)
2113 		return (-retval);
2114 
2115 	/*
2116 	 * If record(s) were enqueued, start the state-change
2117 	 * report timer for this group.
2118 	 */
2119 	inm->in6m_scrv = mli->mli_rv;
2120 	inm->in6m_sctimer = 1;
2121 	V_state_change_timers_running6 = 1;
2122 
2123 	return (0);
2124 }
2125 
2126 /*
2127  * Perform the final leave for a multicast address.
2128  *
2129  * When leaving a group:
2130  *  MLDv1 sends a DONE message, if and only if we are the reporter.
2131  *  MLDv2 enqueues a state-change report containing a transition
2132  *  to INCLUDE {} for immediate transmission.
2133  */
2134 static void
2135 mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli)
2136 {
2137 	struct epoch_tracker     et;
2138 #ifdef KTR
2139 	char ip6tbuf[INET6_ADDRSTRLEN];
2140 #endif
2141 
2142 	CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
2143 	    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2144 	    inm->in6m_ifp, if_name(inm->in6m_ifp));
2145 
2146 	IN6_MULTI_LIST_LOCK_ASSERT();
2147 	MLD_LOCK_ASSERT();
2148 
2149 	switch (inm->in6m_state) {
2150 	case MLD_NOT_MEMBER:
2151 	case MLD_SILENT_MEMBER:
2152 	case MLD_LEAVING_MEMBER:
2153 		/* Already leaving or left; do nothing. */
2154 		CTR1(KTR_MLD,
2155 "%s: not kicking state machine for silent group", __func__);
2156 		break;
2157 	case MLD_REPORTING_MEMBER:
2158 	case MLD_IDLE_MEMBER:
2159 	case MLD_G_QUERY_PENDING_MEMBER:
2160 	case MLD_SG_QUERY_PENDING_MEMBER:
2161 		if (mli->mli_version == MLD_VERSION_1) {
2162 #ifdef INVARIANTS
2163 			if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2164 			    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
2165 				panic("%s: MLDv2 state reached, not MLDv2 mode",
2166 				    __func__);
2167 #endif
2168 			NET_EPOCH_ENTER(et);
2169 			mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
2170 			NET_EPOCH_EXIT(et);
2171 			inm->in6m_state = MLD_NOT_MEMBER;
2172 			V_current_state_timers_running6 = 1;
2173 		} else if (mli->mli_version == MLD_VERSION_2) {
2174 			/*
2175 			 * Stop group timer and all pending reports.
2176 			 * Immediately enqueue a state-change report
2177 			 * TO_IN {} to be sent on the next fast timeout,
2178 			 * giving us an opportunity to merge reports.
2179 			 */
2180 			mbufq_drain(&inm->in6m_scq);
2181 			inm->in6m_timer = 0;
2182 			inm->in6m_scrv = mli->mli_rv;
2183 			CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
2184 			    "pending retransmissions.", __func__,
2185 			    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2186 			    if_name(inm->in6m_ifp), inm->in6m_scrv);
2187 			if (inm->in6m_scrv == 0) {
2188 				inm->in6m_state = MLD_NOT_MEMBER;
2189 				inm->in6m_sctimer = 0;
2190 			} else {
2191 				int retval __diagused;
2192 
2193 				in6m_acquire_locked(inm);
2194 
2195 				retval = mld_v2_enqueue_group_record(
2196 				    &inm->in6m_scq, inm, 1, 0, 0,
2197 				    (mli->mli_flags & MLIF_USEALLOW));
2198 				KASSERT(retval != 0,
2199 				    ("%s: enqueue record = %d", __func__,
2200 				     retval));
2201 
2202 				inm->in6m_state = MLD_LEAVING_MEMBER;
2203 				inm->in6m_sctimer = 1;
2204 				V_state_change_timers_running6 = 1;
2205 			}
2206 			break;
2207 		}
2208 		break;
2209 	case MLD_LAZY_MEMBER:
2210 	case MLD_SLEEPING_MEMBER:
2211 	case MLD_AWAKENING_MEMBER:
2212 		/* Our reports are suppressed; do nothing. */
2213 		break;
2214 	}
2215 
2216 	in6m_commit(inm);
2217 	CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2218 	    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2219 	    if_name(inm->in6m_ifp));
2220 	inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2221 	CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
2222 	    __func__, &inm->in6m_addr, if_name(inm->in6m_ifp));
2223 }
2224 
2225 /*
2226  * Enqueue an MLDv2 group record to the given output queue.
2227  *
2228  * If is_state_change is zero, a current-state record is appended.
2229  * If is_state_change is non-zero, a state-change report is appended.
2230  *
2231  * If is_group_query is non-zero, an mbuf packet chain is allocated.
2232  * If is_group_query is zero, and if there is a packet with free space
2233  * at the tail of the queue, it will be appended to, provided there
2234  * is enough free space.
2235  * Otherwise a new mbuf packet chain is allocated.
2236  *
2237  * If is_source_query is non-zero, each source is checked to see if
2238  * it was recorded for a Group-Source query, and will be omitted if
2239  * it is not both in-mode and recorded.
2240  *
2241  * If use_block_allow is non-zero, state change reports for initial join
2242  * and final leave, on an inclusive mode group with a source list, will be
2243  * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2244  *
2245  * The function will attempt to allocate leading space in the packet
2246  * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2247  *
2248  * If successful the size of all data appended to the queue is returned,
2249  * otherwise an error code less than zero is returned, or zero if
2250  * no record(s) were appended.
2251  */
2252 static int
2253 mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm,
2254     const int is_state_change, const int is_group_query,
2255     const int is_source_query, const int use_block_allow)
2256 {
2257 	struct mldv2_record	 mr;
2258 	struct mldv2_record	*pmr;
2259 	struct ifnet		*ifp;
2260 	struct ip6_msource	*ims, *nims;
2261 	struct mbuf		*m0, *m, *md;
2262 	int			 is_filter_list_change;
2263 	int			 minrec0len, m0srcs, msrcs, nbytes, off;
2264 	int			 record_has_sources;
2265 	int			 now;
2266 	int			 type;
2267 	uint8_t			 mode;
2268 #ifdef KTR
2269 	char			 ip6tbuf[INET6_ADDRSTRLEN];
2270 #endif
2271 
2272 	IN6_MULTI_LIST_LOCK_ASSERT();
2273 
2274 	ifp = inm->in6m_ifp;
2275 	is_filter_list_change = 0;
2276 	m = NULL;
2277 	m0 = NULL;
2278 	m0srcs = 0;
2279 	msrcs = 0;
2280 	nbytes = 0;
2281 	nims = NULL;
2282 	record_has_sources = 1;
2283 	pmr = NULL;
2284 	type = MLD_DO_NOTHING;
2285 	mode = inm->in6m_st[1].iss_fmode;
2286 
2287 	/*
2288 	 * If we did not transition out of ASM mode during t0->t1,
2289 	 * and there are no source nodes to process, we can skip
2290 	 * the generation of source records.
2291 	 */
2292 	if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2293 	    inm->in6m_nsrc == 0)
2294 		record_has_sources = 0;
2295 
2296 	if (is_state_change) {
2297 		/*
2298 		 * Queue a state change record.
2299 		 * If the mode did not change, and there are non-ASM
2300 		 * listeners or source filters present,
2301 		 * we potentially need to issue two records for the group.
2302 		 * If there are ASM listeners, and there was no filter
2303 		 * mode transition of any kind, do nothing.
2304 		 *
2305 		 * If we are transitioning to MCAST_UNDEFINED, we need
2306 		 * not send any sources. A transition to/from this state is
2307 		 * considered inclusive with some special treatment.
2308 		 *
2309 		 * If we are rewriting initial joins/leaves to use
2310 		 * ALLOW/BLOCK, and the group's membership is inclusive,
2311 		 * we need to send sources in all cases.
2312 		 */
2313 		if (mode != inm->in6m_st[0].iss_fmode) {
2314 			if (mode == MCAST_EXCLUDE) {
2315 				CTR1(KTR_MLD, "%s: change to EXCLUDE",
2316 				    __func__);
2317 				type = MLD_CHANGE_TO_EXCLUDE_MODE;
2318 			} else {
2319 				CTR1(KTR_MLD, "%s: change to INCLUDE",
2320 				    __func__);
2321 				if (use_block_allow) {
2322 					/*
2323 					 * XXX
2324 					 * Here we're interested in state
2325 					 * edges either direction between
2326 					 * MCAST_UNDEFINED and MCAST_INCLUDE.
2327 					 * Perhaps we should just check
2328 					 * the group state, rather than
2329 					 * the filter mode.
2330 					 */
2331 					if (mode == MCAST_UNDEFINED) {
2332 						type = MLD_BLOCK_OLD_SOURCES;
2333 					} else {
2334 						type = MLD_ALLOW_NEW_SOURCES;
2335 					}
2336 				} else {
2337 					type = MLD_CHANGE_TO_INCLUDE_MODE;
2338 					if (mode == MCAST_UNDEFINED)
2339 						record_has_sources = 0;
2340 				}
2341 			}
2342 		} else {
2343 			if (record_has_sources) {
2344 				is_filter_list_change = 1;
2345 			} else {
2346 				type = MLD_DO_NOTHING;
2347 			}
2348 		}
2349 	} else {
2350 		/*
2351 		 * Queue a current state record.
2352 		 */
2353 		if (mode == MCAST_EXCLUDE) {
2354 			type = MLD_MODE_IS_EXCLUDE;
2355 		} else if (mode == MCAST_INCLUDE) {
2356 			type = MLD_MODE_IS_INCLUDE;
2357 			KASSERT(inm->in6m_st[1].iss_asm == 0,
2358 			    ("%s: inm %p is INCLUDE but ASM count is %d",
2359 			     __func__, inm, inm->in6m_st[1].iss_asm));
2360 		}
2361 	}
2362 
2363 	/*
2364 	 * Generate the filter list changes using a separate function.
2365 	 */
2366 	if (is_filter_list_change)
2367 		return (mld_v2_enqueue_filter_change(mq, inm));
2368 
2369 	if (type == MLD_DO_NOTHING) {
2370 		CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
2371 		    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2372 		    if_name(inm->in6m_ifp));
2373 		return (0);
2374 	}
2375 
2376 	/*
2377 	 * If any sources are present, we must be able to fit at least
2378 	 * one in the trailing space of the tail packet's mbuf,
2379 	 * ideally more.
2380 	 */
2381 	minrec0len = sizeof(struct mldv2_record);
2382 	if (record_has_sources)
2383 		minrec0len += sizeof(struct in6_addr);
2384 
2385 	CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
2386 	    mld_rec_type_to_str(type),
2387 	    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2388 	    if_name(inm->in6m_ifp));
2389 
2390 	/*
2391 	 * Check if we have a packet in the tail of the queue for this
2392 	 * group into which the first group record for this group will fit.
2393 	 * Otherwise allocate a new packet.
2394 	 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
2395 	 * Note: Group records for G/GSR query responses MUST be sent
2396 	 * in their own packet.
2397 	 */
2398 	m0 = mbufq_last(mq);
2399 	if (!is_group_query &&
2400 	    m0 != NULL &&
2401 	    (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
2402 	    (m0->m_pkthdr.len + minrec0len) <
2403 	     (ifp->if_mtu - MLD_MTUSPACE)) {
2404 		m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2405 			    sizeof(struct mldv2_record)) /
2406 			    sizeof(struct in6_addr);
2407 		m = m0;
2408 		CTR1(KTR_MLD, "%s: use existing packet", __func__);
2409 	} else {
2410 		if (mbufq_full(mq)) {
2411 			CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2412 			return (-ENOMEM);
2413 		}
2414 		m = NULL;
2415 		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2416 		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2417 		if (!is_state_change && !is_group_query)
2418 			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2419 		if (m == NULL)
2420 			m = m_gethdr(M_NOWAIT, MT_DATA);
2421 		if (m == NULL)
2422 			return (-ENOMEM);
2423 
2424 		mld_save_context(m, ifp);
2425 
2426 		CTR1(KTR_MLD, "%s: allocated first packet", __func__);
2427 	}
2428 
2429 	/*
2430 	 * Append group record.
2431 	 * If we have sources, we don't know how many yet.
2432 	 */
2433 	mr.mr_type = type;
2434 	mr.mr_datalen = 0;
2435 	mr.mr_numsrc = 0;
2436 	mr.mr_addr = inm->in6m_addr;
2437 	in6_clearscope(&mr.mr_addr);
2438 	if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2439 		if (m != m0)
2440 			m_freem(m);
2441 		CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2442 		return (-ENOMEM);
2443 	}
2444 	nbytes += sizeof(struct mldv2_record);
2445 
2446 	/*
2447 	 * Append as many sources as will fit in the first packet.
2448 	 * If we are appending to a new packet, the chain allocation
2449 	 * may potentially use clusters; use m_getptr() in this case.
2450 	 * If we are appending to an existing packet, we need to obtain
2451 	 * a pointer to the group record after m_append(), in case a new
2452 	 * mbuf was allocated.
2453 	 *
2454 	 * Only append sources which are in-mode at t1. If we are
2455 	 * transitioning to MCAST_UNDEFINED state on the group, and
2456 	 * use_block_allow is zero, do not include source entries.
2457 	 * Otherwise, we need to include this source in the report.
2458 	 *
2459 	 * Only report recorded sources in our filter set when responding
2460 	 * to a group-source query.
2461 	 */
2462 	if (record_has_sources) {
2463 		if (m == m0) {
2464 			md = m_last(m);
2465 			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2466 			    md->m_len - nbytes);
2467 		} else {
2468 			md = m_getptr(m, 0, &off);
2469 			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2470 			    off);
2471 		}
2472 		msrcs = 0;
2473 		RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
2474 		    nims) {
2475 			CTR2(KTR_MLD, "%s: visit node %s", __func__,
2476 			    ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2477 			now = im6s_get_mode(inm, ims, 1);
2478 			CTR2(KTR_MLD, "%s: node is %d", __func__, now);
2479 			if ((now != mode) ||
2480 			    (now == mode &&
2481 			     (!use_block_allow && mode == MCAST_UNDEFINED))) {
2482 				CTR1(KTR_MLD, "%s: skip node", __func__);
2483 				continue;
2484 			}
2485 			if (is_source_query && ims->im6s_stp == 0) {
2486 				CTR1(KTR_MLD, "%s: skip unrecorded node",
2487 				    __func__);
2488 				continue;
2489 			}
2490 			CTR1(KTR_MLD, "%s: append node", __func__);
2491 			if (!m_append(m, sizeof(struct in6_addr),
2492 			    (void *)&ims->im6s_addr)) {
2493 				if (m != m0)
2494 					m_freem(m);
2495 				CTR1(KTR_MLD, "%s: m_append() failed.",
2496 				    __func__);
2497 				return (-ENOMEM);
2498 			}
2499 			nbytes += sizeof(struct in6_addr);
2500 			++msrcs;
2501 			if (msrcs == m0srcs)
2502 				break;
2503 		}
2504 		CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
2505 		    msrcs);
2506 		pmr->mr_numsrc = htons(msrcs);
2507 		nbytes += (msrcs * sizeof(struct in6_addr));
2508 	}
2509 
2510 	if (is_source_query && msrcs == 0) {
2511 		CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
2512 		if (m != m0)
2513 			m_freem(m);
2514 		return (0);
2515 	}
2516 
2517 	/*
2518 	 * We are good to go with first packet.
2519 	 */
2520 	if (m != m0) {
2521 		CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
2522 		m->m_pkthdr.vt_nrecs = 1;
2523 		mbufq_enqueue(mq, m);
2524 	} else
2525 		m->m_pkthdr.vt_nrecs++;
2526 
2527 	/*
2528 	 * No further work needed if no source list in packet(s).
2529 	 */
2530 	if (!record_has_sources)
2531 		return (nbytes);
2532 
2533 	/*
2534 	 * Whilst sources remain to be announced, we need to allocate
2535 	 * a new packet and fill out as many sources as will fit.
2536 	 * Always try for a cluster first.
2537 	 */
2538 	while (nims != NULL) {
2539 		if (mbufq_full(mq)) {
2540 			CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2541 			return (-ENOMEM);
2542 		}
2543 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2544 		if (m == NULL)
2545 			m = m_gethdr(M_NOWAIT, MT_DATA);
2546 		if (m == NULL)
2547 			return (-ENOMEM);
2548 		mld_save_context(m, ifp);
2549 		md = m_getptr(m, 0, &off);
2550 		pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
2551 		CTR1(KTR_MLD, "%s: allocated next packet", __func__);
2552 
2553 		if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2554 			if (m != m0)
2555 				m_freem(m);
2556 			CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2557 			return (-ENOMEM);
2558 		}
2559 		m->m_pkthdr.vt_nrecs = 1;
2560 		nbytes += sizeof(struct mldv2_record);
2561 
2562 		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2563 		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2564 
2565 		msrcs = 0;
2566 		RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2567 			CTR2(KTR_MLD, "%s: visit node %s",
2568 			    __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2569 			now = im6s_get_mode(inm, ims, 1);
2570 			if ((now != mode) ||
2571 			    (now == mode &&
2572 			     (!use_block_allow && mode == MCAST_UNDEFINED))) {
2573 				CTR1(KTR_MLD, "%s: skip node", __func__);
2574 				continue;
2575 			}
2576 			if (is_source_query && ims->im6s_stp == 0) {
2577 				CTR1(KTR_MLD, "%s: skip unrecorded node",
2578 				    __func__);
2579 				continue;
2580 			}
2581 			CTR1(KTR_MLD, "%s: append node", __func__);
2582 			if (!m_append(m, sizeof(struct in6_addr),
2583 			    (void *)&ims->im6s_addr)) {
2584 				if (m != m0)
2585 					m_freem(m);
2586 				CTR1(KTR_MLD, "%s: m_append() failed.",
2587 				    __func__);
2588 				return (-ENOMEM);
2589 			}
2590 			++msrcs;
2591 			if (msrcs == m0srcs)
2592 				break;
2593 		}
2594 		pmr->mr_numsrc = htons(msrcs);
2595 		nbytes += (msrcs * sizeof(struct in6_addr));
2596 
2597 		CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
2598 		mbufq_enqueue(mq, m);
2599 	}
2600 
2601 	return (nbytes);
2602 }
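
/*
 * A worked example of the m0srcs arithmetic above, at the IPv6
 * minimum link MTU of 1280 and assuming MLD_MTUSPACE is 56 bytes
 * (40-byte IPv6 header + 8-byte hop-by-hop Router Alert option +
 * 8-byte MLDv2 report header) and sizeof(struct mldv2_record) == 20:
 *
 *	m0srcs = (1280 - 56 - 20) / sizeof(struct in6_addr)
 *	       = 1204 / 16 = 75
 *
 * so a fresh packet carries a group record with up to 75 sources;
 * groups with more sources spill into the follow-up packets built
 * in the while-loop above.
 */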
2603 
2604 /*
2605  * Type used to mark record pass completion.
2606  * We exploit the fact we can cast to this easily from the
2607  * current filter modes on each ip6_msource node.
2608  */
2609 typedef enum {
2610 	REC_NONE = 0x00,	/* MCAST_UNDEFINED */
2611 	REC_ALLOW = 0x01,	/* MCAST_INCLUDE */
2612 	REC_BLOCK = 0x02,	/* MCAST_EXCLUDE */
2613 	REC_FULL = REC_ALLOW | REC_BLOCK
2614 } rectype_t;
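
/*
 * The cast trick in action: for a source that disappears at t1
 * (im6s_get_mode() returns MCAST_UNDEFINED, i.e. REC_NONE), the
 * filter-change walk below derives its record type from the group's
 * mode instead:
 *
 *	nrt = (rectype_t)(~mode & REC_FULL);
 *
 * e.g. with mode == MCAST_INCLUDE (1): ~1 & 3 == 2 == REC_BLOCK, so
 * a source dropped from an inclusive-mode group is announced in a
 * BLOCK_OLD_SOURCES record; symmetrically, one dropped from an
 * exclusive-mode group (~2 & 3 == 1) lands in ALLOW_NEW_SOURCES.
 */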
2615 
2616 /*
2617  * Enqueue an MLDv2 filter list change to the given output queue.
2618  *
2619  * Source list filter state is held in an RB-tree. When the filter list
2620  * for a group is changed without changing its mode, we need to compute
2621  * the deltas between T0 and T1 for each source in the filter set,
2622  * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2623  *
2624  * As we may potentially queue two record types, and the entire R-B tree
2625  * needs to be walked at once, we break this out into its own function
2626  * so we can generate a tightly packed queue of packets.
2627  *
2628  * XXX This could be written to only use one tree walk, although that makes
2629  * serializing into the mbuf chains a bit harder. For now we do two walks
2630  * which makes things easier on us, and it may or may not be harder on
2631  * the L2 cache.
2632  *
2633  * If successful the size of all data appended to the queue is returned,
2634  * otherwise an error code less than zero is returned, or zero if
2635  * no record(s) were appended.
2636  */
2637 static int
2638 mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm)
2639 {
2640 	static const int MINRECLEN =
2641 	    sizeof(struct mldv2_record) + sizeof(struct in6_addr);
2642 	struct ifnet		*ifp;
2643 	struct mldv2_record	 mr;
2644 	struct mldv2_record	*pmr;
2645 	struct ip6_msource	*ims, *nims;
2646 	struct mbuf		*m, *m0, *md;
2647 	int			 m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2648 	uint8_t			 mode, now, then;
2649 	rectype_t		 crt, drt, nrt;
2650 #ifdef KTR
2651 	int			 nallow, nblock;
2652 	char			 ip6tbuf[INET6_ADDRSTRLEN];
2653 #endif
2654 
2655 	IN6_MULTI_LIST_LOCK_ASSERT();
2656 
2657 	if (inm->in6m_nsrc == 0 ||
2658 	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
2659 		return (0);
2660 
2661 	ifp = inm->in6m_ifp;			/* interface */
2662 	mode = inm->in6m_st[1].iss_fmode;	/* filter mode at t1 */
2663 	crt = REC_NONE;	/* current group record type */
2664 	drt = REC_NONE;	/* mask of completed group record types */
2665 	nrt = REC_NONE;	/* record type for current node */
2666 	m0srcs = 0;	/* # source which will fit in current mbuf chain */
2667 	npbytes = 0;	/* # of bytes appended this packet */
2668 	nbytes = 0;	/* # of bytes appended to group's state-change queue */
2669 	rsrcs = 0;	/* # sources encoded in current record */
2670 	schanged = 0;	/* # nodes encoded in overall filter change */
2671 #ifdef KTR
2672 	nallow = 0;	/* # of source entries in ALLOW_NEW */
2673 	nblock = 0;	/* # of source entries in BLOCK_OLD */
2674 #endif
2675 	nims = NULL;	/* next tree node pointer */
2676 
2677 	/*
2678 	 * For each possible filter record mode.
2679 	 * The first kind of source we encounter tells us which
2680 	 * is the first kind of record we start appending.
2681 	 * If a node transitioned to UNDEFINED at t1, its mode is treated
2682 	 * as the inverse of the group's filter mode.
2683 	 */
2684 	while (drt != REC_FULL) {
2685 		do {
2686 			m0 = mbufq_last(mq);
2687 			if (m0 != NULL &&
2688 			    (m0->m_pkthdr.vt_nrecs + 1 <=
2689 			     MLD_V2_REPORT_MAXRECS) &&
2690 			    (m0->m_pkthdr.len + MINRECLEN) <
2691 			     (ifp->if_mtu - MLD_MTUSPACE)) {
2692 				m = m0;
2693 				m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2694 					    sizeof(struct mldv2_record)) /
2695 					    sizeof(struct in6_addr);
2696 				CTR1(KTR_MLD,
2697 				    "%s: use previous packet", __func__);
2698 			} else {
2699 				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2700 				if (m == NULL)
2701 					m = m_gethdr(M_NOWAIT, MT_DATA);
2702 				if (m == NULL) {
2703 					CTR1(KTR_MLD,
2704 					    "%s: m_get*() failed", __func__);
2705 					return (-ENOMEM);
2706 				}
2707 				m->m_pkthdr.vt_nrecs = 0;
2708 				mld_save_context(m, ifp);
2709 				m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2710 				    sizeof(struct mldv2_record)) /
2711 				    sizeof(struct in6_addr);
2712 				npbytes = 0;
2713 				CTR1(KTR_MLD,
2714 				    "%s: allocated new packet", __func__);
2715 			}
2716 			/*
2717 			 * Append the MLD group record header to the
2718 			 * current packet's data area.
2719 			 * Recalculate pointer to free space for next
2720 			 * group record, in case m_append() allocated
2721 			 * a new mbuf or cluster.
2722 			 */
2723 			memset(&mr, 0, sizeof(mr));
2724 			mr.mr_addr = inm->in6m_addr;
2725 			in6_clearscope(&mr.mr_addr);
2726 			if (!m_append(m, sizeof(mr), (void *)&mr)) {
2727 				if (m != m0)
2728 					m_freem(m);
2729 				CTR1(KTR_MLD,
2730 				    "%s: m_append() failed", __func__);
2731 				return (-ENOMEM);
2732 			}
2733 			npbytes += sizeof(struct mldv2_record);
2734 			if (m != m0) {
2735 				/* new packet; offset in chain */
2736 				md = m_getptr(m, npbytes -
2737 				    sizeof(struct mldv2_record), &off);
2738 				pmr = (struct mldv2_record *)(mtod(md,
2739 				    uint8_t *) + off);
2740 			} else {
2741 				/* current packet; offset from last append */
2742 				md = m_last(m);
2743 				pmr = (struct mldv2_record *)(mtod(md,
2744 				    uint8_t *) + md->m_len -
2745 				    sizeof(struct mldv2_record));
2746 			}
2747 			/*
2748 			 * Begin walking the tree for this record type
2749 			 * pass, or continue from where we left off
2750 			 * previously if we had to allocate a new packet.
2751 			 * Only report deltas in-mode at t1.
2752 			 * We need not report included sources as allowed
2753 			 * if we are in inclusive mode on the group,
2754 			 * however the converse is not true.
2755 			 */
2756 			rsrcs = 0;
2757 			if (nims == NULL) {
2758 				nims = RB_MIN(ip6_msource_tree,
2759 				    &inm->in6m_srcs);
2760 			}
2761 			RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2762 				CTR2(KTR_MLD, "%s: visit node %s", __func__,
2763 				    ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2764 				now = im6s_get_mode(inm, ims, 1);
2765 				then = im6s_get_mode(inm, ims, 0);
2766 				CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
2767 				    __func__, then, now);
2768 				if (now == then) {
2769 					CTR1(KTR_MLD,
2770 					    "%s: skip unchanged", __func__);
2771 					continue;
2772 				}
2773 				if (mode == MCAST_EXCLUDE &&
2774 				    now == MCAST_INCLUDE) {
2775 					CTR1(KTR_MLD,
2776 					    "%s: skip IN src on EX group",
2777 					    __func__);
2778 					continue;
2779 				}
2780 				nrt = (rectype_t)now;
2781 				if (nrt == REC_NONE)
2782 					nrt = (rectype_t)(~mode & REC_FULL);
2783 				if (schanged++ == 0) {
2784 					crt = nrt;
2785 				} else if (crt != nrt)
2786 					continue;
2787 				if (!m_append(m, sizeof(struct in6_addr),
2788 				    (void *)&ims->im6s_addr)) {
2789 					if (m != m0)
2790 						m_freem(m);
2791 					CTR1(KTR_MLD,
2792 					    "%s: m_append() failed", __func__);
2793 					return (-ENOMEM);
2794 				}
2795 #ifdef KTR
2796 				nallow += !!(crt == REC_ALLOW);
2797 				nblock += !!(crt == REC_BLOCK);
2798 #endif
2799 				if (++rsrcs == m0srcs)
2800 					break;
2801 			}
2802 			/*
2803 			 * If we did not append any tree nodes on this
2804 			 * pass, back out of allocations.
2805 			 */
2806 			if (rsrcs == 0) {
2807 				npbytes -= sizeof(struct mldv2_record);
2808 				if (m != m0) {
2809 					CTR1(KTR_MLD,
2810 					    "%s: m_free(m)", __func__);
2811 					m_freem(m);
2812 				} else {
2813 					CTR1(KTR_MLD,
2814 					    "%s: m_adj(m, -mr)", __func__);
2815 					m_adj(m, -((int)sizeof(
2816 					    struct mldv2_record)));
2817 				}
2818 				continue;
2819 			}
2820 			npbytes += (rsrcs * sizeof(struct in6_addr));
2821 			if (crt == REC_ALLOW)
2822 				pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
2823 			else if (crt == REC_BLOCK)
2824 				pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
2825 			pmr->mr_numsrc = htons(rsrcs);
2826 			/*
2827 			 * Count the new group record, and enqueue this
2828 			 * packet if it wasn't already queued.
2829 			 */
2830 			m->m_pkthdr.vt_nrecs++;
2831 			if (m != m0)
2832 				mbufq_enqueue(mq, m);
2833 			nbytes += npbytes;
2834 		} while (nims != NULL);
2835 		drt |= crt;
2836 		crt = (~crt & REC_FULL);
2837 	}
2838 
2839 	CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
2840 	    nallow, nblock);
2841 
2842 	return (nbytes);
2843 }
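
/*
 * A concrete example of the two-pass delta walk: suppose a group in
 * INCLUDE mode has sources {A, B} at t0 and {B, C} at t1.  B is
 * unchanged and skipped; C is in-mode only at t1 and is collected on
 * the ALLOW pass; A is undefined at t1, so its record type is the
 * inverse of the group mode (REC_BLOCK) and it is collected on the
 * BLOCK pass.  The queue then carries two records:
 *
 *	ALLOW_NEW_SOURCES { C }
 *	BLOCK_OLD_SOURCES { A }
 *
 * which are exactly the ALLOW/BLOCK deltas RFC 3810 prescribes for a
 * filter change that does not alter the group's mode.
 */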
2844 
2845 static int
2846 mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq)
2847 {
2848 	struct mbufq	*gq;
2849 	struct mbuf	*m;		/* pending state-change */
2850 	struct mbuf	*m0;		/* copy of pending state-change */
2851 	struct mbuf	*mt;		/* last state-change in packet */
2852 	int		 docopy, domerge;
2853 	u_int		 recslen;
2854 
2855 	docopy = 0;
2856 	domerge = 0;
2857 	recslen = 0;
2858 
2859 	IN6_MULTI_LIST_LOCK_ASSERT();
2860 	MLD_LOCK_ASSERT();
2861 
2862 	/*
2863 	 * If there are further pending retransmissions, make a writable
2864 	 * copy of each queued state-change message before merging.
2865 	 */
2866 	if (inm->in6m_scrv > 0)
2867 		docopy = 1;
2868 
2869 	gq = &inm->in6m_scq;
2870 #ifdef KTR
2871 	if (mbufq_first(gq) == NULL) {
2872 		CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
2873 		    __func__, inm);
2874 	}
2875 #endif
2876 
2877 	m = mbufq_first(gq);
2878 	while (m != NULL) {
2879 		/*
2880 		 * Only merge the report into the current packet if
2881 		 * there is sufficient space to do so; an MLDv2 report
2882 		 * packet may only contain 65,535 group records.
2883 		 * Always use a simple mbuf chain concatenation to do this,
2884 		 * as large state changes for single groups may have
2885 		 * allocated clusters.
2886 		 */
2887 		domerge = 0;
2888 		mt = mbufq_last(scq);
2889 		if (mt != NULL) {
2890 			recslen = m_length(m, NULL);
2891 
2892 			if ((mt->m_pkthdr.vt_nrecs +
2893 			    m->m_pkthdr.vt_nrecs <=
2894 			    MLD_V2_REPORT_MAXRECS) &&
2895 			    (mt->m_pkthdr.len + recslen <=
2896 			    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
2897 				domerge = 1;
2898 		}
2899 
2900 		if (!domerge && mbufq_full(gq)) {
2901 			CTR2(KTR_MLD,
2902 			    "%s: outbound queue full, skipping whole packet %p",
2903 			    __func__, m);
2904 			mt = m->m_nextpkt;
2905 			if (!docopy)
2906 				m_freem(m);
2907 			m = mt;
2908 			continue;
2909 		}
2910 
2911 		if (!docopy) {
2912 			CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
2913 			m0 = mbufq_dequeue(gq);
2914 			m = m0->m_nextpkt;
2915 		} else {
2916 			CTR2(KTR_MLD, "%s: copying %p", __func__, m);
2917 			m0 = m_dup(m, M_NOWAIT);
2918 			if (m0 == NULL)
2919 				return (ENOMEM);
2920 			m0->m_nextpkt = NULL;
2921 			m = m->m_nextpkt;
2922 		}
2923 
2924 		if (!domerge) {
2925 			CTR3(KTR_MLD, "%s: queueing %p to scq %p)",
2926 			    __func__, m0, scq);
2927 			mbufq_enqueue(scq, m0);
2928 		} else {
2929 			struct mbuf *mtl;	/* last mbuf of packet mt */
2930 
2931 			CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p)",
2932 			    __func__, m0, mt);
2933 
2934 			mtl = m_last(mt);
2935 			m0->m_flags &= ~M_PKTHDR;
2936 			mt->m_pkthdr.len += recslen;
2937 			mt->m_pkthdr.vt_nrecs +=
2938 			    m0->m_pkthdr.vt_nrecs;
2939 
2940 			mtl->m_next = m0;
2941 		}
2942 	}
2943 
2944 	return (0);
2945 }
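
/*
 * The copy-vs-move decision above reflects the RFC 3810 rule that a
 * state change is transmitted [Robustness Variable] times in total.
 * While in6m_scrv is still positive, further retransmissions are
 * owed, so each pending record is m_dup()'d into the transmit queue
 * and the original stays on in6m_scq; once no retransmissions
 * remain, the records are dequeued and handed over outright.
 */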
2946 
2947 /*
2948  * Respond to a pending MLDv2 General Query.
2949  */
2950 static void
2951 mld_v2_dispatch_general_query(struct mld_ifsoftc *mli)
2952 {
2953 	struct ifmultiaddr	*ifma;
2954 	struct ifnet		*ifp;
2955 	struct in6_multi	*inm;
2956 	int			 retval __unused;
2957 
2958 	NET_EPOCH_ASSERT();
2959 	IN6_MULTI_LIST_LOCK_ASSERT();
2960 	MLD_LOCK_ASSERT();
2961 
2962 	KASSERT(mli->mli_version == MLD_VERSION_2,
2963 	    ("%s: called when version %d", __func__, mli->mli_version));
2964 
2965 	/*
2966 	 * Check whether any packets are already queued. If so, send them
2967 	 * first: for a large number of groups, the reply to a general query
2968 	 * can take many packets, and we should finish sending them before
2969 	 * we start queuing the new reply.
2970 	 */
2971 	if (!mbufq_empty(&mli->mli_gq))
2972 		goto send;
2973 
2974 	ifp = mli->mli_ifp;
2975 
2976 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2977 		inm = in6m_ifmultiaddr_get_inm(ifma);
2978 		if (inm == NULL)
2979 			continue;
2980 		KASSERT(ifp == inm->in6m_ifp,
2981 		    ("%s: inconsistent ifp", __func__));
2982 
2983 		switch (inm->in6m_state) {
2984 		case MLD_NOT_MEMBER:
2985 		case MLD_SILENT_MEMBER:
2986 			break;
2987 		case MLD_REPORTING_MEMBER:
2988 		case MLD_IDLE_MEMBER:
2989 		case MLD_LAZY_MEMBER:
2990 		case MLD_SLEEPING_MEMBER:
2991 		case MLD_AWAKENING_MEMBER:
2992 			inm->in6m_state = MLD_REPORTING_MEMBER;
2993 			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
2994 			    inm, 0, 0, 0, 0);
2995 			CTR2(KTR_MLD, "%s: enqueue record = %d",
2996 			    __func__, retval);
2997 			break;
2998 		case MLD_G_QUERY_PENDING_MEMBER:
2999 		case MLD_SG_QUERY_PENDING_MEMBER:
3000 		case MLD_LEAVING_MEMBER:
3001 			break;
3002 		}
3003 	}
3004 
3005 send:
3006 	mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3007 
3008 	/*
3009 	 * Slew transmission of bursts over 500ms intervals.
3010 	 */
3011 	if (mbufq_first(&mli->mli_gq) != NULL) {
3012 		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3013 		    MLD_RESPONSE_BURST_INTERVAL);
3014 		V_interface_timers_running6 = 1;
3015 	}
3016 }
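
/*
 * A sketch of the pacing above, assuming the mld6_var.h defaults
 * MLD_MAX_RESPONSE_BURST = 4 and MLD_RESPONSE_BURST_INTERVAL =
 * MLD_FASTHZ / 2 (i.e. 2 fasttimo ticks at MLD_FASTHZ = 5): at most
 * 4 report packets leave per call, and if the queue is still
 * non-empty the interface timer is re-armed to
 *
 *	1 + MLD_RANDOM_DELAY(2)  ==  2 or 3 ticks  (~400-600ms)
 *
 * so a large general-query response drains in randomized ~500ms
 * bursts rather than as one long packet train.
 */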
3017 
3018 /*
3019  * Transmit the next pending message in the output queue.
3020  *
3021  * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3022  * MRT: Nothing needs to be done, as MLD traffic is always local to
3023  * a link and uses a link-scope multicast address.
3024  */
3025 static void
3026 mld_dispatch_packet(struct mbuf *m)
3027 {
3028 	struct ip6_moptions	 im6o;
3029 	struct ifnet		*ifp;
3030 	struct ifnet		*oifp;
3031 	struct mbuf		*m0;
3032 	struct mbuf		*md;
3033 	struct ip6_hdr		*ip6;
3034 	struct mld_hdr		*mld;
3035 	int			 error;
3036 	int			 off;
3037 	int			 type;
3038 	uint32_t		 ifindex;
3039 
3040 	CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
3041 	NET_EPOCH_ASSERT();
3042 
3043 	/*
3044 	 * Set VNET image pointer from enqueued mbuf chain
3045 	 * before doing anything else. Whilst we use interface
3046 	 * indexes to guard against interface detach, they are
3047 	 * unique to each VIMAGE and must be retrieved.
3048 	 */
3049 	ifindex = mld_restore_context(m);
3050 
3051 	/*
3052 	 * Check if the ifnet still exists. This limits the scope of
3053 	 * any race in the absence of a global ifp lock for low cost
3054 	 * (an array lookup).
3055 	 */
3056 	ifp = ifnet_byindex(ifindex);
3057 	if (ifp == NULL) {
3058 		CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
3059 		    __func__, m, ifindex);
3060 		m_freem(m);
3061 		IP6STAT_INC(ip6s_noroute);
3062 		goto out;
3063 	}
3064 
3065 	im6o.im6o_multicast_hlim  = 1;
3066 	im6o.im6o_multicast_loop = V_ip6_mrouting_enabled;
3067 	im6o.im6o_multicast_ifp = ifp;
3068 
3069 	if (m->m_flags & M_MLDV1) {
3070 		m0 = m;
3071 	} else {
3072 		m0 = mld_v2_encap_report(ifp, m);
3073 		if (m0 == NULL) {
3074 			CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
3075 			IP6STAT_INC(ip6s_odropped);
3076 			goto out;
3077 		}
3078 	}
3079 
3080 	mld_scrub_context(m0);
3081 	m_clrprotoflags(m);
3082 	m0->m_pkthdr.rcvif = V_loif;
3083 
3084 	ip6 = mtod(m0, struct ip6_hdr *);
3085 #if 0
3086 	(void)in6_setscope(&ip6->ip6_dst, ifp, NULL);	/* XXX LOR */
3087 #else
3088 	/*
3089 	 * XXX XXX Break some KPI rules to prevent an LOR which would
3090 	 * occur if we called in6_setscope() at transmission.
3091 	 * See comments at top of file.
3092 	 */
3093 	MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
3094 #endif
3095 
3096 	/*
3097 	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3098 	 * so we can bump the stats.
3099 	 */
3100 	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3101 	mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
3102 	type = mld->mld_type;
3103 
3104 	oifp = NULL;
3105 	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
3106 	    &oifp, NULL);
3107 	if (error) {
3108 		CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
3109 		goto out;
3110 	}
3111 	ICMP6STAT_INC2(icp6s_outhist, type);
3112 	if (oifp != NULL) {
3113 		icmp6_ifstat_inc(oifp, ifs6_out_msg);
3114 		switch (type) {
3115 		case MLD_LISTENER_REPORT:
3116 		case MLDV2_LISTENER_REPORT:
3117 			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3118 			break;
3119 		case MLD_LISTENER_DONE:
3120 			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3121 			break;
3122 		}
3123 	}
3124 out:
3125 	return;
3126 }
3127 
3128 /*
3129  * Encapsulate an MLDv2 report.
3130  *
3131  * KAME IPv6 requires that hop-by-hop options be passed separately,
3132  * and that the IPv6 header be prepended in a separate mbuf.
3133  *
3134  * Returns a pointer to the new mbuf chain head, or NULL if the
3135  * allocation failed.
3136  */
3137 static struct mbuf *
3138 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3139 {
3140 	struct mbuf		*mh;
3141 	struct mldv2_report	*mld;
3142 	struct ip6_hdr		*ip6;
3143 	struct in6_ifaddr	*ia;
3144 	int			 mldreclen;
3145 
3146 	KASSERT(ifp != NULL, ("%s: null ifp", __func__));
3147 	KASSERT((m->m_flags & M_PKTHDR),
3148 	    ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3149 
3150 	/*
3151 	 * RFC3590: OK to send as :: or tentative during DAD.
3152 	 */
3153 	NET_EPOCH_ASSERT();
3154 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
3155 	if (ia == NULL)
3156 		CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
3157 
3158 	mh = m_gethdr(M_NOWAIT, MT_DATA);
3159 	if (mh == NULL) {
3160 		if (ia != NULL)
3161 			ifa_free(&ia->ia_ifa);
3162 		m_freem(m);
3163 		return (NULL);
3164 	}
3165 	M_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3166 
3167 	mldreclen = m_length(m, NULL);
3168 	CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
3169 
3170 	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3171 	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3172 	    sizeof(struct mldv2_report) + mldreclen;
3173 
3174 	ip6 = mtod(mh, struct ip6_hdr *);
3175 	ip6->ip6_flow = 0;
3176 	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3177 	ip6->ip6_vfc |= IPV6_VERSION;
3178 	ip6->ip6_nxt = IPPROTO_ICMPV6;
3179 	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3180 	if (ia != NULL)
3181 		ifa_free(&ia->ia_ifa);
3182 	ip6->ip6_dst = in6addr_linklocal_allv2routers;
3183 	/* scope ID will be set in netisr */
3184 
3185 	mld = (struct mldv2_report *)(ip6 + 1);
3186 	mld->mld_type = MLDV2_LISTENER_REPORT;
3187 	mld->mld_code = 0;
3188 	mld->mld_cksum = 0;
3189 	mld->mld_v2_reserved = 0;
3190 	mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
3191 	m->m_pkthdr.vt_nrecs = 0;
3192 
3193 	mh->m_next = m;
3194 	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3195 	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3196 	return (mh);
3197 }
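
/*
 * The resulting chain prepends one header mbuf to the record chain:
 *
 *	mh: struct ip6_hdr (40) + struct mldv2_report (8)
 *	m:  one or more mldv2_record structures and their sources
 *
 * mld_v2_numrecs is taken from the vt_nrecs count accumulated in the
 * packet header while queueing, and the ICMPv6 checksum is computed
 * over the 8-byte report header plus all records (plus the IPv6
 * pseudo-header); the Router Alert hop-by-hop option is supplied
 * separately through mld_po, per the KAME convention noted above.
 */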
3198 
3199 #ifdef KTR
3200 static char *
3201 mld_rec_type_to_str(const int type)
3202 {
3203 
3204 	switch (type) {
3205 		case MLD_CHANGE_TO_EXCLUDE_MODE:
3206 			return "TO_EX";
3207 			break;
3208 		case MLD_CHANGE_TO_INCLUDE_MODE:
3209 			return "TO_IN";
3210 			break;
3211 		case MLD_MODE_IS_EXCLUDE:
3212 			return "MODE_EX";
3213 			break;
3214 		case MLD_MODE_IS_INCLUDE:
3215 			return "MODE_IN";
3216 			break;
3217 		case MLD_ALLOW_NEW_SOURCES:
3218 			return "ALLOW_NEW";
3219 			break;
3220 		case MLD_BLOCK_OLD_SOURCES:
3221 			return "BLOCK_OLD";
3222 			break;
3223 		default:
3224 			break;
3225 	}
3226 	return "unknown";
3227 }
3228 #endif
3229 
3230 static void
3231 mld_init(void *unused __unused)
3232 {
3233 
3234 	CTR1(KTR_MLD, "%s: initializing", __func__);
3235 	MLD_LOCK_INIT();
3236 
3237 	ip6_initpktopts(&mld_po);
3238 	mld_po.ip6po_hlim = 1;
3239 	mld_po.ip6po_hbh = &mld_ra.hbh;
3240 	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3241 	mld_po.ip6po_flags = IP6PO_DONTFRAG;
3242 	mld_po.ip6po_valid = IP6PO_VALID_HLIM | IP6PO_VALID_HBH;
3243 
3244 	callout_init(&mldslow_callout, 1);
3245 	callout_reset(&mldslow_callout, hz / MLD_SLOWHZ, mld_slowtimo, NULL);
3246 	callout_init(&mldfast_callout, 1);
3247 	callout_reset(&mldfast_callout, hz / MLD_FASTHZ, mld_fasttimo, NULL);
3248 }
3249 SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL);
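
/*
 * On the timer wheel set up above, and assuming hz = 1000 with the
 * mld6_var.h defaults MLD_FASTHZ = 5 and MLD_SLOWHZ = 2, the fast
 * path (report and state-change timers) runs every hz / 5 = 200ms
 * and the slow path (querier-present timers) every hz / 2 = 500ms;
 * those are the tick granularities assumed by the in6m_timer,
 * in6m_sctimer and mli_v1_timer countdowns throughout this file.
 */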
3250 
3251 static void
3252 mld_uninit(void *unused __unused)
3253 {
3254 
3255 	CTR1(KTR_MLD, "%s: tearing down", __func__);
3256 	callout_drain(&mldslow_callout);
3257 	callout_drain(&mldfast_callout);
3258 	MLD_LOCK_DESTROY();
3259 }
3260 SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL);
3261 
3262 static void
3263 vnet_mld_init(const void *unused __unused)
3264 {
3265 
3266 	CTR1(KTR_MLD, "%s: initializing", __func__);
3267 
3268 	LIST_INIT(&V_mli_head);
3269 }
3270 VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init,
3271     NULL);
3272 
3273 static void
3274 vnet_mld_uninit(const void *unused __unused)
3275 {
3276 
3277 	/* This can happen if we shutdown the network stack. */
3278 	CTR1(KTR_MLD, "%s: tearing down", __func__);
3279 }
3280 VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit,
3281     NULL);
3282 
3283 static int
3284 mld_modevent(module_t mod, int type, void *unused __unused)
3285 {
3286 
3287     switch (type) {
3288     case MOD_LOAD:
3289     case MOD_UNLOAD:
3290 	break;
3291     default:
3292 	return (EOPNOTSUPP);
3293     }
3294     return (0);
3295 }
3296 
3297 static moduledata_t mld_mod = {
3298     "mld",
3299     mld_modevent,
3300     0
3301 };
3302 DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY);
3303