xref: /xnu-8796.101.5/bsd/netinet6/mld6.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*-
29  * Copyright (c) 2009 Bruce Simpson.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  * 3. The name of the author may not be used to endorse or promote
40  *    products derived from this software without specific prior written
41  *    permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  */
55 
56 /*
57  * Copyright (c) 1988 Stephen Deering.
58  * Copyright (c) 1992, 1993
59  *	The Regents of the University of California.  All rights reserved.
60  *
61  * This code is derived from software contributed to Berkeley by
62  * Stephen Deering of Stanford University.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  * 3. All advertising materials mentioning features or use of this software
73  *    must display the following acknowledgement:
74  *	This product includes software developed by the University of
75  *	California, Berkeley and its contributors.
76  * 4. Neither the name of the University nor the names of its contributors
77  *    may be used to endorse or promote products derived from this software
78  *    without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
81  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
82  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
83  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
84  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
85  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
86  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
87  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
88  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
89  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
90  * SUCH DAMAGE.
91  *
92  *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
93  */
94 /*
95  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
96  * support for mandatory and extensible security protections.  This notice
97  * is included in support of clause 2.2 (b) of the Apple Public License,
98  * Version 2.0.
99  */
100 
101 #include <sys/cdefs.h>
102 
103 #include <sys/param.h>
104 #include <sys/systm.h>
105 #include <sys/mbuf.h>
106 #include <sys/socket.h>
107 #include <sys/protosw.h>
108 #include <sys/sysctl.h>
109 #include <sys/kernel.h>
110 #include <sys/malloc.h>
111 #include <sys/mcache.h>
112 
113 #include <dev/random/randomdev.h>
114 
115 #include <kern/zalloc.h>
116 
117 #include <net/if.h>
118 #include <net/route.h>
119 
120 #include <netinet/in.h>
121 #include <netinet/in_var.h>
122 #include <netinet6/in6_var.h>
123 #include <netinet/ip6.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/scope6_var.h>
126 #include <netinet/icmp6.h>
127 #include <netinet6/mld6.h>
128 #include <netinet6/mld6_var.h>
129 
130 /* Lock group and attribute for mld_mtx */
131 static LCK_ATTR_DECLARE(mld_mtx_attr, 0, 0);
132 static LCK_GRP_DECLARE(mld_mtx_grp, "mld_mtx");
133 
134 /*
135  * Locking and reference counting:
136  *
137  * mld_mtx mainly protects mli_head.  In cases where both mld_mtx and
138  * in6_multihead_lock must be held, the former must be acquired first in order
139  * to maintain lock ordering.  It is not a requirement that mld_mtx be
140  * acquired first before in6_multihead_lock, but in case both must be acquired
141  * in succession, the correct lock ordering must be followed.
142  *
143  * Instead of walking the if_multiaddrs list at the interface and returning
144  * the ifma_protospec value of a matching entry, we search the global list
145  * of in6_multi records and find it that way; this is done with in6_multihead
146  * lock held.  Doing so avoids the race condition issues that many other BSDs
147  * suffer from (therefore in our implementation, ifma_protospec will never be
148  * NULL for as long as the in6_multi is valid.)
149  *
150  * The above creates a requirement for the in6_multi to stay in in6_multihead
151  * list even after the final MLD leave (in MLDv2 mode) until it no longer
152  * needs to be retransmitted (this is not required for MLDv1.)  In order to handle
153  * this, the request and reference counts of the in6_multi are bumped up when
154  * the state changes to MLD_LEAVING_MEMBER, and later dropped in the timeout
155  * handler.  Each in6_multi holds a reference to the underlying mld_ifinfo.
156  *
157  * Thus, the permitted lock order is:
158  *
159  *	mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
160  *
161  * Any may be taken independently, but if any are held at the same time,
162  * the above lock order must be followed.
163  */
164 static LCK_MTX_DECLARE_ATTR(mld_mtx, &mld_mtx_grp, &mld_mtx_attr);
165 
166 SLIST_HEAD(mld_in6m_relhead, in6_multi);
167 
168 static void     mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
169 static struct mld_ifinfo *mli_alloc(zalloc_flags_t);
170 static void     mli_free(struct mld_ifinfo *);
171 static void     mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
172 static void     mld_dispatch_packet(struct mbuf *);
173 static void     mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
174     struct mld_tparams *);
175 static int      mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
176     struct mld_tparams *);
177 static int      mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
178     struct mld_tparams *, const int);
179 #ifdef MLD_DEBUG
180 static const char *     mld_rec_type_to_str(const int);
181 #endif
182 static uint32_t mld_set_version(struct mld_ifinfo *, const int);
183 static void     mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
184 static void     mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int);
185 static int      mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
186     /*const*/ struct mld_hdr *);
187 static int      mld_v1_input_report(struct ifnet *, struct mbuf *,
188     const struct ip6_hdr *, /*const*/ struct mld_hdr *);
189 static void     mld_v1_process_group_timer(struct in6_multi *, const int);
190 static void     mld_v1_process_querier_timers(struct mld_ifinfo *);
191 static int      mld_v1_transmit_report(struct in6_multi *, const uint8_t);
192 static uint32_t mld_v1_update_group(struct in6_multi *, const int);
193 static void     mld_v2_cancel_link_timers(struct mld_ifinfo *);
194 static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *);
195 static struct mbuf *
196 mld_v2_encap_report(struct ifnet *, struct mbuf *);
197 static int      mld_v2_enqueue_filter_change(struct ifqueue *,
198     struct in6_multi *);
199 static int      mld_v2_enqueue_group_record(struct ifqueue *,
200     struct in6_multi *, const int, const int, const int,
201     const int);
202 static int      mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
203     struct mbuf *, const int, const int);
204 static int      mld_v2_merge_state_changes(struct in6_multi *,
205     struct ifqueue *);
206 static void     mld_v2_process_group_timers(struct mld_ifinfo *,
207     struct ifqueue *, struct ifqueue *,
208     struct in6_multi *, const int);
209 static int      mld_v2_process_group_query(struct in6_multi *,
210     int, struct mbuf *, const int);
211 static int      sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
212 static int      sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
213 static int      sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;
214 
215 static int mld_timeout_run;             /* MLD timer is scheduled to run */
216 static void mld_timeout(void *);
217 static void mld_sched_timeout(bool);
218 
219 /*
220  * Normative references: RFC 2710, RFC 3590, RFC 3810.
221  */
222 static struct timeval mld_gsrdelay = {.tv_sec = 10, .tv_usec = 0};
223 static LIST_HEAD(, mld_ifinfo) mli_head;
224 
225 static int querier_present_timers_running6;
226 static int interface_timers_running6;
227 static int state_change_timers_running6;
228 static int current_state_timers_running6;
229 
230 static unsigned int mld_mli_list_genid;
231 /*
232  * Subsystem lock macros.
233  */
234 #define MLD_LOCK()                      \
235 	lck_mtx_lock(&mld_mtx)
236 #define MLD_LOCK_ASSERT_HELD()          \
237 	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED)
238 #define MLD_LOCK_ASSERT_NOTHELD()       \
239 	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
240 #define MLD_UNLOCK()                    \
241 	lck_mtx_unlock(&mld_mtx)
242 
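/*
 * Illustrative sketch (not compiled): a path that needs both the global
 * lock and a per-interface lock takes them in the documented order and
 * drops them in reverse, e.g.:
 *
 *	MLD_LOCK();
 *	MLI_LOCK(mli);
 *	...update mli state...
 *	MLI_UNLOCK(mli);
 *	MLD_UNLOCK();
 */
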
243 #define MLD_ADD_DETACHED_IN6M(_head, _in6m) {                           \
244 	SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle);                     \
245 }
246 
247 #define MLD_REMOVE_DETACHED_IN6M(_head) {                               \
248 	struct in6_multi *_in6m, *_inm_tmp;                             \
249 	SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) {         \
250 	        SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle);       \
251 	        IN6M_REMREF(_in6m);                                     \
252 	}                                                               \
253 	VERIFY(SLIST_EMPTY(_head));                                     \
254 }
255 
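/*
 * Usage sketch for the detached-record macros, mirroring mld_domifdetach()
 * below: records are collected on a local SLIST while locks are held, and
 * the references are released only after every lock has been dropped:
 *
 *	SLIST_HEAD(, in6_multi) in6m_dthead;
 *	SLIST_INIT(&in6m_dthead);
 *	MLD_LOCK();
 *	mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);
 *	MLD_UNLOCK();
 *	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
 */
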
256 static KALLOC_TYPE_DEFINE(mli_zone, struct mld_ifinfo, NET_KT_DEFAULT);
257 
258 SYSCTL_DECL(_net_inet6);        /* Note: Not in any common header. */
259 
260 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
261     "IPv6 Multicast Listener Discovery");
262 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
263     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
264     &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
265     "Rate limit for MLDv2 Group-and-Source queries in seconds");
266 
267 SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
268     sysctl_mld_ifinfo, "Per-interface MLDv2 state");
269 
270 static int      mld_v1enable = 1;
271 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
272     &mld_v1enable, 0, "Enable fallback to MLDv1");
273 
274 static int      mld_v2enable = 1;
275 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
276     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
277     &mld_v2enable, 0, sysctl_mld_v2enable, "I",
278     "Enable MLDv2 (debug purposes only)");
279 
280 static int      mld_use_allow = 1;
281 SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
282     &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
283 
284 #ifdef MLD_DEBUG
285 int mld_debug = 0;
286 SYSCTL_INT(_net_inet6_mld, OID_AUTO,
287     debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
288 #endif
289 /*
290  * Packed Router Alert option structure declaration.
291  */
292 struct mld_raopt {
293 	struct ip6_hbh          hbh;
294 	struct ip6_opt          pad;
295 	struct ip6_opt_router   ra;
296 } __packed;
297 
298 /*
299  * Router Alert hop-by-hop option header.
300  */
301 static struct mld_raopt mld_ra = {
302 	.hbh = { .ip6h_nxt = 0, .ip6h_len = 0 },
303 	.pad = { .ip6o_type = IP6OPT_PADN, .ip6o_len = 0 },
304 	.ra = {
305 		.ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
306 		.ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
307 		.ip6or_value =  {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
308 			         (IP6OPT_RTALERT_MLD & 0xFF) }
309 	}
310 };
311 static struct ip6_pktopts mld_po;
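
/*
 * Layout note (illustrative): the three members above pack into the 8-byte
 * hop-by-hop options header that precedes every MLD message, i.e.
 * ip6_hbh (2 bytes) + PadN (2 bytes) + Router Alert option (4 bytes).
 * The Router Alert value is stored in network byte order, so
 * IP6OPT_RTALERT_MLD (0) becomes the two bytes { 0x00, 0x00 }.  The
 * next-header and length fields of mld_ra/mld_po are assumed to be
 * finalized during subsystem initialization, which is not shown here.
 */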
312 
313 /* Store MLDv2 record count in the module private scratch space */
314 #define vt_nrecs        pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
315 
316 static __inline void
317 mld_save_context(struct mbuf *m, struct ifnet *ifp)
318 {
319 	m->m_pkthdr.rcvif = ifp;
320 }
321 
322 static __inline void
323 mld_scrub_context(struct mbuf *m)
324 {
325 	m->m_pkthdr.rcvif = NULL;
326 }
327 
328 /*
329  * Restore context from a queued output chain.
330  * Return saved ifp.
331  */
332 static __inline struct ifnet *
333 mld_restore_context(struct mbuf *m)
334 {
335 	return m->m_pkthdr.rcvif;
336 }
337 
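/*
 * Round-trip sketch (assumption; the dispatch path itself is defined
 * later in this file): the transmit side stashes the outgoing ifp in
 * the packet header before enqueueing, and the dispatcher recovers it,
 * since queued chains carry no other per-packet context:
 *
 *	mld_save_context(m, ifp);	(before IF_ENQUEUE)
 *	...
 *	ifp = mld_restore_context(m);	(at dispatch time)
 *	mld_scrub_context(m);		(before handing the mbuf off)
 */
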
338 /*
339  * Retrieve or set threshold between group-source queries in seconds.
340  */
341 static int
342 sysctl_mld_gsr SYSCTL_HANDLER_ARGS
343 {
344 #pragma unused(arg1, arg2)
345 	int error;
346 	int i;
347 
348 	MLD_LOCK();
349 
350 	i = (int)mld_gsrdelay.tv_sec;
351 
352 	error = sysctl_handle_int(oidp, &i, 0, req);
353 	if (error || !req->newptr) {
354 		goto out_locked;
355 	}
356 
357 	if (i < -1 || i >= 60) {
358 		error = EINVAL;
359 		goto out_locked;
360 	}
361 
362 	mld_gsrdelay.tv_sec = i;
363 
364 out_locked:
365 	MLD_UNLOCK();
366 	return error;
367 }
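
/*
 * Usage example (hedged): this knob surfaces as net.inet6.mld.gsrdelay
 * and accepts values in the range [-1, 59], e.g.
 *
 *	sysctl -w net.inet6.mld.gsrdelay=10
 */
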
368 /*
369  * Expose struct mld_ifinfo to userland, keyed by ifindex.
370  * For use by ifmcstat(8).
371  *
372  */
373 static int
374 sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
375 {
376 #pragma unused(oidp)
377 	int                     *name;
378 	int                      error;
379 	u_int                    namelen;
380 	struct ifnet            *ifp;
381 	struct mld_ifinfo       *mli;
382 	struct mld_ifinfo_u     mli_u;
383 
384 	name = (int *)arg1;
385 	namelen = arg2;
386 
387 	if (req->newptr != USER_ADDR_NULL) {
388 		return EPERM;
389 	}
390 
391 	if (namelen != 1) {
392 		return EINVAL;
393 	}
394 
395 	MLD_LOCK();
396 
397 	if (name[0] <= 0 || name[0] > (u_int)if_index) {
398 		error = ENOENT;
399 		goto out_locked;
400 	}
401 
402 	error = ENOENT;
403 
404 	ifnet_head_lock_shared();
405 	ifp = ifindex2ifnet[name[0]];
406 	ifnet_head_done();
407 	if (ifp == NULL) {
408 		goto out_locked;
409 	}
410 
411 	bzero(&mli_u, sizeof(mli_u));
412 
413 	LIST_FOREACH(mli, &mli_head, mli_link) {
414 		MLI_LOCK(mli);
415 		if (ifp != mli->mli_ifp) {
416 			MLI_UNLOCK(mli);
417 			continue;
418 		}
419 
420 		mli_u.mli_ifindex = mli->mli_ifp->if_index;
421 		mli_u.mli_version = mli->mli_version;
422 		mli_u.mli_v1_timer = mli->mli_v1_timer;
423 		mli_u.mli_v2_timer = mli->mli_v2_timer;
424 		mli_u.mli_flags = mli->mli_flags;
425 		mli_u.mli_rv = mli->mli_rv;
426 		mli_u.mli_qi = mli->mli_qi;
427 		mli_u.mli_qri = mli->mli_qri;
428 		mli_u.mli_uri = mli->mli_uri;
429 		MLI_UNLOCK(mli);
430 
431 		error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u));
432 		break;
433 	}
434 
435 out_locked:
436 	MLD_UNLOCK();
437 	return error;
438 }
439 
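/*
 * Userland read sketch (assumption; essentially what ifmcstat(8) does).
 * The node is keyed by ifindex, passed as one extra MIB element:
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME - 1;
 *	struct mld_ifinfo_u mli_u;
 *	size_t len = sizeof (mli_u);
 *
 *	if (sysctlnametomib("net.inet6.mld.ifinfo", mib, &miblen) == 0) {
 *		mib[miblen] = (int)if_nametoindex("en0");
 *		(void) sysctl(mib, (u_int)miblen + 1, &mli_u, &len, NULL, 0);
 *	}
 */
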
440 static int
441 sysctl_mld_v2enable SYSCTL_HANDLER_ARGS
442 {
443 #pragma unused(arg1, arg2)
444 	int error;
445 	int i;
446 	struct mld_ifinfo *mli;
447 	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
448 
449 	MLD_LOCK();
450 
451 	i = mld_v2enable;
452 
453 	error = sysctl_handle_int(oidp, &i, 0, req);
454 	if (error || !req->newptr) {
455 		goto out_locked;
456 	}
457 
458 	if (i < 0 || i > 1) {
459 		error = EINVAL;
460 		goto out_locked;
461 	}
462 
463 	mld_v2enable = i;
464 	/*
465 	 * If we enabled v2, the state transition will take care of upgrading
466 	 * the MLD version back to v2. Otherwise, we have to explicitly
467 	 * downgrade. Note that this functionality is to be used for debugging.
468 	 */
469 	if (mld_v2enable == 1) {
470 		goto out_locked;
471 	}
472 
473 	LIST_FOREACH(mli, &mli_head, mli_link) {
474 		MLI_LOCK(mli);
475 		if (mld_set_version(mli, MLD_VERSION_1) > 0) {
476 			mtp.qpt = 1;
477 		}
478 		MLI_UNLOCK(mli);
479 	}
480 
481 out_locked:
482 	MLD_UNLOCK();
483 
484 	mld_set_timeout(&mtp);
485 
486 	return error;
487 }
488 
489 /*
490  * Dispatch an entire queue of pending packet chains.
491  *
492  * Must not be called with in6m_lock held.
493  * XXX This routine unlocks the MLD global lock and also the mli locks.
494  * Make sure that the calling routine takes a reference on the mli
495  * before calling this routine.
496  * Also, if we are traversing mli_head, remember to check the mli list
497  * generation count and restart the loop if the generation count has
498  * changed.
499  */
500 static void
501 mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
502 {
503 	struct mbuf *m;
504 
505 	MLD_LOCK_ASSERT_HELD();
506 
507 	if (mli != NULL) {
508 		MLI_LOCK_ASSERT_HELD(mli);
509 	}
510 
511 	for (;;) {
512 		IF_DEQUEUE(ifq, m);
513 		if (m == NULL) {
514 			break;
515 		}
516 		MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
517 		    (uint64_t)VM_KERNEL_ADDRPERM(ifq),
518 		    (uint64_t)VM_KERNEL_ADDRPERM(m)));
519 
520 		if (mli != NULL) {
521 			MLI_UNLOCK(mli);
522 		}
523 		MLD_UNLOCK();
524 
525 		mld_dispatch_packet(m);
526 
527 		MLD_LOCK();
528 		if (mli != NULL) {
529 			MLI_LOCK(mli);
530 		}
531 
532 		if (--limit == 0) {
533 			break;
534 		}
535 	}
536 
537 	if (mli != NULL) {
538 		MLI_LOCK_ASSERT_HELD(mli);
539 	}
540 }
541 
542 /*
543  * Filter outgoing MLD report state by group.
544  *
545  * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
546  * and node-local addresses. However, kernel and socket consumers
547  * always embed the KAME scope ID in the address provided, so strip it
548  * when performing comparison.
549  * Note: This is not the same as the *multicast* scope.
550  *
551  * Return zero if the given group is one for which MLD reports
552  * should be suppressed, or non-zero if reports should be issued.
553  */
554 static __inline__ int
555 mld_is_addr_reported(const struct in6_addr *addr)
556 {
557 	VERIFY(IN6_IS_ADDR_MULTICAST(addr));
558 
559 	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) {
560 		return 0;
561 	}
562 
563 	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL && !IN6_IS_ADDR_UNICAST_BASED_MULTICAST(addr)) {
564 		struct in6_addr tmp = *addr;
565 		in6_clearscope(&tmp);
566 		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) {
567 			return 0;
568 		}
569 	}
570 
571 	return 1;
572 }
573 
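/*
 * Examples: ff01::1 (node-local scope) and ff02::1 (all-nodes) are
 * suppressed by the checks above, while an ordinary link-local group
 * such as the solicited-node address ff02::1:ff00:1 is reported.
 */
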
574 /*
575  * Attach MLD when PF_INET6 is attached to an interface.
576  */
577 struct mld_ifinfo *
578 mld_domifattach(struct ifnet *ifp, zalloc_flags_t how)
579 {
580 	struct mld_ifinfo *mli;
581 
582 	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
583 	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
584 
585 	mli = mli_alloc(how);
586 	if (mli == NULL) {
587 		return NULL;
588 	}
589 
590 	MLD_LOCK();
591 
592 	MLI_LOCK(mli);
593 	mli_initvar(mli, ifp, 0);
594 	mli->mli_debug |= IFD_ATTACHED;
595 	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
596 	MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
597 	MLI_UNLOCK(mli);
598 	ifnet_lock_shared(ifp);
599 	mld6_initsilent(ifp, mli);
600 	ifnet_lock_done(ifp);
601 
602 	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
603 	mld_mli_list_genid++;
604 
605 	MLD_UNLOCK();
606 
607 	MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n",
608 	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
609 
610 	return mli;
611 }
612 
613 /*
614  * Attach MLD when PF_INET6 is reattached to an interface.  Caller is
615  * expected to have an outstanding reference to the mli.
616  */
617 void
618 mld_domifreattach(struct mld_ifinfo *mli)
619 {
620 	struct ifnet *ifp;
621 
622 	MLD_LOCK();
623 
624 	MLI_LOCK(mli);
625 	VERIFY(!(mli->mli_debug & IFD_ATTACHED));
626 	ifp = mli->mli_ifp;
627 	VERIFY(ifp != NULL);
628 	mli_initvar(mli, ifp, 1);
629 	mli->mli_debug |= IFD_ATTACHED;
630 	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
631 	MLI_UNLOCK(mli);
632 	ifnet_lock_shared(ifp);
633 	mld6_initsilent(ifp, mli);
634 	ifnet_lock_done(ifp);
635 
636 	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
637 	mld_mli_list_genid++;
638 
639 	MLD_UNLOCK();
640 
641 	MLD_PRINTF(("%s: reattached mld_ifinfo for ifp 0x%llx(%s)\n",
642 	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
643 }
644 
645 /*
646  * Hook for domifdetach.
647  */
648 void
649 mld_domifdetach(struct ifnet *ifp)
650 {
651 	SLIST_HEAD(, in6_multi) in6m_dthead;
652 
653 	SLIST_INIT(&in6m_dthead);
654 
655 	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
656 	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
657 
658 	MLD_LOCK();
659 	mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);
660 	MLD_UNLOCK();
661 
662 	/* Now that we've dropped all locks, release detached records */
663 	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
664 }
665 
666 /*
667  * Called at interface detach time.  Note that we only flush all deferred
668  * responses and record releases; all remaining inm records and their source
669  * entries related to this interface are left intact, in order to handle
670  * the reattach case.
671  */
672 static void
673 mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead)
674 {
675 	struct mld_ifinfo *mli, *tmli;
676 
677 	MLD_LOCK_ASSERT_HELD();
678 
679 	LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
680 		MLI_LOCK(mli);
681 		if (mli->mli_ifp == ifp) {
682 			/*
683 			 * Free deferred General Query responses.
684 			 */
685 			IF_DRAIN(&mli->mli_gq);
686 			IF_DRAIN(&mli->mli_v1q);
687 			mld_flush_relq(mli, in6m_dthead);
688 			VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
689 			mli->mli_debug &= ~IFD_ATTACHED;
690 			MLI_UNLOCK(mli);
691 
692 			LIST_REMOVE(mli, mli_link);
693 			MLI_REMREF(mli); /* release mli_head reference */
694 			mld_mli_list_genid++;
695 			return;
696 		}
697 		MLI_UNLOCK(mli);
698 	}
699 	panic("%s: mld_ifinfo not found for ifp %p(%s)", __func__,
700 	    ifp, ifp->if_xname);
701 }
702 
703 __private_extern__ void
704 mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli)
705 {
706 	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);
707 
708 	MLI_LOCK_ASSERT_NOTHELD(mli);
709 	MLI_LOCK(mli);
710 	if (!(ifp->if_flags & IFF_MULTICAST) &&
711 	    (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) {
712 		mli->mli_flags |= MLIF_SILENT;
713 	} else {
714 		mli->mli_flags &= ~MLIF_SILENT;
715 	}
716 	MLI_UNLOCK(mli);
717 }
718 
719 static void
720 mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
721 {
722 	MLI_LOCK_ASSERT_HELD(mli);
723 
724 	mli->mli_ifp = ifp;
725 	if (mld_v2enable) {
726 		mli->mli_version = MLD_VERSION_2;
727 	} else {
728 		mli->mli_version = MLD_VERSION_1;
729 	}
730 	mli->mli_flags = 0;
731 	mli->mli_rv = MLD_RV_INIT;
732 	mli->mli_qi = MLD_QI_INIT;
733 	mli->mli_qri = MLD_QRI_INIT;
734 	mli->mli_uri = MLD_URI_INIT;
735 
736 	if (mld_use_allow) {
737 		mli->mli_flags |= MLIF_USEALLOW;
738 	}
739 	if (!reattach) {
740 		SLIST_INIT(&mli->mli_relinmhead);
741 	}
742 
743 	/*
744 	 * Responses to general queries are subject to bounds.
745 	 */
746 	mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
747 	mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
748 }
749 
750 static struct mld_ifinfo *
751 mli_alloc(zalloc_flags_t how)
752 {
753 	struct mld_ifinfo *mli = zalloc_flags(mli_zone, how | Z_ZERO);
754 	if (mli != NULL) {
755 		lck_mtx_init(&mli->mli_lock, &mld_mtx_grp, &mld_mtx_attr);
756 		mli->mli_debug |= IFD_ALLOC;
757 	}
758 	return mli;
759 }
760 
761 static void
762 mli_free(struct mld_ifinfo *mli)
763 {
764 	MLI_LOCK(mli);
765 	if (mli->mli_debug & IFD_ATTACHED) {
766 		panic("%s: attached mli=%p is being freed", __func__, mli);
767 		/* NOTREACHED */
768 	} else if (mli->mli_ifp != NULL) {
769 		panic("%s: ifp not NULL for mli=%p", __func__, mli);
770 		/* NOTREACHED */
771 	} else if (!(mli->mli_debug & IFD_ALLOC)) {
772 		panic("%s: mli %p cannot be freed", __func__, mli);
773 		/* NOTREACHED */
774 	} else if (mli->mli_refcnt != 0) {
775 		panic("%s: non-zero refcnt mli=%p", __func__, mli);
776 		/* NOTREACHED */
777 	}
778 	mli->mli_debug &= ~IFD_ALLOC;
779 	MLI_UNLOCK(mli);
780 
781 	lck_mtx_destroy(&mli->mli_lock, &mld_mtx_grp);
782 	zfree(mli_zone, mli);
783 }
784 
785 void
786 mli_addref(struct mld_ifinfo *mli, int locked)
787 {
788 	if (!locked) {
789 		MLI_LOCK_SPIN(mli);
790 	} else {
791 		MLI_LOCK_ASSERT_HELD(mli);
792 	}
793 
794 	if (++mli->mli_refcnt == 0) {
795 		panic("%s: mli=%p wraparound refcnt", __func__, mli);
796 		/* NOTREACHED */
797 	}
798 	if (!locked) {
799 		MLI_UNLOCK(mli);
800 	}
801 }
802 
803 void
804 mli_remref(struct mld_ifinfo *mli)
805 {
806 	SLIST_HEAD(, in6_multi) in6m_dthead;
807 	struct ifnet *ifp;
808 
809 	MLI_LOCK_SPIN(mli);
810 
811 	if (mli->mli_refcnt == 0) {
812 		panic("%s: mli=%p negative refcnt", __func__, mli);
813 		/* NOTREACHED */
814 	}
815 
816 	--mli->mli_refcnt;
817 	if (mli->mli_refcnt > 0) {
818 		MLI_UNLOCK(mli);
819 		return;
820 	}
821 
822 	ifp = mli->mli_ifp;
823 	mli->mli_ifp = NULL;
824 	IF_DRAIN(&mli->mli_gq);
825 	IF_DRAIN(&mli->mli_v1q);
826 	SLIST_INIT(&in6m_dthead);
827 	mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
828 	VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
829 	MLI_UNLOCK(mli);
830 
831 	/* Now that we've dropped all locks, release detached records */
832 	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
833 
834 	MLD_PRINTF(("%s: freeing mld_ifinfo for ifp 0x%llx(%s)\n",
835 	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
836 
837 	mli_free(mli);
838 }
839 
840 /*
841  * Process a received MLDv1 general or address-specific query.
842  * Assumes that the query header has been pulled up to sizeof(mld_hdr).
843  *
844  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
845  * mld_addr. This is OK as we own the mbuf chain.
846  */
847 static int
848 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
849     /*const*/ struct mld_hdr *mld)
850 {
851 	struct mld_ifinfo       *mli;
852 	struct in6_multi        *inm;
853 	int                      err = 0, is_general_query;
854 	uint16_t                 timer;
855 	struct mld_tparams       mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
856 
857 	MLD_LOCK_ASSERT_NOTHELD();
858 
859 	is_general_query = 0;
860 
861 	if (!mld_v1enable) {
862 		MLD_PRINTF(("%s: ignore v1 query %s on ifp 0x%llx(%s)\n",
863 		    __func__, ip6_sprintf(&mld->mld_addr),
864 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
865 		goto done;
866 	}
867 
868 	/*
869 	 * RFC3810 Section 6.2: MLD queries must originate from
870 	 * a router's link-local address.
871 	 */
872 	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
873 		MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
874 		    __func__, ip6_sprintf(&ip6->ip6_src),
875 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
876 		goto done;
877 	}
878 
879 	/*
880 	 * Do address field validation upfront before we accept
881 	 * the query.
882 	 */
883 	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
884 		/*
885 		 * MLDv1 General Query.
886 		 * If this was not sent to the all-nodes group, ignore it.
887 		 */
888 		struct in6_addr          dst;
889 
890 		dst = ip6->ip6_dst;
891 		in6_clearscope(&dst);
892 		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) {
893 			err = EINVAL;
894 			goto done;
895 		}
896 		is_general_query = 1;
897 	} else {
898 		/*
899 		 * Embed scope ID of receiving interface in MLD query for
900 		 * lookup whilst we don't hold other locks.
901 		 */
902 		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
903 	}
904 
905 	/*
906 	 * Switch to MLDv1 host compatibility mode.
907 	 */
908 	mli = MLD_IFINFO(ifp);
909 	VERIFY(mli != NULL);
910 
911 	MLI_LOCK(mli);
912 	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
913 	MLI_UNLOCK(mli);
914 
915 	timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
916 	if (timer == 0) {
917 		timer = 1;
918 	}
919 
920 	if (is_general_query) {
921 		struct in6_multistep step;
922 
923 		MLD_PRINTF(("%s: process v1 general query on ifp 0x%llx(%s)\n",
924 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
925 		/*
926 		 * For each reporting group joined on this
927 		 * interface, kick the report timer.
928 		 */
929 		in6_multihead_lock_shared();
930 		IN6_FIRST_MULTI(step, inm);
931 		while (inm != NULL) {
932 			IN6M_LOCK(inm);
933 			if (inm->in6m_ifp == ifp) {
934 				mtp.cst += mld_v1_update_group(inm, timer);
935 			}
936 			IN6M_UNLOCK(inm);
937 			IN6_NEXT_MULTI(step, inm);
938 		}
939 		in6_multihead_lock_done();
940 	} else {
941 		/*
942 		 * MLDv1 Group-Specific Query.
943 		 * If this is a group-specific MLDv1 query, we need only
944 		 * look up the single group to process it.
945 		 */
946 		in6_multihead_lock_shared();
947 		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
948 		in6_multihead_lock_done();
949 
950 		if (inm != NULL) {
951 			IN6M_LOCK(inm);
952 			MLD_PRINTF(("%s: process v1 query %s on "
953 			    "ifp 0x%llx(%s)\n", __func__,
954 			    ip6_sprintf(&mld->mld_addr),
955 			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
956 			mtp.cst = mld_v1_update_group(inm, timer);
957 			IN6M_UNLOCK(inm);
958 			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
959 		}
960 		/* XXX Clear embedded scope ID as userland won't expect it. */
961 		in6_clearscope(&mld->mld_addr);
962 	}
963 done:
964 	mld_set_timeout(&mtp);
965 
966 	return err;
967 }
968 
969 /*
970  * Update the report timer on a group in response to an MLDv1 query.
971  *
972  * If we are becoming the reporting member for this group, start the timer.
973  * If we already are the reporting member for this group, and timer is
974  * below the threshold, reset it.
975  *
976  * We may be updating the group for the first time since we switched
977  * to MLDv2. If we are, then we must clear any recorded source lists,
978  * and transition to REPORTING state; the group timer is overloaded
979  * for group and group-source query responses.
980  *
981  * Unlike MLDv2, the delay per group should be jittered
982  * to avoid bursts of MLDv1 reports.
983  */
984 static uint32_t
985 mld_v1_update_group(struct in6_multi *inm, const int timer)
986 {
987 	IN6M_LOCK_ASSERT_HELD(inm);
988 
989 	MLD_PRINTF(("%s: %s/%s timer=%d\n", __func__,
990 	    ip6_sprintf(&inm->in6m_addr),
991 	    if_name(inm->in6m_ifp), timer));
992 
993 	switch (inm->in6m_state) {
994 	case MLD_NOT_MEMBER:
995 	case MLD_SILENT_MEMBER:
996 		break;
997 	case MLD_REPORTING_MEMBER:
998 		if (inm->in6m_timer != 0 &&
999 		    inm->in6m_timer <= timer) {
1000 			MLD_PRINTF(("%s: REPORTING and timer running, "
1001 			    "skipping.\n", __func__));
1002 			break;
1003 		}
1004 		OS_FALLTHROUGH;
1005 	case MLD_SG_QUERY_PENDING_MEMBER:
1006 	case MLD_G_QUERY_PENDING_MEMBER:
1007 	case MLD_IDLE_MEMBER:
1008 	case MLD_LAZY_MEMBER:
1009 	case MLD_AWAKENING_MEMBER:
1010 		MLD_PRINTF(("%s: ->REPORTING\n", __func__));
1011 		inm->in6m_state = MLD_REPORTING_MEMBER;
1012 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1013 		break;
1014 	case MLD_SLEEPING_MEMBER:
1015 		MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
1016 		inm->in6m_state = MLD_AWAKENING_MEMBER;
1017 		break;
1018 	case MLD_LEAVING_MEMBER:
1019 		break;
1020 	}
1021 
1022 	return inm->in6m_timer;
1023 }
1024 
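/*
 * Note (assumption): MLD_RANDOM_DELAY(X) is taken to return a uniformly
 * distributed value in [1, X], as in the BSD implementations; this is
 * what spreads MLDv1 reports from different hosts across the response
 * window.
 */
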
1025 /*
1026  * Process a received MLDv2 general, group-specific or
1027  * group-and-source-specific query.
1028  *
1029  * Assumes that the query header has been pulled up to sizeof(mldv2_query).
1030  *
1031  * Return 0 if successful, otherwise an appropriate error code is returned.
1032  */
1033 static int
1034 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
1035     struct mbuf *m, const int off, const int icmp6len)
1036 {
1037 	struct mld_ifinfo       *mli;
1038 	struct mldv2_query      *mld;
1039 	struct in6_multi        *inm;
1040 	uint32_t                 maxdelay, nsrc, qqi, timer;
1041 	int                      err = 0, is_general_query;
1042 	uint8_t                  qrv;
1043 	struct mld_tparams       mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
1044 
1045 	MLD_LOCK_ASSERT_NOTHELD();
1046 
1047 	is_general_query = 0;
1048 
1049 	if (!mld_v2enable) {
1050 		MLD_PRINTF(("%s: ignore v2 query %s on ifp 0x%llx(%s)\n",
1051 		    __func__, ip6_sprintf(&ip6->ip6_src),
1052 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1053 		goto done;
1054 	}
1055 
1056 	/*
1057 	 * RFC3810 Section 6.2: MLD queries must originate from
1058 	 * a router's link-local address.
1059 	 */
1060 	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1061 		MLD_PRINTF(("%s: ignore v2 query src %s on ifp 0x%llx(%s)\n",
1062 		    __func__, ip6_sprintf(&ip6->ip6_src),
1063 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1064 		goto done;
1065 	}
1066 
1067 	MLD_PRINTF(("%s: input v2 query on ifp 0x%llx(%s)\n", __func__,
1068 	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1069 
1070 	mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
1071 
1072 	maxdelay = ntohs(mld->mld_maxdelay);    /* in milliseconds */
1073 	if (maxdelay > SHRT_MAX) {
1074 		maxdelay = (MLD_MRC_MANT((uint16_t)maxdelay) | 0x1000) <<
1075 		    (MLD_MRC_EXP((uint16_t)maxdelay) + 3);
1076 	}
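	/*
	 * Worked example (hedged; assumes MLD_MRC_MANT(x) == ((x) & 0x0fff)
	 * and MLD_MRC_EXP(x) == (((x) >> 12) & 0x07), per RFC 3810
	 * section 5.1.3): an encoded Maximum Response Code of 0x8400
	 * decodes as mant 0x400, exp 0, i.e. (0x400 | 0x1000) << 3 =
	 * 40960 ms, which the division below turns into a ~40 second timer.
	 */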
1077 	timer = maxdelay / MLD_TIMER_SCALE;
1078 	if (timer == 0) {
1079 		timer = 1;
1080 	}
1081 
1082 	qrv = MLD_QRV(mld->mld_misc);
1083 	if (qrv < 2) {
1084 		MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__,
1085 		    qrv, MLD_RV_INIT));
1086 		qrv = MLD_RV_INIT;
1087 	}
1088 
1089 	qqi = mld->mld_qqi;
1090 	if (qqi >= 128) {
1091 		qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
1092 		    (MLD_QQIC_EXP(mld->mld_qqi) + 3);
1093 	}
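	/*
	 * Similar worked example for QQIC (RFC 3810 section 5.1.9), assuming
	 * the analogous MLD_QQIC_MANT/MLD_QQIC_EXP definitions: 0x80 decodes
	 * as mant 0, exp 0, i.e. (0x0 | 0x10) << 3 = 128 seconds.
	 */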
1094 
1095 	nsrc = ntohs(mld->mld_numsrc);
1096 	if (nsrc > MLD_MAX_GS_SOURCES) {
1097 		err = EMSGSIZE;
1098 		goto done;
1099 	}
1100 	if (icmp6len < sizeof(struct mldv2_query) +
1101 	    (nsrc * sizeof(struct in6_addr))) {
1102 		err = EMSGSIZE;
1103 		goto done;
1104 	}
1105 
1106 	/*
1107 	 * Do further input validation upfront to avoid resetting timers
1108 	 * should we need to discard this query.
1109 	 */
1110 	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
1111 		/*
1112 		 * A general query with a source list has undefined
1113 		 * behaviour; discard it.
1114 		 */
1115 		if (nsrc > 0) {
1116 			err = EINVAL;
1117 			goto done;
1118 		}
1119 		is_general_query = 1;
1120 	} else {
1121 		/*
1122 		 * Embed scope ID of receiving interface in MLD query for
1123 		 * lookup whilst we don't hold other locks (due to KAME
1124 		 * locking lameness). We own this mbuf chain just now.
1125 		 */
1126 		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
1127 	}
1128 
1129 	mli = MLD_IFINFO(ifp);
1130 	VERIFY(mli != NULL);
1131 
1132 	MLI_LOCK(mli);
1133 	/*
1134 	 * Discard the v2 query if we're in Compatibility Mode.
1135 	 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
1136 	 * until the Old Version Querier Present timer expires.
1137 	 */
1138 	if (mli->mli_version != MLD_VERSION_2) {
1139 		MLI_UNLOCK(mli);
1140 		goto done;
1141 	}
1142 
1143 	mtp.qpt = mld_set_version(mli, MLD_VERSION_2);
1144 	mli->mli_rv = qrv;
1145 	mli->mli_qi = qqi;
1146 	mli->mli_qri = MAX(timer, MLD_QRI_MIN);
1147 
1148 	MLD_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, mli->mli_rv,
1149 	    mli->mli_qi, mli->mli_qri));
1150 
1151 	if (is_general_query) {
1152 		/*
1153 		 * MLDv2 General Query.
1154 		 *
1155 		 * Schedule a current-state report on this ifp for
1156 		 * all groups, possibly containing source lists.
1157 		 *
1158 		 * If there is a pending General Query response
1159 		 * scheduled earlier than the selected delay, do
1160 		 * not schedule any other reports.
1161 		 * Otherwise, reset the interface timer.
1162 		 */
1163 		MLD_PRINTF(("%s: process v2 general query on ifp 0x%llx(%s)\n",
1164 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1165 		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
1166 			mtp.it = mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
1167 		}
1168 		MLI_UNLOCK(mli);
1169 	} else {
1170 		MLI_UNLOCK(mli);
1171 		/*
1172 		 * MLDv2 Group-specific or Group-and-source-specific Query.
1173 		 *
1174 		 * Group-source-specific queries are throttled on
1175 		 * a per-group basis to defeat denial-of-service attempts.
1176 		 * Queries for groups we are not a member of on this
1177 		 * link are simply ignored.
1178 		 */
1179 		in6_multihead_lock_shared();
1180 		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
1181 		in6_multihead_lock_done();
1182 		if (inm == NULL) {
1183 			goto done;
1184 		}
1185 
1186 		IN6M_LOCK(inm);
1187 		if (nsrc > 0) {
1188 			if (!ratecheck(&inm->in6m_lastgsrtv,
1189 			    &mld_gsrdelay)) {
1190 				MLD_PRINTF(("%s: GS query throttled.\n",
1191 				    __func__));
1192 				IN6M_UNLOCK(inm);
1193 				IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1194 				goto done;
1195 			}
1196 		}
1197 		MLD_PRINTF(("%s: process v2 group query on ifp 0x%llx(%s)\n",
1198 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1199 		/*
1200 		 * If there is a pending General Query response
1201 		 * scheduled sooner than the selected delay, no
1202 		 * further report need be scheduled.
1203 		 * Otherwise, prepare to respond to the
1204 		 * group-specific or group-and-source query.
1205 		 */
1206 		MLI_LOCK(mli);
1207 		mtp.it = mli->mli_v2_timer;
1208 		MLI_UNLOCK(mli);
1209 		if (mtp.it == 0 || mtp.it >= timer) {
1210 			(void) mld_v2_process_group_query(inm, timer, m, off);
1211 			mtp.cst = inm->in6m_timer;
1212 		}
1213 		IN6M_UNLOCK(inm);
1214 		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1215 		/* XXX Clear embedded scope ID as userland won't expect it. */
1216 		in6_clearscope(&mld->mld_addr);
1217 	}
1218 done:
1219 	if (mtp.it > 0) {
1220 		MLD_PRINTF(("%s: v2 general query response scheduled in "
1221 		    "T+%d seconds on ifp 0x%llx(%s)\n", __func__, mtp.it,
1222 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1223 	}
1224 	mld_set_timeout(&mtp);
1225 
1226 	return err;
1227 }
1228 
1229 /*
1230  * Process a received MLDv2 group-specific or group-and-source-specific
1231  * query.
1232  * Return <0 if any error occurred. Currently this is ignored.
1233  */
1234 static int
1235 mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
1236     const int off)
1237 {
1238 	struct mldv2_query      *mld;
1239 	int                      retval;
1240 	uint16_t                 nsrc;
1241 
1242 	IN6M_LOCK_ASSERT_HELD(inm);
1243 
1244 	retval = 0;
1245 	mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);
1246 
1247 	switch (inm->in6m_state) {
1248 	case MLD_NOT_MEMBER:
1249 	case MLD_SILENT_MEMBER:
1250 	case MLD_SLEEPING_MEMBER:
1251 	case MLD_LAZY_MEMBER:
1252 	case MLD_AWAKENING_MEMBER:
1253 	case MLD_IDLE_MEMBER:
1254 	case MLD_LEAVING_MEMBER:
1255 		return retval;
1256 	case MLD_REPORTING_MEMBER:
1257 	case MLD_G_QUERY_PENDING_MEMBER:
1258 	case MLD_SG_QUERY_PENDING_MEMBER:
1259 		break;
1260 	}
1261 
1262 	nsrc = ntohs(mld->mld_numsrc);
1263 
1264 	/*
1265 	 * Deal with group-specific queries upfront.
1266 	 * If any group query is already pending, purge any recorded
1267 	 * source-list state if it exists, and schedule a query response
1268 	 * for this group-specific query.
1269 	 */
1270 	if (nsrc == 0) {
1271 		if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
1272 		    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
1273 			in6m_clear_recorded(inm);
1274 			timer = min(inm->in6m_timer, timer);
1275 		}
1276 		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
1277 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1278 		return retval;
1279 	}
1280 
1281 	/*
1282 	 * Deal with the case where a group-and-source-specific query has
1283 	 * been received but a group-specific query is already pending.
1284 	 */
1285 	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
1286 		timer = min(inm->in6m_timer, timer);
1287 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1288 		return retval;
1289 	}
1290 
1291 	/*
1292 	 * Finally, deal with the case where a group-and-source-specific
1293  * query has been received, whether or not a response to a previous
1294  * g-s-r query exists.
1295 	 * In this case, we need to parse the source-list which the Querier
1296 	 * has provided us with and check if we have any source list filter
1297  * entries at T1 for these sources. If we do not, there is no need to
1298  * schedule a report and the query may be dropped.
1299 	 * If we do, we must record them and schedule a current-state
1300 	 * report for those sources.
1301 	 */
1302 	if (inm->in6m_nsrc > 0) {
1303 		struct mbuf             *m;
1304 		struct in6_addr          addr;
1305 		int                      i, nrecorded;
1306 		int                      soff;
1307 
1308 		m = m0;
1309 		soff = off + sizeof(struct mldv2_query);
1310 		nrecorded = 0;
1311 		for (i = 0; i < nsrc; i++) {
1312 			m_copydata(m, soff, sizeof(addr), &addr);
1313 			retval = in6m_record_source(inm, &addr);
1314 			if (retval < 0) {
1315 				break;
1316 			}
1317 			nrecorded += retval;
1318 			soff += sizeof(struct in6_addr);
1319 
1320 			while (m && (soff >= m->m_len)) {
1321 				soff -= m->m_len;
1322 				m = m->m_next;
1323 			}
1324 
1325 			/* should not be possible: */
1326 			if (m == NULL) {
1327 				break;
1328 			}
1329 		}
1330 		if (nrecorded > 0) {
1331 			MLD_PRINTF(("%s: schedule response to SG query\n",
1332 			    __func__));
1333 			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
1334 			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1335 		}
1336 	}
1337 
1338 	return retval;
1339 }
1340 
1341 /*
1342  * Process a received MLDv1 host membership report.
1343  * Assumes mld points to mld_hdr in pulled up mbuf chain.
1344  *
1345  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1346  * mld_addr. This is OK as we own the mbuf chain.
1347  */
1348 static int
1349 mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
1350     const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
1351 {
1352 	struct in6_addr          src, dst;
1353 	struct in6_ifaddr       *ia;
1354 	struct in6_multi        *inm;
1355 
1356 	if (!mld_v1enable) {
1357 		MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n",
1358 		    __func__, ip6_sprintf(&mld->mld_addr),
1359 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1360 		return 0;
1361 	}
1362 
1363 	if ((ifp->if_flags & IFF_LOOPBACK) ||
1364 	    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1365 		return 0;
1366 	}
1367 
1368 	/*
1369 	 * MLDv1 reports must originate from a host's link-local address,
1370 	 * or the unspecified address (when booting).
1371 	 */
1372 	src = ip6->ip6_src;
1373 	in6_clearscope(&src);
1374 	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
1375 		MLD_PRINTF(("%s: ignore v1 report src %s on ifp 0x%llx(%s)\n",
1376 		    __func__, ip6_sprintf(&ip6->ip6_src),
1377 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1378 		return EINVAL;
1379 	}
1380 
1381 	/*
1382 	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1383 	 * group, and must be directed to the group itself.
1384 	 */
1385 	dst = ip6->ip6_dst;
1386 	in6_clearscope(&dst);
1387 	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1388 	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
1389 		MLD_PRINTF(("%s: ignore v1 report dst %s on ifp 0x%llx(%s)\n",
1390 		    __func__, ip6_sprintf(&ip6->ip6_dst),
1391 		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1392 		return EINVAL;
1393 	}
1394 
1395 	/*
1396 	 * Make sure we don't hear our own membership report, as fast
1397 	 * leave requires knowing that we are the only member of a
1398 	 * group. Assume we used the link-local address if available,
1399 	 * otherwise look for ::.
1400 	 *
1401 	 * XXX Note that scope ID comparison is needed for the address
1402 	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1403 	 * performed for the on-wire address.
1404 	 */
1405 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
1406 	if (ia != NULL) {
1407 		IFA_LOCK(&ia->ia_ifa);
1408 		if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) {
1409 			IFA_UNLOCK(&ia->ia_ifa);
1410 			IFA_REMREF(&ia->ia_ifa);
1411 			return 0;
1412 		}
1413 		IFA_UNLOCK(&ia->ia_ifa);
1414 		IFA_REMREF(&ia->ia_ifa);
1415 	} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
1416 		return 0;
1417 	}
1418 
1419 	MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n",
1420 	    __func__, ip6_sprintf(&mld->mld_addr),
1421 	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1422 
1423 	/*
1424 	 * Embed scope ID of receiving interface in MLD query for lookup
1425 	 * whilst we don't hold other locks (due to KAME locking lameness).
1426 	 */
1427 	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
1428 		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
1429 	}
1430 
1431 	/*
1432 	 * MLDv1 report suppression.
1433 	 * If we are a member of this group, and our membership should be
1434 	 * reported, and our group timer is pending or about to be reset,
1435 	 * stop our group timer by transitioning to the 'lazy' state.
1436 	 */
1437 	in6_multihead_lock_shared();
1438 	IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
1439 	in6_multihead_lock_done();
1440 
1441 	if (inm != NULL) {
1442 		struct mld_ifinfo *mli;
1443 
1444 		IN6M_LOCK(inm);
1445 		mli = inm->in6m_mli;
1446 		VERIFY(mli != NULL);
1447 
1448 		MLI_LOCK(mli);
1449 		/*
1450 		 * If we are in MLDv2 host mode, do not allow the
1451 		 * other host's MLDv1 report to suppress our reports.
1452 		 */
1453 		if (mli->mli_version == MLD_VERSION_2) {
1454 			MLI_UNLOCK(mli);
1455 			IN6M_UNLOCK(inm);
1456 			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1457 			goto out;
1458 		}
1459 		MLI_UNLOCK(mli);
1460 
1461 		inm->in6m_timer = 0;
1462 
1463 		switch (inm->in6m_state) {
1464 		case MLD_NOT_MEMBER:
1465 		case MLD_SILENT_MEMBER:
1466 		case MLD_SLEEPING_MEMBER:
1467 			break;
1468 		case MLD_REPORTING_MEMBER:
1469 		case MLD_IDLE_MEMBER:
1470 		case MLD_AWAKENING_MEMBER:
1471 			MLD_PRINTF(("%s: report suppressed for %s on "
1472 			    "ifp 0x%llx(%s)\n", __func__,
1473 			    ip6_sprintf(&mld->mld_addr),
1474 			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1475 			OS_FALLTHROUGH;
1476 		case MLD_LAZY_MEMBER:
1477 			inm->in6m_state = MLD_LAZY_MEMBER;
1478 			break;
1479 		case MLD_G_QUERY_PENDING_MEMBER:
1480 		case MLD_SG_QUERY_PENDING_MEMBER:
1481 		case MLD_LEAVING_MEMBER:
1482 			break;
1483 		}
1484 		IN6M_UNLOCK(inm);
1485 		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1486 	}
1487 
1488 out:
1489 	/* XXX Clear embedded scope ID as userland won't expect it. */
1490 	in6_clearscope(&mld->mld_addr);
1491 
1492 	return 0;
1493 }
1494 
1495 /*
1496  * MLD input path.
1497  *
1498  * Assume query messages which fit in a single ICMPv6 message header
1499  * have been pulled up.
1500  * Assume that userland will want to see the message, even if it
1501  * otherwise fails kernel input validation; do not free it.
1502  * Pullup may however free the mbuf chain m if it fails.
1503  *
1504  * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1505  */
1506 int
1507 mld_input(struct mbuf *m, int off, int icmp6len)
1508 {
1509 	struct ifnet    *ifp = NULL;
1510 	struct ip6_hdr  *ip6 = NULL;
1511 	struct mld_hdr  *mld = NULL;
1512 	int              mldlen = 0;
1513 
1514 	MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
1515 	    (uint64_t)VM_KERNEL_ADDRPERM(m), off));
1516 
1517 	ifp = m->m_pkthdr.rcvif;
1518 
1519 	/* Pullup to appropriate size. */
1520 	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1521 	if (mld->mld_type == MLD_LISTENER_QUERY &&
1522 	    icmp6len >= sizeof(struct mldv2_query)) {
1523 		mldlen = sizeof(struct mldv2_query);
1524 	} else {
1525 		mldlen = sizeof(struct mld_hdr);
1526 	}
1527 	// check if mldv2_query/mld_hdr fits in the first mbuf
1528 	IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE);
1529 	IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
1530 	if (mld == NULL) {
1531 		icmp6stat.icp6s_badlen++;
1532 		return IPPROTO_DONE;
1533 	}
1534 	ip6 = mtod(m, struct ip6_hdr *);
1535 
1536 	/*
1537 	 * Userland needs to see all of this traffic for implementing
1538 	 * the endpoint discovery portion of multicast routing.
1539 	 */
1540 	switch (mld->mld_type) {
1541 	case MLD_LISTENER_QUERY:
1542 		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1543 		if (icmp6len == sizeof(struct mld_hdr)) {
1544 			if (mld_v1_input_query(ifp, ip6, mld) != 0) {
1545 				return 0;
1546 			}
1547 		} else if (icmp6len >= sizeof(struct mldv2_query)) {
1548 			if (mld_v2_input_query(ifp, ip6, m, off,
1549 			    icmp6len) != 0) {
1550 				return 0;
1551 			}
1552 		}
1553 		break;
1554 	case MLD_LISTENER_REPORT:
1555 		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1556 		if (mld_v1_input_report(ifp, m, ip6, mld) != 0) {
1557 			return 0;
1558 		}
1559 		break;
1560 	case MLDV2_LISTENER_REPORT:
1561 		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1562 		break;
1563 	case MLD_LISTENER_DONE:
1564 		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1565 		break;
1566 	default:
1567 		break;
1568 	}
1569 
1570 	return 0;
1571 }
1572 
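/*
 * Caller sketch (assumption, based on the contract above): icmp6 input
 * hands MLD message types to this routine and keeps the mbuf for
 * userland unless IPPROTO_DONE is returned, roughly:
 *
 *	if (mld_input(m, off, icmp6len) == IPPROTO_DONE) {
 *		m = NULL;	(mbuf was freed)
 *	}
 */
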
1573 /*
1574  * Schedule MLD timer based on various parameters; caller must ensure that
1575  * lock ordering is maintained as this routine acquires MLD global lock.
1576  */
1577 void
1578 mld_set_timeout(struct mld_tparams *mtp)
1579 {
1580 	MLD_LOCK_ASSERT_NOTHELD();
1581 	VERIFY(mtp != NULL);
1582 
1583 	if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
1584 		MLD_LOCK();
1585 		if (mtp->qpt != 0) {
1586 			querier_present_timers_running6 = 1;
1587 		}
1588 		if (mtp->it != 0) {
1589 			interface_timers_running6 = 1;
1590 		}
1591 		if (mtp->cst != 0) {
1592 			current_state_timers_running6 = 1;
1593 		}
1594 		if (mtp->sct != 0) {
1595 			state_change_timers_running6 = 1;
1596 		}
1597 		mld_sched_timeout(mtp->fast);
1598 		MLD_UNLOCK();
1599 	}
1600 }
1601 
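/*
 * Typical caller pattern (mirrors sysctl_mld_v2enable() above): pending
 * timer flags are accumulated in a local mld_tparams while the subsystem
 * locks are held, and mld_set_timeout() is called only after they are
 * dropped, preserving the lock ordering rules:
 *
 *	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
 *	MLI_LOCK(mli);
 *	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
 *	MLI_UNLOCK(mli);
 *	mld_set_timeout(&mtp);
 */
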
1602 void
1603 mld_set_fast_timeout(struct mld_tparams *mtp)
1604 {
1605 	VERIFY(mtp != NULL);
1606 	mtp->fast = true;
1607 	mld_set_timeout(mtp);
1608 }
1609 
1610 /*
1611  * MLD6 timer handler (nominally run once per second).
1612  */
1613 static void
1614 mld_timeout(void *arg)
1615 {
1616 	struct ifqueue           scq;   /* State-change packets */
1617 	struct ifqueue           qrq;   /* Query response packets */
1618 	struct ifnet            *ifp;
1619 	struct mld_ifinfo       *mli;
1620 	struct in6_multi        *inm;
1621 	int                      uri_sec = 0;
1622 	unsigned int genid = mld_mli_list_genid;
1623 	bool                     fast = arg != NULL;
1624 
1625 	SLIST_HEAD(, in6_multi) in6m_dthead;
1626 
1627 	SLIST_INIT(&in6m_dthead);
1628 
1629 	/*
1630 	 * Update coarse-grained networking timestamp (in sec.); the idea
1631 	 * is to piggy-back on the timeout callout to update the counter
1632 	 * returnable via net_uptime().
1633 	 */
1634 	net_update_uptime();
1635 
1636 	MLD_LOCK();
1637 
1638 	MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d, fast %d\n", __func__,
1639 	    querier_present_timers_running6, interface_timers_running6,
1640 	    current_state_timers_running6, state_change_timers_running6, fast));
1641 
1642 	if (fast) {
1643 		/*
1644 		 * When running the fast timer, skip processing
1645 		 * of "querier present" timers since they are
1646 		 * based on 1-second intervals.
1647 		 */
1648 		goto skip_query_timers;
1649 	}
1650 	/*
1651 	 * MLDv1 querier present timer processing.
1652 	 */
1653 	if (querier_present_timers_running6) {
1654 		querier_present_timers_running6 = 0;
1655 		LIST_FOREACH(mli, &mli_head, mli_link) {
1656 			MLI_LOCK(mli);
1657 			mld_v1_process_querier_timers(mli);
1658 			if (mli->mli_v1_timer > 0) {
1659 				querier_present_timers_running6 = 1;
1660 			}
1661 			MLI_UNLOCK(mli);
1662 		}
1663 	}
1664 
1665 	/*
1666 	 * MLDv2 General Query response timer processing.
1667 	 */
1668 	if (interface_timers_running6) {
1669 		MLD_PRINTF(("%s: interface timers running\n", __func__));
1670 		interface_timers_running6 = 0;
1671 		mli = LIST_FIRST(&mli_head);
1672 
1673 		while (mli != NULL) {
1674 			if (mli->mli_flags & MLIF_PROCESSED) {
1675 				mli = LIST_NEXT(mli, mli_link);
1676 				continue;
1677 			}
1678 
1679 			MLI_LOCK(mli);
1680 			if (mli->mli_version != MLD_VERSION_2) {
1681 				MLI_UNLOCK(mli);
1682 				mli = LIST_NEXT(mli, mli_link);
1683 				continue;
1684 			}
1685 			/*
1686 			 * XXX The logic below ends up calling
1687 			 * mld_dispatch_packet which can unlock mli
1688 			 * and the global MLD lock.
1689 			 * Therefore grab a reference on the mli and also
1690 			 * check the list generation count to see if we
1691 			 * should restart the iteration.
1692 			 */
1693 			MLI_ADDREF_LOCKED(mli);
1694 
1695 			if (mli->mli_v2_timer == 0) {
1696 				/* Do nothing. */
1697 			} else if (--mli->mli_v2_timer == 0) {
1698 				if (mld_v2_dispatch_general_query(mli) > 0) {
1699 					interface_timers_running6 = 1;
1700 				}
1701 			} else {
1702 				interface_timers_running6 = 1;
1703 			}
1704 			mli->mli_flags |= MLIF_PROCESSED;
1705 			MLI_UNLOCK(mli);
1706 			MLI_REMREF(mli);
1707 
1708 			if (genid != mld_mli_list_genid) {
1709 				MLD_PRINTF(("%s: MLD information list changed "
1710 				    "in the middle of iteration! Restart iteration.\n",
1711 				    __func__));
1712 				mli = LIST_FIRST(&mli_head);
1713 				genid = mld_mli_list_genid;
1714 			} else {
1715 				mli = LIST_NEXT(mli, mli_link);
1716 			}
1717 		}
1718 
1719 		LIST_FOREACH(mli, &mli_head, mli_link)
1720 		mli->mli_flags &= ~MLIF_PROCESSED;
1721 	}
1722 
1723 skip_query_timers:
1724 	if (!current_state_timers_running6 &&
1725 	    !state_change_timers_running6) {
1726 		goto out_locked;
1727 	}
1728 
1729 	current_state_timers_running6 = 0;
1730 	state_change_timers_running6 = 0;
1731 
1732 	MLD_PRINTF(("%s: state change timers running\n", __func__));
1733 
1734 	memset(&qrq, 0, sizeof(struct ifqueue));
1735 	qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;
1736 
1737 	memset(&scq, 0, sizeof(struct ifqueue));
1738 	scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;
1739 
1740 	/*
1741 	 * MLD host report and state-change timer processing.
1742 	 * Note: Processing a v2 group timer may remove a node.
1743 	 */
1744 	mli = LIST_FIRST(&mli_head);
1745 
1746 	while (mli != NULL) {
1747 		struct in6_multistep step;
1748 
1749 		if (mli->mli_flags & MLIF_PROCESSED) {
1750 			mli = LIST_NEXT(mli, mli_link);
1751 			continue;
1752 		}
1753 
1754 		MLI_LOCK(mli);
1755 		ifp = mli->mli_ifp;
1756 		uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);
1757 		MLI_UNLOCK(mli);
1758 
1759 		in6_multihead_lock_shared();
1760 		IN6_FIRST_MULTI(step, inm);
1761 		while (inm != NULL) {
1762 			IN6M_LOCK(inm);
1763 			if (inm->in6m_ifp != ifp) {
1764 				goto next;
1765 			}
1766 
1767 			MLI_LOCK(mli);
1768 			switch (mli->mli_version) {
1769 			case MLD_VERSION_1:
1770 				mld_v1_process_group_timer(inm,
1771 				    mli->mli_version);
1772 				break;
1773 			case MLD_VERSION_2:
1774 				mld_v2_process_group_timers(mli, &qrq,
1775 				    &scq, inm, uri_sec);
1776 				break;
1777 			}
1778 			MLI_UNLOCK(mli);
1779 next:
1780 			IN6M_UNLOCK(inm);
1781 			IN6_NEXT_MULTI(step, inm);
1782 		}
1783 		in6_multihead_lock_done();
1784 
1785 		/*
1786 		 * XXX The logic below ends up calling
1787 		 * mld_dispatch_packet which can unlock mli
1788 		 * and the global MLD lock.
1789 		 * Therefore grab a reference on MLI and also
1790 		 * check for generation count to see if we should
1791 		 * iterate the list again.
1792 		 */
1793 		MLI_LOCK(mli);
1794 		MLI_ADDREF_LOCKED(mli);
1795 		if (mli->mli_version == MLD_VERSION_1) {
1796 			mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
1797 		} else if (mli->mli_version == MLD_VERSION_2) {
1798 			MLI_UNLOCK(mli);
1799 			mld_dispatch_queue_locked(NULL, &qrq, 0);
1800 			mld_dispatch_queue_locked(NULL, &scq, 0);
1801 			VERIFY(qrq.ifq_len == 0);
1802 			VERIFY(scq.ifq_len == 0);
1803 			MLI_LOCK(mli);
1804 		}
1805 		/*
1806 		 * In case there are still any pending membership reports
1807 		 * which didn't get drained at version change time.
1808 		 */
1809 		IF_DRAIN(&mli->mli_v1q);
1810 		/*
1811 		 * Release all deferred inm records, and drain any locally
1812 		 * enqueued packets; do it even if the current MLD version
1813 		 * for the link is no longer MLDv2, in order to handle the
1814 		 * version change case.
1815 		 */
1816 		mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
1817 		VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
1818 		mli->mli_flags |= MLIF_PROCESSED;
1819 		MLI_UNLOCK(mli);
1820 		MLI_REMREF(mli);
1821 
1822 		IF_DRAIN(&qrq);
1823 		IF_DRAIN(&scq);
1824 
1825 		if (genid != mld_mli_list_genid) {
1826 			MLD_PRINTF(("%s: MLD information list changed "
1827 			    "in the middle of iteration! Restart iteration.\n",
1828 			    __func__));
1829 			mli = LIST_FIRST(&mli_head);
1830 			genid = mld_mli_list_genid;
1831 		} else {
1832 			mli = LIST_NEXT(mli, mli_link);
1833 		}
1834 	}
1835 
1836 	LIST_FOREACH(mli, &mli_head, mli_link)
1837 	mli->mli_flags &= ~MLIF_PROCESSED;
1838 
1839 out_locked:
1840 	/* re-arm the timer if there's work to do */
1841 	mld_timeout_run = 0;
1842 	mld_sched_timeout(false);
1843 	MLD_UNLOCK();
1844 
1845 	/* Now that we've dropped all locks, release detached records */
1846 	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
1847 }
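
/*
 * Illustrative sketch: the genid comparisons above implement a
 * generation-count pattern for walking a list whose locks must be
 * dropped mid-iteration: any insert/remove bumps mld_mli_list_genid,
 * and a stale snapshot forces a restart from the head, while the
 * MLIF_PROCESSED flag keeps restarted passes from re-processing
 * entries.  A minimal user-space model of the idea, with hypothetical
 * names:
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
	bool processed;
};

static struct node *list_head;          /* protected by a list lock */
static unsigned int list_genid;         /* bumped on insert/remove */

static void
walk_all(void (*visit)(struct node *))  /* called with the lock held */
{
	unsigned int genid = list_genid;
	struct node *n = list_head;

	while (n != NULL) {
		if (!n->processed) {
			visit(n);       /* may drop and retake the lock */
			n->processed = true;
		}
		if (genid != list_genid) {
			/* List changed while unlocked; restart from head. */
			n = list_head;
			genid = list_genid;
		} else {
			n = n->next;
		}
	}
	for (n = list_head; n != NULL; n = n->next) {
		n->processed = false;   /* reset for the next pass */
	}
}
#endif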
1848 
1849 static void
1850 mld_sched_timeout(bool fast)
1851 {
1852 	MLD_LOCK_ASSERT_HELD();
1853 
1854 	if (!mld_timeout_run &&
1855 	    (querier_present_timers_running6 || current_state_timers_running6 ||
1856 	    interface_timers_running6 || state_change_timers_running6)) {
1857 		mld_timeout_run = 1;
1858 		int sched_hz = fast ? 0 : hz;
1859 		void *arg = fast ? (void *)mld_sched_timeout : NULL;
1860 		timeout(mld_timeout, arg, sched_hz);
1861 	}
1862 }
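
/*
 * Illustrative sketch: mld_sched_timeout() encodes the "fast" request
 * twice -- in the delay (0 ticks fires on the next softclock pass, hz
 * ticks fires about one second later) and in the callout argument,
 * where any non-NULL pointer means "fast" (mld_timeout() only tests
 * arg != NULL).  A hedged user-space model with hypothetical names:
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stddef.h>

extern void timeout(void (*fn)(void *), void *arg, int ticks);
extern int hz;                          /* ticks per second */

static void
my_handler(void *arg)
{
	bool fast = (arg != NULL);      /* decode: non-NULL means fast */

	if (!fast) {
		/* ... per-second bookkeeping goes here ... */
	}
}

static void
my_sched(bool fast)
{
	/* Encode: any non-NULL pointer serves as the "fast" token. */
	timeout(my_handler, fast ? (void *)my_sched : NULL, fast ? 0 : hz);
}
#endif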
1863 
1864 /*
1865  * Free the in6_multi reference(s) for this MLD lifecycle.
1866  *
1867  * Caller must be holding mli_lock.
1868  */
1869 static void
1870 mld_flush_relq(struct mld_ifinfo *mli, struct mld_in6m_relhead *in6m_dthead)
1871 {
1872 	struct in6_multi *inm;
1873 
1874 again:
1875 	MLI_LOCK_ASSERT_HELD(mli);
1876 	inm = SLIST_FIRST(&mli->mli_relinmhead);
1877 	if (inm != NULL) {
1878 		int lastref;
1879 
1880 		SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
1881 		MLI_UNLOCK(mli);
1882 
1883 		in6_multihead_lock_exclusive();
1884 		IN6M_LOCK(inm);
1885 		VERIFY(inm->in6m_nrelecnt != 0);
1886 		inm->in6m_nrelecnt--;
1887 		lastref = in6_multi_detach(inm);
1888 		VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
1889 		    inm->in6m_reqcnt == 0));
1890 		IN6M_UNLOCK(inm);
1891 		in6_multihead_lock_done();
1892 		/* from mli_relinmhead */
1893 		IN6M_REMREF(inm);
1894 		/* from in6_multihead_list */
1895 		if (lastref) {
1896 			/*
1897 			 * Defer releasing our final reference, as we
1898 			 * are holding the MLD lock at this point, and
1899 			 * we could end up with locking issues later on
1900 			 * (while issuing SIOCDELMULTI) when this is the
1901 			 * final reference count.  Let the caller do it
1902 			 * when it is safe.
1903 			 */
1904 			MLD_ADD_DETACHED_IN6M(in6m_dthead, inm);
1905 		}
1906 		MLI_LOCK(mli);
1907 		goto again;
1908 	}
1909 }
1910 
1911 /*
1912  * Update host report group timer.
1913  * Will update the global pending timer flags.
1914  */
1915 static void
1916 mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
1917 {
1918 #pragma unused(mld_version)
1919 	int report_timer_expired;
1920 
1921 	MLD_LOCK_ASSERT_HELD();
1922 	IN6M_LOCK_ASSERT_HELD(inm);
1923 	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
1924 
1925 	if (inm->in6m_timer == 0) {
1926 		report_timer_expired = 0;
1927 	} else if (--inm->in6m_timer == 0) {
1928 		report_timer_expired = 1;
1929 	} else {
1930 		current_state_timers_running6 = 1;
1931 		/* caller will schedule timer */
1932 		return;
1933 	}
1934 
1935 	switch (inm->in6m_state) {
1936 	case MLD_NOT_MEMBER:
1937 	case MLD_SILENT_MEMBER:
1938 	case MLD_IDLE_MEMBER:
1939 	case MLD_LAZY_MEMBER:
1940 	case MLD_SLEEPING_MEMBER:
1941 	case MLD_AWAKENING_MEMBER:
1942 		break;
1943 	case MLD_REPORTING_MEMBER:
1944 		if (report_timer_expired) {
1945 			inm->in6m_state = MLD_IDLE_MEMBER;
1946 			(void) mld_v1_transmit_report(inm,
1947 			    MLD_LISTENER_REPORT);
1948 			IN6M_LOCK_ASSERT_HELD(inm);
1949 			MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
1950 		}
1951 		break;
1952 	case MLD_G_QUERY_PENDING_MEMBER:
1953 	case MLD_SG_QUERY_PENDING_MEMBER:
1954 	case MLD_LEAVING_MEMBER:
1955 		break;
1956 	}
1957 }
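
/*
 * Illustrative sketch: the three-way test above is this file's
 * standard one-shot countdown idiom -- 0 means the timer is disarmed,
 * a decrement that reaches 0 fires the event on this tick, and any
 * other value re-flags the global "timers running" bit so that
 * mld_sched_timeout() keeps the callout armed.  A minimal model:
 */
#if 0 /* example only */
static unsigned int timers_running;     /* global "keep ticking" flag */

static void
tick(unsigned int *timer, void (*fire)(void))
{
	if (*timer == 0) {
		/* Disarmed; nothing to do. */
	} else if (--*timer == 0) {
		fire();                 /* expired on this tick */
	} else {
		timers_running = 1;     /* still counting; re-arm callout */
	}
}
#endif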
1958 
1959 /*
1960  * Update a group's timers for MLDv2.
1961  * Will update the global pending timer flags.
1962  * Note: Unlocked read from mli.
1963  */
1964 static void
1965 mld_v2_process_group_timers(struct mld_ifinfo *mli,
1966     struct ifqueue *qrq, struct ifqueue *scq,
1967     struct in6_multi *inm, const int uri_sec)
1968 {
1969 	int query_response_timer_expired;
1970 	int state_change_retransmit_timer_expired;
1971 
1972 	MLD_LOCK_ASSERT_HELD();
1973 	IN6M_LOCK_ASSERT_HELD(inm);
1974 	MLI_LOCK_ASSERT_HELD(mli);
1975 	VERIFY(mli == inm->in6m_mli);
1976 
1977 	query_response_timer_expired = 0;
1978 	state_change_retransmit_timer_expired = 0;
1979 
1980 	/*
1981 	 * During a transition from compatibility mode back to MLDv2,
1982 	 * a group record in REPORTING state may still have its group
1983 	 * timer active. This is a no-op in this function; it is easier
1984 	 * to deal with it here than to complicate the timeout path.
1985 	 */
1986 	if (inm->in6m_timer == 0) {
1987 		query_response_timer_expired = 0;
1988 	} else if (--inm->in6m_timer == 0) {
1989 		query_response_timer_expired = 1;
1990 	} else {
1991 		current_state_timers_running6 = 1;
1992 		/* caller will schedule timer */
1993 	}
1994 
1995 	if (inm->in6m_sctimer == 0) {
1996 		state_change_retransmit_timer_expired = 0;
1997 	} else if (--inm->in6m_sctimer == 0) {
1998 		state_change_retransmit_timer_expired = 1;
1999 	} else {
2000 		state_change_timers_running6 = 1;
2001 		/* caller will schedule timer */
2002 	}
2003 
2004 	/* We are in timer callback, so be quick about it. */
2005 	if (!state_change_retransmit_timer_expired &&
2006 	    !query_response_timer_expired) {
2007 		return;
2008 	}
2009 
2010 	switch (inm->in6m_state) {
2011 	case MLD_NOT_MEMBER:
2012 	case MLD_SILENT_MEMBER:
2013 	case MLD_SLEEPING_MEMBER:
2014 	case MLD_LAZY_MEMBER:
2015 	case MLD_AWAKENING_MEMBER:
2016 	case MLD_IDLE_MEMBER:
2017 		break;
2018 	case MLD_G_QUERY_PENDING_MEMBER:
2019 	case MLD_SG_QUERY_PENDING_MEMBER:
2020 		/*
2021 		 * Respond to a previously pending Group-Specific
2022 		 * or Group-and-Source-Specific query by enqueueing
2023 		 * the appropriate Current-State report for
2024 		 * immediate transmission.
2025 		 */
2026 		if (query_response_timer_expired) {
2027 			int retval;
2028 
2029 			retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
2030 			    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
2031 			    0);
2032 			MLD_PRINTF(("%s: enqueue record = %d\n",
2033 			    __func__, retval));
2034 			inm->in6m_state = MLD_REPORTING_MEMBER;
2035 			in6m_clear_recorded(inm);
2036 		}
2037 		OS_FALLTHROUGH;
2038 	case MLD_REPORTING_MEMBER:
2039 	case MLD_LEAVING_MEMBER:
2040 		if (state_change_retransmit_timer_expired) {
2041 			/*
2042 			 * State-change retransmission timer fired.
2043 			 * If there are any further pending retransmissions,
2044 			 * set the global pending state-change flag, and
2045 			 * reset the timer.
2046 			 */
2047 			if (--inm->in6m_scrv > 0) {
2048 				inm->in6m_sctimer = (uint16_t)uri_sec;
2049 				state_change_timers_running6 = 1;
2050 				/* caller will schedule timer */
2051 			}
2052 			/*
2053 			 * Retransmit the previously computed state-change
2054 			 * report. If there are no further pending
2055 			 * retransmissions, the mbuf queue will be consumed.
2056 			 * Update T0 state to T1 as we have now sent
2057 			 * a state-change.
2058 			 */
2059 			(void) mld_v2_merge_state_changes(inm, scq);
2060 
2061 			in6m_commit(inm);
2062 			MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2063 			    ip6_sprintf(&inm->in6m_addr),
2064 			    if_name(inm->in6m_ifp)));
2065 
2066 			/*
2067 			 * If we are leaving the group for good, make sure
2068 			 * we release MLD's reference to it.
2069 			 * This release must be deferred using a SLIST,
2070 			 * as we are called from a loop which traverses
2071 			 * the in_ifmultiaddr TAILQ.
2072 			 */
2073 			if (inm->in6m_state == MLD_LEAVING_MEMBER &&
2074 			    inm->in6m_scrv == 0) {
2075 				inm->in6m_state = MLD_NOT_MEMBER;
2076 				/*
2077 				 * A reference has already been held in
2078 				 * mld_final_leave() for this inm, so
2079 				 * no need to hold another one.  We also
2080 				 * bumped up its request count then, so
2081 				 * that it stays in in6_multihead.  Both
2082 				 * of them will be released when it is
2083 				 * dequeued later on.
2084 				 */
2085 				VERIFY(inm->in6m_nrelecnt != 0);
2086 				SLIST_INSERT_HEAD(&mli->mli_relinmhead,
2087 				    inm, in6m_nrele);
2088 			}
2089 		}
2090 		break;
2091 	}
2092 }
2093 
2094 /*
2095  * Switch to a different version on the given interface,
2096  * as per Section 9.12.
2097  */
2098 static uint32_t
2099 mld_set_version(struct mld_ifinfo *mli, const int mld_version)
2100 {
2101 	int old_version_timer;
2102 
2103 	MLI_LOCK_ASSERT_HELD(mli);
2104 
2105 	MLD_PRINTF(("%s: switching to v%d on ifp 0x%llx(%s)\n", __func__,
2106 	    mld_version, (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
2107 	    if_name(mli->mli_ifp)));
2108 
2109 	if (mld_version == MLD_VERSION_1) {
2110 		/*
2111 		 * Compute the "Older Version Querier Present" timer as per
2112 		 * Section 9.12, in seconds.
2113 		 */
2114 		old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
2115 		mli->mli_v1_timer = old_version_timer;
2116 	}
2117 
2118 	if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
2119 		mli->mli_version = MLD_VERSION_1;
2120 		mld_v2_cancel_link_timers(mli);
2121 	}
2122 
2123 	MLI_LOCK_ASSERT_HELD(mli);
2124 
2125 	return mli->mli_v1_timer;
2126 }
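
/*
 * Worked example: Section 9.12 of RFC 3810 defines the Older Version
 * Querier Present timeout as [Robustness Variable] x [Query Interval]
 * + [Query Response Interval].  With the protocol defaults (RV = 2,
 * QI = 125 s, QRI = 10 s), the computation above yields 260 seconds:
 */
#if 0 /* example only */
#include <assert.h>

static int
ovqp_timeout(int rv, int qi, int qri)
{
	/* (mli_rv * mli_qi) + mli_qri, in seconds */
	return rv * qi + qri;
}

int
main(void)
{
	assert(ovqp_timeout(2, 125, 10) == 260);
	return 0;
}
#endif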
2127 
2128 /*
2129  * Cancel pending MLDv2 timers for the given link and all groups
2130  * joined on it; state-change, general-query, and group-query timers.
2131  *
2132  * Only ever called on a transition from v2 to Compatibility mode. Kill
2133  * the timers stone dead (this may be expensive for large N groups); they
2134  * will be restarted if Compatibility Mode deems that they must be due to
2135  * query processing.
2136  */
2137 static void
2138 mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
2139 {
2140 	struct ifnet            *ifp;
2141 	struct in6_multi        *inm;
2142 	struct in6_multistep    step;
2143 
2144 	MLI_LOCK_ASSERT_HELD(mli);
2145 
2146 	MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n", __func__,
2147 	    (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp)));
2148 
2149 	/*
2150 	 * Stop the v2 General Query Response on this link stone dead.
2151 	 * If the timer is woken up due to interface_timers_running6,
2152 	 * the flag will be cleared if there are no pending link timers.
2153 	 */
2154 	mli->mli_v2_timer = 0;
2155 
2156 	/*
2157 	 * Now clear the current-state and state-change report timers
2158 	 * for all memberships scoped to this link.
2159 	 */
2160 	ifp = mli->mli_ifp;
2161 	MLI_UNLOCK(mli);
2162 
2163 	in6_multihead_lock_shared();
2164 	IN6_FIRST_MULTI(step, inm);
2165 	while (inm != NULL) {
2166 		IN6M_LOCK(inm);
2167 		if (inm->in6m_ifp != ifp) {
2168 			goto next;
2169 		}
2170 
2171 		switch (inm->in6m_state) {
2172 		case MLD_NOT_MEMBER:
2173 		case MLD_SILENT_MEMBER:
2174 		case MLD_IDLE_MEMBER:
2175 		case MLD_LAZY_MEMBER:
2176 		case MLD_SLEEPING_MEMBER:
2177 		case MLD_AWAKENING_MEMBER:
2178 			/*
2179 			 * These states are either not relevant in v2 mode,
2180 			 * or are unreported. Do nothing.
2181 			 */
2182 			break;
2183 		case MLD_LEAVING_MEMBER:
2184 			/*
2185 			 * If we are leaving the group and switching
2186 			 * version, we need to release the final
2187 			 * reference held for issuing the INCLUDE {}.
2188 			 * During mld_final_leave(), we bumped up both the
2189 			 * request and reference counts.  Since we cannot
2190 			 * call in6_multi_detach() here, defer this task to
2191 			 * the timer routine.
2192 			 */
2193 			VERIFY(inm->in6m_nrelecnt != 0);
2194 			MLI_LOCK(mli);
2195 			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
2196 			    in6m_nrele);
2197 			MLI_UNLOCK(mli);
2198 			OS_FALLTHROUGH;
2199 		case MLD_G_QUERY_PENDING_MEMBER:
2200 		case MLD_SG_QUERY_PENDING_MEMBER:
2201 			in6m_clear_recorded(inm);
2202 			OS_FALLTHROUGH;
2203 		case MLD_REPORTING_MEMBER:
2204 			inm->in6m_state = MLD_REPORTING_MEMBER;
2205 			break;
2206 		}
2207 		/*
2208 		 * Always clear state-change and group report timers.
2209 		 * Free any pending MLDv2 state-change records.
2210 		 */
2211 		inm->in6m_sctimer = 0;
2212 		inm->in6m_timer = 0;
2213 		IF_DRAIN(&inm->in6m_scq);
2214 next:
2215 		IN6M_UNLOCK(inm);
2216 		IN6_NEXT_MULTI(step, inm);
2217 	}
2218 	in6_multihead_lock_done();
2219 
2220 	MLI_LOCK(mli);
2221 }
2222 
2223 /*
2224  * Update the Older Version Querier Present timers for a link.
2225  * See Section 9.12 of RFC 3810.
2226  */
2227 static void
2228 mld_v1_process_querier_timers(struct mld_ifinfo *mli)
2229 {
2230 	MLI_LOCK_ASSERT_HELD(mli);
2231 
2232 	if (mld_v2enable && mli->mli_version != MLD_VERSION_2 &&
2233 	    --mli->mli_v1_timer == 0) {
2234 		/*
2235 		 * MLDv1 Querier Present timer expired; revert to MLDv2.
2236 		 */
2237 		MLD_PRINTF(("%s: transition from v%d -> v%d on 0x%llx(%s)\n",
2238 		    __func__, mli->mli_version, MLD_VERSION_2,
2239 		    (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
2240 		    if_name(mli->mli_ifp)));
2241 		mli->mli_version = MLD_VERSION_2;
2242 	}
2243 }
2244 
2245 /*
2246  * Transmit an MLDv1 report immediately.
2247  */
2248 static int
2249 mld_v1_transmit_report(struct in6_multi *in6m, const uint8_t type)
2250 {
2251 	struct ifnet            *ifp;
2252 	struct in6_ifaddr       *ia;
2253 	struct ip6_hdr          *ip6;
2254 	struct mbuf             *mh, *md;
2255 	struct mld_hdr          *mld;
2256 	int                     error = 0;
2257 
2258 	IN6M_LOCK_ASSERT_HELD(in6m);
2259 	MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);
2260 
2261 	ifp = in6m->in6m_ifp;
2262 	/* ia may be NULL if link-local address is tentative. */
2263 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
2264 
2265 	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
2266 	if (mh == NULL) {
2267 		if (ia != NULL) {
2268 			IFA_REMREF(&ia->ia_ifa);
2269 		}
2270 		return ENOMEM;
2271 	}
2272 	MGET(md, M_DONTWAIT, MT_DATA);
2273 	if (md == NULL) {
2274 		m_free(mh);
2275 		if (ia != NULL) {
2276 			IFA_REMREF(&ia->ia_ifa);
2277 		}
2278 		return ENOMEM;
2279 	}
2280 	mh->m_next = md;
2281 
2282 	/*
2283 	 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
2284 	 * that ether_output() does not need to allocate another mbuf
2285 	 * for the header in the most common case.
2286 	 */
2287 	MH_ALIGN(mh, sizeof(struct ip6_hdr));
2288 	mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
2289 	mh->m_len = sizeof(struct ip6_hdr);
2290 
2291 	ip6 = mtod(mh, struct ip6_hdr *);
2292 	ip6->ip6_flow = 0;
2293 	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2294 	ip6->ip6_vfc |= IPV6_VERSION;
2295 	ip6->ip6_nxt = IPPROTO_ICMPV6;
2296 	if (ia != NULL) {
2297 		IFA_LOCK(&ia->ia_ifa);
2298 	}
2299 	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
2300 	ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia);
2301 	if (ia != NULL) {
2302 		IFA_UNLOCK(&ia->ia_ifa);
2303 		IFA_REMREF(&ia->ia_ifa);
2304 		ia = NULL;
2305 	}
2306 	ip6->ip6_dst = in6m->in6m_addr;
2307 	ip6_output_setdstifscope(mh, in6m->ifscope, NULL);
2308 
2309 	md->m_len = sizeof(struct mld_hdr);
2310 	mld = mtod(md, struct mld_hdr *);
2311 	mld->mld_type = type;
2312 	mld->mld_code = 0;
2313 	mld->mld_cksum = 0;
2314 	mld->mld_maxdelay = 0;
2315 	mld->mld_reserved = 0;
2316 	mld->mld_addr = in6m->in6m_addr;
2317 	in6_clearscope(&mld->mld_addr);
2318 	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
2319 	    sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
2320 
2321 	mld_save_context(mh, ifp);
2322 	mh->m_flags |= M_MLDV1;
2323 
2324 	/*
2325 	 * Because at this point we are possibly holding
2326 	 * in6_multihead_lock in shared or exclusive mode, we can't call
2327 	 * mld_dispatch_packet() here since that will eventually call
2328 	 * ip6_output(), which will try to lock in6_multihead_lock and cause
2329 	 * a deadlock.
2330 	 * Instead we defer the work to the mld_timeout() thread, thus
2331 	 * avoiding unlocking in6_multihead_lock here.
2332 	 */
2333 	if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
2334 		MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
2335 		error = ENOMEM;
2336 		m_freem(mh);
2337 	} else {
2338 		IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
2339 		VERIFY(error == 0);
2340 	}
2341 
2342 	return error;
2343 }
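
/*
 * Illustrative sketch: the report built above is a fixed 64-byte
 * chain -- a 40-byte IPv6 header in the header mbuf and a 24-byte
 * MLDv1 header in the data mbuf -- with the checksum computed over
 * the MLD portion only.  Model structs (assumptions mirroring the
 * real definitions, not the real structs) that a compiler can check:
 */
#if 0 /* example only */
#include <stdint.h>

struct model_ip6_hdr {                  /* models struct ip6_hdr */
	uint32_t flow;
	uint16_t plen;
	uint8_t  nxt, hlim;
	uint8_t  src[16], dst[16];
};

struct model_mld_hdr {                  /* models struct mld_hdr */
	uint8_t  type, code;
	uint16_t cksum;
	uint16_t maxdelay, reserved;
	uint8_t  addr[16];
};

_Static_assert(sizeof(struct model_ip6_hdr) == 40, "IPv6 header: 40 bytes");
_Static_assert(sizeof(struct model_mld_hdr) == 24, "MLDv1 header: 24 bytes");
/* Hence m_pkthdr.len above is 40 + 24 = 64 bytes. */
#endif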
2344 
2345 /*
2346  * Process a state change from the upper layer for the given IPv6 group.
2347  *
2348  * Each socket holds a reference on the in6_multi in its own ip6_moptions.
2349  * The socket layer will have made the necessary updates to the group
2350  * state; it is now up to MLD to issue a state change report if there
2351  * has been any change between T0 (when the last state-change was issued)
2352  * and T1 (now).
2353  *
2354  * We use the MLDv2 state machine at group level. The MLD module
2355  * however makes the decision as to which MLD protocol version to speak.
2356  * A state change *from* INCLUDE {} always means an initial join.
2357  * A state change *to* INCLUDE {} always means a final leave.
2358  *
2359  * If delay is non-zero, and the state change is an initial multicast
2360  * join, the state change report will be delayed by 'delay' ticks
2361  * in units of seconds if MLDv1 is active on the link; otherwise
2362  * the initial MLDv2 state change report will be delayed by whichever
2363  * is sooner, a pending state-change timer or delay itself.
2364  */
2365 int
2366 mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp,
2367     const int delay)
2368 {
2369 	struct mld_ifinfo *mli;
2370 	struct ifnet *ifp;
2371 	int error = 0;
2372 
2373 	VERIFY(mtp != NULL);
2374 	bzero(mtp, sizeof(*mtp));
2375 
2376 	IN6M_LOCK_ASSERT_HELD(inm);
2377 	VERIFY(inm->in6m_mli != NULL);
2378 	MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);
2379 
2380 	/*
2381 	 * Try to detect if the upper layer just asked us to change state
2382 	 * for an interface which has now gone away.
2383 	 */
2384 	VERIFY(inm->in6m_ifma != NULL);
2385 	ifp = inm->in6m_ifma->ifma_ifp;
2386 	/*
2387 	 * Sanity check that netinet6's notion of ifp is the same as net's.
2388 	 */
2389 	VERIFY(inm->in6m_ifp == ifp);
2390 
2391 	mli = MLD_IFINFO(ifp);
2392 	VERIFY(mli != NULL);
2393 
2394 	/*
2395 	 * If we detect a state transition to or from MCAST_UNDEFINED
2396 	 * for this group, then we are starting or finishing an MLD
2397 	 * life cycle for this group.
2398 	 */
2399 	if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
2400 		MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__,
2401 		    inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
2402 		if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
2403 			MLD_PRINTF(("%s: initial join\n", __func__));
2404 			error = mld_initial_join(inm, mli, mtp, delay);
2405 			goto out;
2406 		} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
2407 			MLD_PRINTF(("%s: final leave\n", __func__));
2408 			mld_final_leave(inm, mli, mtp);
2409 			goto out;
2410 		}
2411 	} else {
2412 		MLD_PRINTF(("%s: filter set change\n", __func__));
2413 	}
2414 
2415 	error = mld_handle_state_change(inm, mli, mtp);
2416 out:
2417 	return error;
2418 }
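
/*
 * Illustrative sketch of the calling convention: mld_change_state()
 * only records which timers need service in the caller-supplied
 * mld_tparams (zeroing it first, as above); the caller then drops its
 * locks and hands the struct to mld_set_timeout() to actually arm the
 * callout.  A hedged sketch with the surrounding locking and error
 * handling elided (example_state_change is a hypothetical caller):
 */
#if 0 /* example only */
static void
example_state_change(struct in6_multi *inm)
{
	struct mld_tparams mtp;
	int error;

	IN6M_LOCK(inm);
	error = mld_change_state(inm, &mtp, 0); /* records needed timers */
	IN6M_UNLOCK(inm);
	(void)error;

	/* Arm the MLD callout for whatever timers the change requested. */
	mld_set_timeout(&mtp);
}
#endif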
2419 
2420 /*
2421  * Perform the initial join for an MLD group.
2422  *
2423  * When joining a group:
2424  *  If the group should have its MLD traffic suppressed, do nothing.
2425  *  MLDv1 starts sending MLDv1 host membership reports.
2426  *  MLDv2 will schedule an MLDv2 state-change report containing the
2427  *  initial state of the membership.
2428  *
2429  * If the delay argument is non-zero, then we must delay sending the
2430  * initial state change for delay ticks (in units of seconds).
2431  */
2432 static int
2433 mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
2434     struct mld_tparams *mtp, const int delay)
2435 {
2436 	struct ifnet            *ifp;
2437 	struct ifqueue          *ifq;
2438 	int                      error, retval, syncstates;
2439 	int                      odelay;
2440 
2441 	IN6M_LOCK_ASSERT_HELD(inm);
2442 	MLI_LOCK_ASSERT_NOTHELD(mli);
2443 	VERIFY(mtp != NULL);
2444 
2445 	MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n",
2446 	    __func__, ip6_sprintf(&inm->in6m_addr),
2447 	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2448 	    if_name(inm->in6m_ifp)));
2449 
2450 	error = 0;
2451 	syncstates = 1;
2452 
2453 	ifp = inm->in6m_ifp;
2454 
2455 	MLI_LOCK(mli);
2456 	VERIFY(mli->mli_ifp == ifp);
2457 
2458 	/*
2459 	 * Avoid MLD if the group is:
2460 	 * 1. Joined on loopback, OR
2461 	 * 2. On a link that is marked MLIF_SILENT, OR
2462 	 * 3. rdar://problem/19227650 Is link-local scoped and
2463 	 *    on a cellular interface, OR
2464 	 * 4. Is a type that should not be reported (node-local
2465 	 *    or all-nodes link-local multicast).
2466 	 * All other groups enter the appropriate state machine
2467 	 * for the version in use on this link.
2468 	 */
2469 	if ((ifp->if_flags & IFF_LOOPBACK) ||
2470 	    (mli->mli_flags & MLIF_SILENT) ||
2471 	    (IFNET_IS_CELLULAR(ifp) &&
2472 	    (IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr) || IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(&inm->in6m_addr))) ||
2473 	    !mld_is_addr_reported(&inm->in6m_addr)) {
2474 		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2475 		    __func__));
2476 		inm->in6m_state = MLD_SILENT_MEMBER;
2477 		inm->in6m_timer = 0;
2478 	} else {
2479 		/*
2480 		 * Deal with overlapping in6_multi lifecycle.
2481 		 * If this group was LEAVING, then make sure
2482 		 * we drop the reference we picked up to keep the
2483 		 * group around for the final INCLUDE {} enqueue.
2484 		 * Since we cannot call in6_multi_detach() here,
2485 		 * defer this task to the timer routine.
2486 		 */
2487 		if (mli->mli_version == MLD_VERSION_2 &&
2488 		    inm->in6m_state == MLD_LEAVING_MEMBER) {
2489 			VERIFY(inm->in6m_nrelecnt != 0);
2490 			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
2491 			    in6m_nrele);
2492 		}
2493 
2494 		inm->in6m_state = MLD_REPORTING_MEMBER;
2495 
2496 		switch (mli->mli_version) {
2497 		case MLD_VERSION_1:
2498 			/*
2499 			 * If a delay was provided, only use it if
2500 			 * it is greater than the delay normally
2501 			 * used for an MLDv1 state change report,
2502 			 * and delay sending the initial MLDv1 report
2503 			 * by not transitioning to the IDLE state.
2504 			 */
2505 			odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI);
2506 			if (delay) {
2507 				inm->in6m_timer = max(delay, odelay);
2508 				mtp->cst = 1;
2509 			} else {
2510 				inm->in6m_state = MLD_IDLE_MEMBER;
2511 				error = mld_v1_transmit_report(inm,
2512 				    MLD_LISTENER_REPORT);
2513 
2514 				IN6M_LOCK_ASSERT_HELD(inm);
2515 				MLI_LOCK_ASSERT_HELD(mli);
2516 
2517 				if (error == 0) {
2518 					inm->in6m_timer = odelay;
2519 					mtp->cst = 1;
2520 				}
2521 			}
2522 			break;
2523 
2524 		case MLD_VERSION_2:
2525 			/*
2526 			 * Defer update of T0 to T1, until the first copy
2527 			 * of the state change has been transmitted.
2528 			 */
2529 			syncstates = 0;
2530 
2531 			/*
2532 			 * Immediately enqueue a State-Change Report for
2533 			 * this interface, freeing any previous reports.
2534 			 * Don't kick the timers if there is nothing to do,
2535 			 * or if an error occurred.
2536 			 */
2537 			ifq = &inm->in6m_scq;
2538 			IF_DRAIN(ifq);
2539 			retval = mld_v2_enqueue_group_record(ifq, inm, 1,
2540 			    0, 0, (mli->mli_flags & MLIF_USEALLOW));
2541 			mtp->cst = (ifq->ifq_len > 0);
2542 			MLD_PRINTF(("%s: enqueue record = %d\n",
2543 			    __func__, retval));
2544 			if (retval <= 0) {
2545 				error = retval * -1;
2546 				break;
2547 			}
2548 
2549 			/*
2550 			 * Schedule transmission of pending state-change
2551 			 * report up to RV times for this link. The timer
2552 			 * will fire at the next mld_timeout (1 second),
2553 			 * giving us an opportunity to merge the reports.
2554 			 *
2555 			 * If a delay was provided to this function, only
2556 			 * use this delay if sooner than the existing one.
2557 			 */
2558 			VERIFY(mli->mli_rv > 1);
2559 			inm->in6m_scrv = (uint16_t)mli->mli_rv;
2560 			if (delay) {
2561 				if (inm->in6m_sctimer > 1) {
2562 					inm->in6m_sctimer =
2563 					    MIN(inm->in6m_sctimer, (uint16_t)delay);
2564 				} else {
2565 					inm->in6m_sctimer = (uint16_t)delay;
2566 				}
2567 			} else {
2568 				inm->in6m_sctimer = 1;
2569 			}
2570 			mtp->sct = 1;
2571 			error = 0;
2572 			break;
2573 		}
2574 	}
2575 	MLI_UNLOCK(mli);
2576 
2577 	/*
2578 	 * Only update the T0 state if state change is atomic,
2579 	 * i.e. we don't need to wait for a timer to fire before we
2580 	 * can consider the state change to have been communicated.
2581 	 */
2582 	if (syncstates) {
2583 		in6m_commit(inm);
2584 		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2585 		    ip6_sprintf(&inm->in6m_addr),
2586 		    if_name(inm->in6m_ifp)));
2587 	}
2588 
2589 	return error;
2590 }
2591 
2592 /*
2593  * Issue an intermediate state change during the life-cycle.
2594  */
2595 static int
2596 mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli,
2597     struct mld_tparams *mtp)
2598 {
2599 	struct ifnet            *ifp;
2600 	int                      retval = 0;
2601 
2602 	IN6M_LOCK_ASSERT_HELD(inm);
2603 	MLI_LOCK_ASSERT_NOTHELD(mli);
2604 	VERIFY(mtp != NULL);
2605 
2606 	MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n",
2607 	    __func__, ip6_sprintf(&inm->in6m_addr),
2608 	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2609 	    if_name(inm->in6m_ifp)));
2610 
2611 	ifp = inm->in6m_ifp;
2612 
2613 	MLI_LOCK(mli);
2614 	VERIFY(mli->mli_ifp == ifp);
2615 
2616 	if ((ifp->if_flags & IFF_LOOPBACK) ||
2617 	    (mli->mli_flags & MLIF_SILENT) ||
2618 	    !mld_is_addr_reported(&inm->in6m_addr) ||
2619 	    (mli->mli_version != MLD_VERSION_2)) {
2620 		MLI_UNLOCK(mli);
2621 		if (!mld_is_addr_reported(&inm->in6m_addr)) {
2622 			MLD_PRINTF(("%s: not kicking state machine for silent "
2623 			    "group\n", __func__));
2624 		}
2625 		MLD_PRINTF(("%s: nothing to do\n", __func__));
2626 		in6m_commit(inm);
2627 		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2628 		    ip6_sprintf(&inm->in6m_addr),
2629 		    if_name(inm->in6m_ifp)));
2630 		goto done;
2631 	}
2632 
2633 	IF_DRAIN(&inm->in6m_scq);
2634 
2635 	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2636 	    (mli->mli_flags & MLIF_USEALLOW));
2637 	mtp->cst = (inm->in6m_scq.ifq_len > 0);
2638 	MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
2639 	if (retval <= 0) {
2640 		MLI_UNLOCK(mli);
2641 		retval *= -1;
2642 		goto done;
2643 	} else {
2644 		retval = 0;
2645 	}
2646 
2647 	/*
2648 	 * If record(s) were enqueued, start the state-change
2649 	 * report timer for this group.
2650 	 */
2651 	inm->in6m_scrv = (uint16_t)mli->mli_rv;
2652 	inm->in6m_sctimer = 1;
2653 	mtp->sct = 1;
2654 	MLI_UNLOCK(mli);
2655 
2656 done:
2657 	return retval;
2658 }
2659 
2660 /*
2661  * Perform the final leave for a multicast address.
2662  *
2663  * When leaving a group:
2664  *  MLDv1 sends a DONE message, if and only if we are the reporter.
2665  *  MLDv2 enqueues a state-change report containing a transition
2666  *  to INCLUDE {} for immediate transmission.
2667  */
2668 static void
2669 mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli,
2670     struct mld_tparams *mtp)
2671 {
2672 	int syncstates = 1;
2673 
2674 	IN6M_LOCK_ASSERT_HELD(inm);
2675 	MLI_LOCK_ASSERT_NOTHELD(mli);
2676 	VERIFY(mtp != NULL);
2677 
2678 	MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n",
2679 	    __func__, ip6_sprintf(&inm->in6m_addr),
2680 	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2681 	    if_name(inm->in6m_ifp)));
2682 
2683 	switch (inm->in6m_state) {
2684 	case MLD_NOT_MEMBER:
2685 	case MLD_SILENT_MEMBER:
2686 	case MLD_LEAVING_MEMBER:
2687 		/* Already leaving or left; do nothing. */
2688 		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2689 		    __func__));
2690 		break;
2691 	case MLD_REPORTING_MEMBER:
2692 	case MLD_IDLE_MEMBER:
2693 	case MLD_G_QUERY_PENDING_MEMBER:
2694 	case MLD_SG_QUERY_PENDING_MEMBER:
2695 		MLI_LOCK(mli);
2696 		if (mli->mli_version == MLD_VERSION_1) {
2697 			if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2698 			    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
2699 				panic("%s: MLDv2 state reached, not MLDv2 "
2700 				    "mode\n", __func__);
2701 				/* NOTREACHED */
2702 			}
2703 			/* schedule the timer if the enqueue was successful */
2704 			mtp->cst = (mld_v1_transmit_report(inm,
2705 			    MLD_LISTENER_DONE) == 0);
2706 
2707 			IN6M_LOCK_ASSERT_HELD(inm);
2708 			MLI_LOCK_ASSERT_HELD(mli);
2709 
2710 			inm->in6m_state = MLD_NOT_MEMBER;
2711 		} else if (mli->mli_version == MLD_VERSION_2) {
2712 			/*
2713 			 * Stop group timer and all pending reports.
2714 			 * Immediately enqueue a state-change report
2715 			 * TO_IN {} to be sent on the next timeout,
2716 			 * giving us an opportunity to merge reports.
2717 			 */
2718 			IF_DRAIN(&inm->in6m_scq);
2719 			inm->in6m_timer = 0;
2720 			inm->in6m_scrv = (uint16_t)mli->mli_rv;
2721 			MLD_PRINTF(("%s: Leaving %s/%s with %d "
2722 			    "pending retransmissions.\n", __func__,
2723 			    ip6_sprintf(&inm->in6m_addr),
2724 			    if_name(inm->in6m_ifp),
2725 			    inm->in6m_scrv));
2726 			if (inm->in6m_scrv == 0) {
2727 				inm->in6m_state = MLD_NOT_MEMBER;
2728 				inm->in6m_sctimer = 0;
2729 			} else {
2730 				int retval;
2731 				/*
2732 				 * Stick around in the in6_multihead list;
2733 				 * the final detach will be issued by
2734 				 * mld_v2_process_group_timers() when
2735 				 * the retransmit timer expires.
2736 				 */
2737 				IN6M_ADDREF_LOCKED(inm);
2738 				VERIFY(inm->in6m_debug & IFD_ATTACHED);
2739 				inm->in6m_reqcnt++;
2740 				VERIFY(inm->in6m_reqcnt >= 1);
2741 				inm->in6m_nrelecnt++;
2742 				VERIFY(inm->in6m_nrelecnt != 0);
2743 
2744 				retval = mld_v2_enqueue_group_record(
2745 					&inm->in6m_scq, inm, 1, 0, 0,
2746 					(mli->mli_flags & MLIF_USEALLOW));
2747 				mtp->cst = (inm->in6m_scq.ifq_len > 0);
2748 				KASSERT(retval != 0,
2749 				    ("%s: enqueue record = %d\n", __func__,
2750 				    retval));
2751 
2752 				inm->in6m_state = MLD_LEAVING_MEMBER;
2753 				inm->in6m_sctimer = 1;
2754 				mtp->sct = 1;
2755 				syncstates = 0;
2756 			}
2757 		}
2758 		MLI_UNLOCK(mli);
2759 		break;
2760 	case MLD_LAZY_MEMBER:
2761 	case MLD_SLEEPING_MEMBER:
2762 	case MLD_AWAKENING_MEMBER:
2763 		/* Our reports are suppressed; do nothing. */
2764 		break;
2765 	}
2766 
2767 	if (syncstates) {
2768 		in6m_commit(inm);
2769 		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2770 		    ip6_sprintf(&inm->in6m_addr),
2771 		    if_name(inm->in6m_ifp)));
2772 		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2773 		MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n",
2774 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr),
2775 		    if_name(inm->in6m_ifp)));
2776 	}
2777 }
2778 
2779 /*
2780  * Enqueue an MLDv2 group record to the given output queue.
2781  *
2782  * If is_state_change is zero, a current-state record is appended.
2783  * If is_state_change is non-zero, a state-change report is appended.
2784  *
2785  * If is_group_query is non-zero, an mbuf packet chain is allocated.
2786  * If is_group_query is zero, and if there is a packet with free space
2787  * at the tail of the queue, it will be appended to, provided there
2788  * is enough free space.
2789  * Otherwise a new mbuf packet chain is allocated.
2790  *
2791  * If is_source_query is non-zero, each source is checked to see if
2792  * it was recorded for a Group-Source query, and will be omitted if
2793  * it is not both in-mode and recorded.
2794  *
2795  * If use_block_allow is non-zero, state change reports for initial join
2796  * and final leave, on an inclusive mode group with a source list, will be
2797  * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2798  *
2799  * The function will attempt to allocate leading space in the packet
2800  * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2801  *
2802  * If successful the size of all data appended to the queue is returned,
2803  * otherwise an error code less than zero is returned, or zero if
2804  * no record(s) were appended.
2805  */
2806 static int
2807 mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
2808     const int is_state_change, const int is_group_query,
2809     const int is_source_query, const int use_block_allow)
2810 {
2811 	struct mldv2_record      mr;
2812 	struct mldv2_record     *pmr;
2813 	struct ifnet            *ifp;
2814 	struct ip6_msource      *ims, *nims;
2815 	struct mbuf             *m0, *m, *md;
2816 	int                      error, is_filter_list_change;
2817 	int                      minrec0len, m0srcs, msrcs, nbytes, off;
2818 	int                      record_has_sources;
2819 	int                      now;
2820 	uint8_t                  type;
2821 	uint8_t                  mode;
2822 
2823 	IN6M_LOCK_ASSERT_HELD(inm);
2824 	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
2825 
2826 	error = 0;
2827 	ifp = inm->in6m_ifp;
2828 	is_filter_list_change = 0;
2829 	m = NULL;
2830 	m0 = NULL;
2831 	m0srcs = 0;
2832 	msrcs = 0;
2833 	nbytes = 0;
2834 	nims = NULL;
2835 	record_has_sources = 1;
2836 	pmr = NULL;
2837 	type = MLD_DO_NOTHING;
2838 	mode = (uint8_t)inm->in6m_st[1].iss_fmode;
2839 
2840 	/*
2841 	 * If we did not transition out of ASM mode during t0->t1,
2842 	 * and there are no source nodes to process, we can skip
2843 	 * the generation of source records.
2844 	 */
2845 	if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2846 	    inm->in6m_nsrc == 0) {
2847 		record_has_sources = 0;
2848 	}
2849 
2850 	if (is_state_change) {
2851 		/*
2852 		 * Queue a state change record.
2853 		 * If the mode did not change, and there are non-ASM
2854 		 * listeners or source filters present,
2855 		 * we potentially need to issue two records for the group.
2856 		 * If there are ASM listeners, and there was no filter
2857 		 * mode transition of any kind, do nothing.
2858 		 *
2859 		 * If we are transitioning to MCAST_UNDEFINED, we need
2860 		 * not send any sources. A transition to/from this state is
2861 		 * considered inclusive with some special treatment.
2862 		 *
2863 		 * If we are rewriting initial joins/leaves to use
2864 		 * ALLOW/BLOCK, and the group's membership is inclusive,
2865 		 * we need to send sources in all cases.
2866 		 */
2867 		if (mode != inm->in6m_st[0].iss_fmode) {
2868 			if (mode == MCAST_EXCLUDE) {
2869 				MLD_PRINTF(("%s: change to EXCLUDE\n",
2870 				    __func__));
2871 				type = MLD_CHANGE_TO_EXCLUDE_MODE;
2872 			} else {
2873 				MLD_PRINTF(("%s: change to INCLUDE\n",
2874 				    __func__));
2875 				if (use_block_allow) {
2876 					/*
2877 					 * XXX
2878 					 * Here we're interested in state
2879 					 * edges either direction between
2880 					 * MCAST_UNDEFINED and MCAST_INCLUDE.
2881 					 * Perhaps we should just check
2882 					 * the group state, rather than
2883 					 * the filter mode.
2884 					 */
2885 					if (mode == MCAST_UNDEFINED) {
2886 						type = MLD_BLOCK_OLD_SOURCES;
2887 					} else {
2888 						type = MLD_ALLOW_NEW_SOURCES;
2889 					}
2890 				} else {
2891 					type = MLD_CHANGE_TO_INCLUDE_MODE;
2892 					if (mode == MCAST_UNDEFINED) {
2893 						record_has_sources = 0;
2894 					}
2895 				}
2896 			}
2897 		} else {
2898 			if (record_has_sources) {
2899 				is_filter_list_change = 1;
2900 			} else {
2901 				type = MLD_DO_NOTHING;
2902 			}
2903 		}
2904 	} else {
2905 		/*
2906 		 * Queue a current state record.
2907 		 */
2908 		if (mode == MCAST_EXCLUDE) {
2909 			type = MLD_MODE_IS_EXCLUDE;
2910 		} else if (mode == MCAST_INCLUDE) {
2911 			type = MLD_MODE_IS_INCLUDE;
2912 			VERIFY(inm->in6m_st[1].iss_asm == 0);
2913 		}
2914 	}
2915 
2916 	/*
2917 	 * Generate the filter list changes using a separate function.
2918 	 */
2919 	if (is_filter_list_change) {
2920 		return mld_v2_enqueue_filter_change(ifq, inm);
2921 	}
2922 
2923 	if (type == MLD_DO_NOTHING) {
2924 		MLD_PRINTF(("%s: nothing to do for %s/%s\n",
2925 		    __func__, ip6_sprintf(&inm->in6m_addr),
2926 		    if_name(inm->in6m_ifp)));
2927 		return 0;
2928 	}
2929 
2930 	/*
2931 	 * If any sources are present, we must be able to fit at least
2932 	 * one in the trailing space of the tail packet's mbuf,
2933 	 * ideally more.
2934 	 */
2935 	minrec0len = sizeof(struct mldv2_record);
2936 	if (record_has_sources) {
2937 		minrec0len += sizeof(struct in6_addr);
2938 	}
2939 	MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__,
2940 	    mld_rec_type_to_str(type),
2941 	    ip6_sprintf(&inm->in6m_addr),
2942 	    if_name(inm->in6m_ifp)));
2943 
2944 	/*
2945 	 * Check if we have a packet in the tail of the queue for this
2946 	 * group into which the first group record for this group will fit.
2947 	 * Otherwise allocate a new packet.
2948 	 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
2949 	 * Note: Group records for G/GSR query responses MUST be sent
2950 	 * in their own packet.
2951 	 */
2952 	m0 = ifq->ifq_tail;
2953 	if (!is_group_query &&
2954 	    m0 != NULL &&
2955 	    (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
2956 	    (m0->m_pkthdr.len + minrec0len) <
2957 	    (ifp->if_mtu - MLD_MTUSPACE)) {
2958 		m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2959 		    sizeof(struct mldv2_record)) /
2960 		    sizeof(struct in6_addr);
2961 		m = m0;
2962 		MLD_PRINTF(("%s: use existing packet\n", __func__));
2963 	} else {
2964 		if (IF_QFULL(ifq)) {
2965 			MLD_PRINTF(("%s: outbound queue full\n", __func__));
2966 			return -ENOMEM;
2967 		}
2968 		m = NULL;
2969 		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2970 		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2971 		if (!is_state_change && !is_group_query) {
2972 			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2973 		}
2974 		if (m == NULL) {
2975 			m = m_gethdr(M_DONTWAIT, MT_DATA);
2976 		}
2977 		if (m == NULL) {
2978 			return -ENOMEM;
2979 		}
2980 
2981 		mld_save_context(m, ifp);
2982 
2983 		MLD_PRINTF(("%s: allocated first packet\n", __func__));
2984 	}
2985 
2986 	/*
2987 	 * Append group record.
2988 	 * If we have sources, we don't know how many yet.
2989 	 */
2990 	mr.mr_type = type;
2991 	mr.mr_datalen = 0;
2992 	mr.mr_numsrc = 0;
2993 	mr.mr_addr = inm->in6m_addr;
2994 	in6_clearscope(&mr.mr_addr);
2995 	if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2996 		if (m != m0) {
2997 			m_freem(m);
2998 		}
2999 		MLD_PRINTF(("%s: m_append() failed.\n", __func__));
3000 		return -ENOMEM;
3001 	}
3002 	nbytes += sizeof(struct mldv2_record);
3003 
3004 	/*
3005 	 * Append as many sources as will fit in the first packet.
3006 	 * If we are appending to a new packet, the chain allocation
3007 	 * may potentially use clusters; use m_getptr() in this case.
3008 	 * If we are appending to an existing packet, we need to obtain
3009 	 * a pointer to the group record after m_append(), in case a new
3010 	 * mbuf was allocated.
3011 	 *
3012 	 * Only append sources which are in-mode at t1. If we are
3013 	 * transitioning to MCAST_UNDEFINED state on the group, and
3014 	 * use_block_allow is zero, do not include source entries.
3015 	 * Otherwise, we need to include this source in the report.
3016 	 *
3017 	 * Only report recorded sources in our filter set when responding
3018 	 * to a group-source query.
3019 	 */
3020 	if (record_has_sources) {
3021 		if (m == m0) {
3022 			md = m_last(m);
3023 			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
3024 			    md->m_len - nbytes);
3025 		} else {
3026 			md = m_getptr(m, 0, &off);
3027 			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
3028 			    off);
3029 		}
3030 		msrcs = 0;
3031 		RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
3032 		    nims) {
3033 			MLD_PRINTF(("%s: visit node %s\n", __func__,
3034 			    ip6_sprintf(&ims->im6s_addr)));
3035 			now = im6s_get_mode(inm, ims, 1);
3036 			MLD_PRINTF(("%s: node is %d\n", __func__, now));
3037 			if ((now != mode) ||
3038 			    (now == mode &&
3039 			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
3040 				MLD_PRINTF(("%s: skip node\n", __func__));
3041 				continue;
3042 			}
3043 			if (is_source_query && ims->im6s_stp == 0) {
3044 				MLD_PRINTF(("%s: skip unrecorded node\n",
3045 				    __func__));
3046 				continue;
3047 			}
3048 			MLD_PRINTF(("%s: append node\n", __func__));
3049 			if (!m_append(m, sizeof(struct in6_addr),
3050 			    (void *)&ims->im6s_addr)) {
3051 				if (m != m0) {
3052 					m_freem(m);
3053 				}
3054 				MLD_PRINTF(("%s: m_append() failed.\n",
3055 				    __func__));
3056 				return -ENOMEM;
3057 			}
3058 			nbytes += sizeof(struct in6_addr);
3059 			++msrcs;
3060 			if (msrcs == m0srcs) {
3061 				break;
3062 			}
3063 		}
3064 		MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
3065 		    msrcs));
3066 		pmr->mr_numsrc = htons((uint16_t)msrcs);
3067 		nbytes += (msrcs * sizeof(struct in6_addr));
3068 	}
3069 
3070 	if (is_source_query && msrcs == 0) {
3071 		MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
3072 		if (m != m0) {
3073 			m_freem(m);
3074 		}
3075 		return 0;
3076 	}
3077 
3078 	/*
3079 	 * We are good to go with first packet.
3080 	 */
3081 	if (m != m0) {
3082 		MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
3083 		m->m_pkthdr.vt_nrecs = 1;
3084 		IF_ENQUEUE(ifq, m);
3085 	} else {
3086 		m->m_pkthdr.vt_nrecs++;
3087 	}
3088 	/*
3089 	 * No further work needed if no source list in packet(s).
3090 	 */
3091 	if (!record_has_sources) {
3092 		return nbytes;
3093 	}
3094 
3095 	/*
3096 	 * Whilst sources remain to be announced, we need to allocate
3097 	 * a new packet and fill out as many sources as will fit.
3098 	 * Always try for a cluster first.
3099 	 */
3100 	while (nims != NULL) {
3101 		if (IF_QFULL(ifq)) {
3102 			MLD_PRINTF(("%s: outbound queue full\n", __func__));
3103 			return -ENOMEM;
3104 		}
3105 		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3106 		if (m == NULL) {
3107 			m = m_gethdr(M_DONTWAIT, MT_DATA);
3108 		}
3109 		if (m == NULL) {
3110 			return -ENOMEM;
3111 		}
3112 		mld_save_context(m, ifp);
3113 		md = m_getptr(m, 0, &off);
3114 		pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
3115 		MLD_PRINTF(("%s: allocated next packet\n", __func__));
3116 
3117 		if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
3118 			if (m != m0) {
3119 				m_freem(m);
3120 			}
3121 			MLD_PRINTF(("%s: m_append() failed.\n", __func__));
3122 			return -ENOMEM;
3123 		}
3124 		m->m_pkthdr.vt_nrecs = 1;
3125 		nbytes += sizeof(struct mldv2_record);
3126 
3127 		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3128 		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
3129 
3130 		msrcs = 0;
3131 		RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
3132 			MLD_PRINTF(("%s: visit node %s\n",
3133 			    __func__, ip6_sprintf(&ims->im6s_addr)));
3134 			now = im6s_get_mode(inm, ims, 1);
3135 			if ((now != mode) ||
3136 			    (now == mode &&
3137 			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
3138 				MLD_PRINTF(("%s: skip node\n", __func__));
3139 				continue;
3140 			}
3141 			if (is_source_query && ims->im6s_stp == 0) {
3142 				MLD_PRINTF(("%s: skip unrecorded node\n",
3143 				    __func__));
3144 				continue;
3145 			}
3146 			MLD_PRINTF(("%s: append node\n", __func__));
3147 			if (!m_append(m, sizeof(struct in6_addr),
3148 			    (void *)&ims->im6s_addr)) {
3149 				if (m != m0) {
3150 					m_freem(m);
3151 				}
3152 				MLD_PRINTF(("%s: m_append() failed.\n",
3153 				    __func__));
3154 				return -ENOMEM;
3155 			}
3156 			++msrcs;
3157 			if (msrcs == m0srcs) {
3158 				break;
3159 			}
3160 		}
3161 		pmr->mr_numsrc = htons((uint16_t)msrcs);
3162 		nbytes += (msrcs * sizeof(struct in6_addr));
3163 
3164 		MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
3165 		IF_ENQUEUE(ifq, m);
3166 	}
3167 
3168 	return nbytes;
3169 }
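
/*
 * Worked example of the m0srcs budget above, assuming the usual
 * definitions (MLD_MTUSPACE = 40-byte IPv6 header + 8-byte Router
 * Alert hop-by-hop option + 8-byte MLDv2 report header = 56 bytes, a
 * 20-byte mldv2_record, and 16-byte sources).  These sizes are
 * assumptions for illustration, not quoted from this file:
 */
#if 0 /* example only -- assumed constants */
#include <assert.h>

int
main(void)
{
	int mtu = 1500;
	int mtuspace = 40 + 8 + 8;      /* assumed MLD_MTUSPACE */
	int rechdr = 20;                /* assumed sizeof(struct mldv2_record) */
	int srclen = 16;                /* sizeof(struct in6_addr) */

	/* Sources that fit in a fresh packet: (1500 - 56 - 20) / 16. */
	assert((mtu - mtuspace - rechdr) / srclen == 89);
	return 0;
}
#endif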
3170 
3171 /*
3172  * Type used to mark record pass completion.
3173  * We exploit the fact we can cast to this easily from the
3174  * current filter modes on each ip_msource node.
3175  */
3176 typedef enum {
3177 	REC_NONE = 0x00,        /* MCAST_UNDEFINED */
3178 	REC_ALLOW = 0x01,       /* MCAST_INCLUDE */
3179 	REC_BLOCK = 0x02,       /* MCAST_EXCLUDE */
3180 	REC_FULL = REC_ALLOW | REC_BLOCK
3181 } rectype_t;
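
/*
 * Worked example: the enum values line up with the MCAST_* filter
 * modes (MCAST_UNDEFINED = 0, MCAST_INCLUDE = 1, MCAST_EXCLUDE = 2),
 * so a node's mode at t1 casts directly to a record type, and a node
 * that went UNDEFINED takes the inverse of the group's filter mode
 * via (~mode & REC_FULL), as used below:
 */
#if 0 /* example only */
#include <assert.h>

int
main(void)
{
	assert((rectype_t)1 == REC_ALLOW);      /* MCAST_INCLUDE */
	assert((rectype_t)2 == REC_BLOCK);      /* MCAST_EXCLUDE */

	/* An UNDEFINED node on an EXCLUDE (2) group -> ALLOW_NEW ... */
	assert((rectype_t)(~2 & REC_FULL) == REC_ALLOW);
	/* ... and on an INCLUDE (1) group -> BLOCK_OLD. */
	assert((rectype_t)(~1 & REC_FULL) == REC_BLOCK);
	return 0;
}
#endif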
3182 
3183 /*
3184  * Enqueue an MLDv2 filter list change to the given output queue.
3185  *
3186  * Source list filter state is held in an RB-tree. When the filter list
3187  * for a group is changed without changing its mode, we need to compute
3188  * the deltas between T0 and T1 for each source in the filter set,
3189  * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3190  *
3191  * As we may potentially queue two record types, and the entire R-B tree
3192  * needs to be walked at once, we break this out into its own function
3193  * so we can generate a tightly packed queue of packets.
3194  *
3195  * XXX This could be written to only use one tree walk, although that makes
3196  * serializing into the mbuf chains a bit harder. For now we do two walks
3197  * which makes things easier on us, and it may or may not be harder on
3198  * the L2 cache.
3199  *
3200  * If successful the size of all data appended to the queue is returned,
3201  * otherwise an error code less than zero is returned, or zero if
3202  * no record(s) were appended.
3203  */
3204 static int
3205 mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
3206 {
3207 	static const int MINRECLEN =
3208 	    sizeof(struct mldv2_record) + sizeof(struct in6_addr);
3209 	struct ifnet            *ifp;
3210 	struct mldv2_record      mr;
3211 	struct mldv2_record     *pmr;
3212 	struct ip6_msource      *ims, *nims;
3213 	struct mbuf             *m, *m0, *md;
3214 	int                      m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3215 	int                      nallow, nblock;
3216 	uint8_t                  mode, now, then;
3217 	rectype_t                crt, drt, nrt;
3218 
3219 	IN6M_LOCK_ASSERT_HELD(inm);
3220 
3221 	if (inm->in6m_nsrc == 0 ||
3222 	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) {
3223 		return 0;
3224 	}
3225 
3226 	ifp = inm->in6m_ifp;                    /* interface */
3227 	mode = (uint8_t)inm->in6m_st[1].iss_fmode;       /* filter mode at t1 */
3228 	crt = REC_NONE; /* current group record type */
3229 	drt = REC_NONE; /* mask of completed group record types */
3230 	nrt = REC_NONE; /* record type for current node */
3231 	m0srcs = 0;     /* # source which will fit in current mbuf chain */
3232 	npbytes = 0;    /* # of bytes appended this packet */
3233 	nbytes = 0;     /* # of bytes appended to group's state-change queue */
3234 	rsrcs = 0;      /* # sources encoded in current record */
3235 	schanged = 0;   /* # nodes encoded in overall filter change */
3236 	nallow = 0;     /* # of source entries in ALLOW_NEW */
3237 	nblock = 0;     /* # of source entries in BLOCK_OLD */
3238 	nims = NULL;    /* next tree node pointer */
3239 
3240 	/*
3241 	 * For each possible filter record mode.
3242 	 * The first kind of source we encounter tells us which
3243 	 * is the first kind of record we start appending.
3244 	 * If a node transitioned to UNDEFINED at t1, its mode is treated
3245 	 * as the inverse of the group's filter mode.
3246 	 */
3247 	while (drt != REC_FULL) {
3248 		do {
3249 			m0 = ifq->ifq_tail;
3250 			if (m0 != NULL &&
3251 			    (m0->m_pkthdr.vt_nrecs + 1 <=
3252 			    MLD_V2_REPORT_MAXRECS) &&
3253 			    (m0->m_pkthdr.len + MINRECLEN) <
3254 			    (ifp->if_mtu - MLD_MTUSPACE)) {
3255 				m = m0;
3256 				m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3257 				    sizeof(struct mldv2_record)) /
3258 				    sizeof(struct in6_addr);
3259 				MLD_PRINTF(("%s: use previous packet\n",
3260 				    __func__));
3261 			} else {
3262 				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3263 				if (m == NULL) {
3264 					m = m_gethdr(M_DONTWAIT, MT_DATA);
3265 				}
3266 				if (m == NULL) {
3267 					MLD_PRINTF(("%s: m_get*() failed\n",
3268 					    __func__));
3269 					return -ENOMEM;
3270 				}
3271 				m->m_pkthdr.vt_nrecs = 0;
3272 				mld_save_context(m, ifp);
3273 				m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3274 				    sizeof(struct mldv2_record)) /
3275 				    sizeof(struct in6_addr);
3276 				npbytes = 0;
3277 				MLD_PRINTF(("%s: allocated new packet\n",
3278 				    __func__));
3279 			}
3280 			/*
3281 			 * Append the MLD group record header to the
3282 			 * current packet's data area.
3283 			 * Recalculate pointer to free space for next
3284 			 * group record, in case m_append() allocated
3285 			 * a new mbuf or cluster.
3286 			 */
3287 			memset(&mr, 0, sizeof(mr));
3288 			mr.mr_addr = inm->in6m_addr;
3289 			in6_clearscope(&mr.mr_addr);
3290 			if (!m_append(m, sizeof(mr), (void *)&mr)) {
3291 				if (m != m0) {
3292 					m_freem(m);
3293 				}
3294 				MLD_PRINTF(("%s: m_append() failed\n",
3295 				    __func__));
3296 				return -ENOMEM;
3297 			}
3298 			npbytes += sizeof(struct mldv2_record);
3299 			if (m != m0) {
3300 				/* new packet; offset in chain */
3301 				md = m_getptr(m, npbytes -
3302 				    sizeof(struct mldv2_record), &off);
3303 				pmr = (struct mldv2_record *)(mtod(md,
3304 				    uint8_t *) + off);
3305 			} else {
3306 				/* current packet; offset from last append */
3307 				md = m_last(m);
3308 				pmr = (struct mldv2_record *)(mtod(md,
3309 				    uint8_t *) + md->m_len -
3310 				    sizeof(struct mldv2_record));
3311 			}
3312 			/*
3313 			 * Begin walking the tree for this record type
3314 			 * pass, or continue from where we left off
3315 			 * previously if we had to allocate a new packet.
3316 			 * Only report deltas in-mode at t1.
3317 			 * We need not report included sources as allowed
3318 			 * if we are in inclusive mode on the group,
3319 			 * however the converse is not true.
3320 			 */
3321 			rsrcs = 0;
3322 			if (nims == NULL) {
3323 				nims = RB_MIN(ip6_msource_tree,
3324 				    &inm->in6m_srcs);
3325 			}
3326 			RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
3327 				MLD_PRINTF(("%s: visit node %s\n", __func__,
3328 				    ip6_sprintf(&ims->im6s_addr)));
3329 				now = im6s_get_mode(inm, ims, 1);
3330 				then = im6s_get_mode(inm, ims, 0);
3331 				MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
3332 				    __func__, then, now));
3333 				if (now == then) {
3334 					MLD_PRINTF(("%s: skip unchanged\n",
3335 					    __func__));
3336 					continue;
3337 				}
3338 				if (mode == MCAST_EXCLUDE &&
3339 				    now == MCAST_INCLUDE) {
3340 					MLD_PRINTF(("%s: skip IN src on EX "
3341 					    "group\n", __func__));
3342 					continue;
3343 				}
3344 				nrt = (rectype_t)now;
3345 				if (nrt == REC_NONE) {
3346 					nrt = (rectype_t)(~mode & REC_FULL);
3347 				}
3348 				if (schanged++ == 0) {
3349 					crt = nrt;
3350 				} else if (crt != nrt) {
3351 					continue;
3352 				}
3353 				if (!m_append(m, sizeof(struct in6_addr),
3354 				    (void *)&ims->im6s_addr)) {
3355 					if (m != m0) {
3356 						m_freem(m);
3357 					}
3358 					MLD_PRINTF(("%s: m_append() failed\n",
3359 					    __func__));
3360 					return -ENOMEM;
3361 				}
3362 				nallow += !!(crt == REC_ALLOW);
3363 				nblock += !!(crt == REC_BLOCK);
3364 				if (++rsrcs == m0srcs) {
3365 					break;
3366 				}
3367 			}
3368 			/*
3369 			 * If we did not append any tree nodes on this
3370 			 * pass, back out of allocations.
3371 			 */
3372 			if (rsrcs == 0) {
3373 				npbytes -= sizeof(struct mldv2_record);
3374 				if (m != m0) {
3375 					MLD_PRINTF(("%s: m_free(m)\n",
3376 					    __func__));
3377 					m_freem(m);
3378 				} else {
3379 					MLD_PRINTF(("%s: m_adj(m, -mr)\n",
3380 					    __func__));
3381 					m_adj(m, -((int)sizeof(
3382 						    struct mldv2_record)));
3383 				}
3384 				continue;
3385 			}
3386 			npbytes += (rsrcs * sizeof(struct in6_addr));
3387 			if (crt == REC_ALLOW) {
3388 				pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
3389 			} else if (crt == REC_BLOCK) {
3390 				pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
3391 			}
3392 			pmr->mr_numsrc = htons((uint16_t)rsrcs);
3393 			/*
3394 			 * Count the new group record, and enqueue this
3395 			 * packet if it wasn't already queued.
3396 			 */
3397 			m->m_pkthdr.vt_nrecs++;
3398 			if (m != m0) {
3399 				IF_ENQUEUE(ifq, m);
3400 			}
3401 			nbytes += npbytes;
3402 		} while (nims != NULL);
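		/*
		 * Note the record type emitted on this pass, and flip
		 * crt so the next pass of the outer loop collects the
		 * sources deferred above.
		 */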
3403 		drt |= crt;
3404 		crt = (~crt & REC_FULL);
3405 	}
3406 
3407 	MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
3408 	    nallow, nblock));
3409 
3410 	return nbytes;
3411 }
3412 
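/*
 * Merge the pending state-change messages queued for a group into
 * the per-interface state-change queue (ifscq).
 *
 * If further retransmissions are pending (in6m_scrv > 0), operate on
 * writable copies so the originals remain queued; otherwise move the
 * packets outright.  A report is coalesced into the tail packet of
 * ifscq only when both the 65,535 group record limit and the link
 * MTU allow it.
 */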
3413 static int
3414 mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
3415 {
3416 	struct ifqueue  *gq;
3417 	struct mbuf     *m;             /* pending state-change */
3418 	struct mbuf     *m0;            /* copy of pending state-change */
3419 	struct mbuf     *mt;            /* last state-change in packet */
3420 	struct mbuf     *n;
3421 	int              docopy, domerge;
3422 	u_int            recslen;
3423 
3424 	IN6M_LOCK_ASSERT_HELD(inm);
3425 
3426 	docopy = 0;
3427 	domerge = 0;
3428 	recslen = 0;
3429 
3430 	/*
3431 	 * If there are further pending retransmissions, make a writable
3432 	 * copy of each queued state-change message before merging.
3433 	 */
3434 	if (inm->in6m_scrv > 0) {
3435 		docopy = 1;
3436 	}
3437 
3438 	gq = &inm->in6m_scq;
3439 #ifdef MLD_DEBUG
3440 	if (gq->ifq_head == NULL) {
3441 		MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
3442 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
3443 	}
3444 #endif
3445 
3446 	/*
3447 	 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
3448 	 * packet might not always be at the head of the ifqueue.
3449 	 */
3450 	m = gq->ifq_head;
3451 	while (m != NULL) {
3452 		/*
3453 		 * Only merge the report into the current packet if
3454 		 * there is sufficient space to do so; an MLDv2 report
3455 		 * packet may only contain 65,535 group records.
3456 		 * Always use a simple mbuf chain concatenation to do this,
3457 		 * as large state changes for single groups may have
3458 		 * allocated clusters.
3459 		 */
3460 		domerge = 0;
3461 		mt = ifscq->ifq_tail;
3462 		if (mt != NULL) {
3463 			recslen = m_length(m);
3464 
3465 			if ((mt->m_pkthdr.vt_nrecs +
3466 			    m->m_pkthdr.vt_nrecs <=
3467 			    MLD_V2_REPORT_MAXRECS) &&
3468 			    (mt->m_pkthdr.len + recslen <=
3469 			    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) {
3470 				domerge = 1;
3471 			}
3472 		}
3473 
3474 		if (!domerge && IF_QFULL(gq)) {
3475 			MLD_PRINTF(("%s: outbound queue full, skipping whole "
3476 			    "packet 0x%llx\n", __func__,
3477 			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3478 			n = m->m_nextpkt;
3479 			if (!docopy) {
3480 				IF_REMQUEUE(gq, m);
3481 				m_freem(m);
3482 			}
3483 			m = n;
3484 			continue;
3485 		}
3486 
3487 		if (!docopy) {
3488 			MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
3489 			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3490 			n = m->m_nextpkt;
3491 			IF_REMQUEUE(gq, m);
3492 			m0 = m;
3493 			m = n;
3494 		} else {
3495 			MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
3496 			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3497 			m0 = m_dup(m, M_NOWAIT);
3498 			if (m0 == NULL) {
3499 				return ENOMEM;
3500 			}
3501 			m0->m_nextpkt = NULL;
3502 			m = m->m_nextpkt;
3503 		}
3504 
3505 		if (!domerge) {
3506 			MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx\n",
3507 			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
3508 			    (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
3509 			IF_ENQUEUE(ifscq, m0);
3510 		} else {
3511 			struct mbuf *mtl;       /* last mbuf of packet mt */
3512 
3513 			MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
3514 			    "0x%llx\n", __func__,
3515 			    (uint64_t)VM_KERNEL_ADDRPERM(m0),
3516 			    (uint64_t)VM_KERNEL_ADDRPERM(mt)));
3517 
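			/*
			 * Graft m0's records onto the tail packet:
			 * strip its packet header and fold its byte
			 * and record counts into mt's header.
			 */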
3518 			mtl = m_last(mt);
3519 			m0->m_flags &= ~M_PKTHDR;
3520 			mt->m_pkthdr.len += recslen;
3521 			mt->m_pkthdr.vt_nrecs +=
3522 			    m0->m_pkthdr.vt_nrecs;
3523 
3524 			mtl->m_next = m0;
3525 		}
3526 	}
3527 
3528 	return 0;
3529 }
3530 
3531 /*
3532  * Respond to a pending MLDv2 General Query.
3533  */
3534 static uint32_t
3535 mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
3536 {
3537 	struct ifnet            *ifp;
3538 	struct in6_multi        *inm;
3539 	struct in6_multistep    step;
3540 	int                      retval;
3541 
3542 	MLI_LOCK_ASSERT_HELD(mli);
3543 
3544 	VERIFY(mli->mli_version == MLD_VERSION_2);
3545 
3546 	ifp = mli->mli_ifp;
3547 	MLI_UNLOCK(mli);
3548 
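	/*
	 * Walk every IPv6 multicast membership in the system and
	 * enqueue a current-state record for each active group on
	 * this interface.
	 */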
3549 	in6_multihead_lock_shared();
3550 	IN6_FIRST_MULTI(step, inm);
3551 	while (inm != NULL) {
3552 		IN6M_LOCK(inm);
3553 		if (inm->in6m_ifp != ifp) {
3554 			goto next;
3555 		}
3556 
3557 		switch (inm->in6m_state) {
3558 		case MLD_NOT_MEMBER:
3559 		case MLD_SILENT_MEMBER:
3560 			break;
3561 		case MLD_REPORTING_MEMBER:
3562 		case MLD_IDLE_MEMBER:
3563 		case MLD_LAZY_MEMBER:
3564 		case MLD_SLEEPING_MEMBER:
3565 		case MLD_AWAKENING_MEMBER:
3566 			inm->in6m_state = MLD_REPORTING_MEMBER;
3567 			MLI_LOCK(mli);
3568 			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3569 			    inm, 0, 0, 0, 0);
3570 			MLI_UNLOCK(mli);
3571 			MLD_PRINTF(("%s: enqueue record = %d\n",
3572 			    __func__, retval));
3573 			break;
3574 		case MLD_G_QUERY_PENDING_MEMBER:
3575 		case MLD_SG_QUERY_PENDING_MEMBER:
3576 		case MLD_LEAVING_MEMBER:
3577 			break;
3578 		}
3579 next:
3580 		IN6M_UNLOCK(inm);
3581 		IN6_NEXT_MULTI(step, inm);
3582 	}
3583 	in6_multihead_lock_done();
3584 
3585 	MLI_LOCK(mli);
3586 	mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3587 	MLI_LOCK_ASSERT_HELD(mli);
3588 
3589 	/*
3590 	 * Slew transmission of bursts over 1 second intervals.
3591 	 * Slew transmission of bursts over 1-second intervals.
3592 	if (mli->mli_gq.ifq_head != NULL) {
3593 		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3594 			MLD_RESPONSE_BURST_INTERVAL);
3595 	}
3596 
3597 	return mli->mli_v2_timer;
3598 }
3599 
3600 /*
3601  * Transmit the next pending message in the output queue.
3602  *
3603  * Must not be called with in6m_lock or mli_lock held.
3604  */
3605 static void
3606 mld_dispatch_packet(struct mbuf *m)
3607 {
3608 	struct ip6_moptions     *im6o;
3609 	struct ifnet            *ifp;
3610 	struct ifnet            *oifp = NULL;
3611 	struct mbuf             *m0;
3612 	struct mbuf             *md;
3613 	struct ip6_hdr          *ip6;
3614 	struct mld_hdr          *mld;
3615 	int                      error;
3616 	int                      off;
3617 	int                      type;
3618 
3619 	MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
3620 	    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3621 
3622 	/*
3623 	 * Check if the ifnet is still attached.
3624 	 */
3625 	ifp = mld_restore_context(m);
3626 	if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
3627 		MLD_PRINTF(("%s: dropped 0x%llx as ifindex %u went away.\n",
3628 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m),
3629 		    (u_int)if_index));
3630 		m_freem(m);
3631 		ip6stat.ip6s_noroute++;
3632 		return;
3633 	}
3634 
3635 	im6o = ip6_allocmoptions(Z_WAITOK);
3636 	if (im6o == NULL) {
3637 		m_freem(m);
3638 		return;
3639 	}
3640 
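	/*
	 * MLD traffic is sent with a hop limit of 1, multicast
	 * loopback suppressed, and the output pinned to the
	 * interface recovered from the mbuf's saved context.
	 */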
3641 	im6o->im6o_multicast_hlim  = 1;
3642 	im6o->im6o_multicast_loop = 0;
3643 	im6o->im6o_multicast_ifp = ifp;
3644 
3645 	if (m->m_flags & M_MLDV1) {
3646 		m0 = m;
3647 	} else {
3648 		m0 = mld_v2_encap_report(ifp, m);
3649 		if (m0 == NULL) {
3650 			MLD_PRINTF(("%s: dropped 0x%llx\n", __func__,
3651 			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3652 			/*
3653 			 * mld_v2_encap_report() has already freed our mbuf.
3654 			 */
3655 			IM6O_REMREF(im6o);
3656 			ip6stat.ip6s_odropped++;
3657 			return;
3658 		}
3659 	}
3660 
3661 	mld_scrub_context(m0);
3662 	m->m_flags &= ~(M_PROTOFLAGS);
3663 	m0->m_pkthdr.rcvif = lo_ifp;
3664 
3665 	ip6 = mtod(m0, struct ip6_hdr *);
3666 	(void)in6_setscope(&ip6->ip6_dst, ifp, NULL);
3667 	ip6_output_setdstifscope(m0, ifp->if_index, NULL);
3668 	/*
3669 	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3670 	 * so we can bump the stats.
3671 	 */
3672 	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3673 	mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
3674 	type = mld->mld_type;
3675 
3676 	if (ifp->if_eflags & IFEF_TXSTART) {
3677 		/*
3678 		 * Use control service class if the outgoing
3679 		 * interface supports transmit-start model.
3680 		 */
3681 		(void) m_set_service_class(m0, MBUF_SC_CTL);
3682 	}
3683 
3684 	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
3685 	    &oifp, NULL);
3686 
3687 	IM6O_REMREF(im6o);
3688 
3689 	if (error) {
3690 		MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__,
3691 		    (uint64_t)VM_KERNEL_ADDRPERM(m0), error));
3692 		if (oifp != NULL) {
3693 			ifnet_release(oifp);
3694 		}
3695 		return;
3696 	}
3697 
3698 	icmp6stat.icp6s_outhist[type]++;
3699 	if (oifp != NULL) {
3700 		icmp6_ifstat_inc(oifp, ifs6_out_msg);
3701 		switch (type) {
3702 		case MLD_LISTENER_REPORT:
3703 		case MLDV2_LISTENER_REPORT:
3704 			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3705 			break;
3706 		case MLD_LISTENER_DONE:
3707 			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3708 			break;
3709 		}
3710 		ifnet_release(oifp);
3711 	}
3712 }
3713 
3714 /*
3715  * Encapsulate an MLDv2 report.
3716  *
3717  * KAME IPv6 requires that hop-by-hop options be passed separately,
3718  * and that the IPv6 header be prepended in a separate mbuf.
3719  *
3720  * Returns a pointer to the new mbuf chain head, or NULL if the
3721  * allocation failed.
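 *
 * The resulting chain is:
 *
 *   mh: [ip6_hdr | mldv2_report] -> m: [group record data ...]
 *
 * with the ICMPv6 checksum computed over the report and the records.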
3722  */
3723 static struct mbuf *
3724 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3725 {
3726 	struct mbuf             *mh;
3727 	struct mldv2_report     *mld;
3728 	struct ip6_hdr          *ip6;
3729 	struct in6_ifaddr       *ia;
3730 	int                      mldreclen;
3731 
3732 	VERIFY(m->m_flags & M_PKTHDR);
3733 
3734 	/*
3735 	 * RFC3590: OK to send as :: or tentative during DAD.
3736 	 */
3737 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
3738 	if (ia == NULL) {
3739 		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
3740 	}
3741 
3742 	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
3743 	if (mh == NULL) {
3744 		if (ia != NULL) {
3745 			IFA_REMREF(&ia->ia_ifa);
3746 		}
3747 		m_freem(m);
3748 		return NULL;
3749 	}
3750 	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3751 
3752 	mldreclen = m_length(m);
3753 	MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));
3754 
3755 	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3756 	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3757 	    sizeof(struct mldv2_report) + mldreclen;
3758 
3759 	ip6 = mtod(mh, struct ip6_hdr *);
3760 	ip6->ip6_flow = 0;
3761 	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3762 	ip6->ip6_vfc |= IPV6_VERSION;
3763 	ip6->ip6_nxt = IPPROTO_ICMPV6;
3764 	if (ia != NULL) {
3765 		IFA_LOCK(&ia->ia_ifa);
3766 	}
3767 	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3768 	ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia);
3769 
3770 	if (ia != NULL) {
3771 		IFA_UNLOCK(&ia->ia_ifa);
3772 		IFA_REMREF(&ia->ia_ifa);
3773 		ia = NULL;
3774 	}
3775 	ip6->ip6_dst = in6addr_linklocal_allv2routers;
3776 	ip6_output_setdstifscope(mh, ifp->if_index, NULL);
3777 	/* scope ID will be set in netisr */
3778 
3779 	mld = (struct mldv2_report *)(ip6 + 1);
3780 	mld->mld_type = MLDV2_LISTENER_REPORT;
3781 	mld->mld_code = 0;
3782 	mld->mld_cksum = 0;
3783 	mld->mld_v2_reserved = 0;
3784 	mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
3785 	m->m_pkthdr.vt_nrecs = 0;
3786 	m->m_flags &= ~M_PKTHDR;
3787 
3788 	mh->m_next = m;
3789 	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3790 	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3791 	return mh;
3792 }
3793 
3794 #ifdef MLD_DEBUG
3795 static const char *
3796 mld_rec_type_to_str(const int type)
3797 {
3798 	switch (type) {
3799 	case MLD_CHANGE_TO_EXCLUDE_MODE:
3800 		return "TO_EX";
3801 	case MLD_CHANGE_TO_INCLUDE_MODE:
3802 		return "TO_IN";
3803 	case MLD_MODE_IS_EXCLUDE:
3804 		return "MODE_EX";
3805 	case MLD_MODE_IS_INCLUDE:
3806 		return "MODE_IN";
3807 	case MLD_ALLOW_NEW_SOURCES:
3808 		return "ALLOW_NEW";
3809 	case MLD_BLOCK_OLD_SOURCES:
3810 		return "BLOCK_OLD";
3811 	default:
3812 		break;
3813 	}
3814 	return "unknown";
3815 }
3816 #endif
3817 
3818 void
3819 mld_init(void)
3820 {
3821 	MLD_PRINTF(("%s: initializing\n", __func__));
3822 
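	/*
	 * Per RFC 3810, MLD messages are sent with an IPv6 Router
	 * Alert hop-by-hop option, a hop limit of 1, and must not
	 * be fragmented; set up the packet options accordingly.
	 */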
3823 	ip6_initpktopts(&mld_po);
3824 	mld_po.ip6po_hlim = 1;
3825 	mld_po.ip6po_hbh = &mld_ra.hbh;
3826 	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3827 	mld_po.ip6po_flags = IP6PO_DONTFRAG;
3828 	LIST_INIT(&mli_head);
3829 }
3830