xref: /xnu-11215.41.3/bsd/netinet6/mld6.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*-
29  * Copyright (c) 2009 Bruce Simpson.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  * 3. The name of the author may not be used to endorse or promote
40  *    products derived from this software without specific prior written
41  *    permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  */
55 
56 /*
57  * Copyright (c) 1988 Stephen Deering.
58  * Copyright (c) 1992, 1993
59  *	The Regents of the University of California.  All rights reserved.
60  *
61  * This code is derived from software contributed to Berkeley by
62  * Stephen Deering of Stanford University.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  * 3. All advertising materials mentioning features or use of this software
73  *    must display the following acknowledgement:
74  *	This product includes software developed by the University of
75  *	California, Berkeley and its contributors.
76  * 4. Neither the name of the University nor the names of its contributors
77  *    may be used to endorse or promote products derived from this software
78  *    without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
81  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
82  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
83  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
84  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
85  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
86  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
87  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
88  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
89  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
90  * SUCH DAMAGE.
91  *
92  *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
93  */
94 /*
95  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
96  * support for mandatory and extensible security protections.  This notice
97  * is included in support of clause 2.2 (b) of the Apple Public License,
98  * Version 2.0.
99  */
100 
101 #include <sys/cdefs.h>
102 
103 #include <sys/param.h>
104 #include <sys/systm.h>
105 #include <sys/mbuf.h>
106 #include <sys/socket.h>
107 #include <sys/protosw.h>
108 #include <sys/kernel.h>
109 #include <sys/malloc.h>
110 #include <sys/mcache.h>
111 
112 #include <dev/random/randomdev.h>
113 
114 #include <kern/zalloc.h>
115 
116 #include <net/if.h>
117 #include <net/route.h>
118 #include <net/net_sysctl.h>
119 
120 #include <netinet/in.h>
121 #include <netinet/in_var.h>
122 #include <netinet6/in6_var.h>
123 #include <netinet/ip6.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/scope6_var.h>
126 #include <netinet/icmp6.h>
127 #include <netinet6/mld6.h>
128 #include <netinet6/mld6_var.h>
129 
130 #include <os/log.h>
131 
132 /* Lock group and attribute for mld_mtx */
133 static LCK_ATTR_DECLARE(mld_mtx_attr, 0, 0);
134 static LCK_GRP_DECLARE(mld_mtx_grp, "mld_mtx");
135 
136 /*
137  * Locking and reference counting:
138  *
139  * mld_mtx mainly protects mli_head.  In cases where both mld_mtx and
140  * in6_multihead_lock must be held, the former must be acquired first in order
141  * to maintain lock ordering.  It is not a requirement that mld_mtx be
142  * acquired first before in6_multihead_lock, but in case both must be acquired
143  * in succession, the correct lock ordering must be followed.
144  *
145  * Instead of walking the if_multiaddrs list at the interface and returning
146  * the ifma_protospec value of a matching entry, we search the global list
147  * of in6_multi records and find it that way; this is done with in6_multihead
148  * lock held.  Doing so avoids the race condition issues that many other BSDs
149  * suffer from (therefore in our implementation, ifma_protospec will never be
150  * NULL for as long as the in6_multi is valid.)
151  *
 152  * The above creates a requirement for the in6_multi to stay in the
 153  * in6_multihead list even after the final MLD leave (in MLDv2 mode) until
 154  * it no longer needs to be retransmitted (not required for MLDv1.)  In order to handle
155  * this, the request and reference counts of the in6_multi are bumped up when
156  * the state changes to MLD_LEAVING_MEMBER, and later dropped in the timeout
157  * handler.  Each in6_multi holds a reference to the underlying mld_ifinfo.
158  *
159  * Thus, the permitted lock order is:
160  *
161  *	mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
162  *
163  * Any may be taken independently, but if any are held at the same time,
164  * the above lock order must be followed.
165  */
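/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller needing both the global MLD lock and the IPv6 multicast address
 * list lock would follow the documented ordering, e.g.:
 *
 *	MLD_LOCK();
 *	in6_multihead_lock_shared();
 *	... look up or walk in6_multi records ...
 *	in6_multihead_lock_done();
 *	MLD_UNLOCK();
 */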
166 static LCK_MTX_DECLARE_ATTR(mld_mtx, &mld_mtx_grp, &mld_mtx_attr);
167 
168 SLIST_HEAD(mld_in6m_relhead, in6_multi);
169 
170 static void     mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
171 static struct mld_ifinfo *mli_alloc(zalloc_flags_t);
172 static void     mli_free(struct mld_ifinfo *);
173 static void     mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
174 static void     mld_dispatch_packet(struct mbuf *);
175 static void     mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
176     struct mld_tparams *);
177 static int      mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
178     struct mld_tparams *);
179 static int      mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
180     struct mld_tparams *, const int);
181 #ifdef MLD_DEBUG
182 static const char *     mld_rec_type_to_str(const int);
183 #endif
184 static uint32_t mld_set_version(struct mld_ifinfo *, const int);
185 static void     mld_append_relq(struct mld_ifinfo *, struct in6_multi *);
186 static void     mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
187 static void     mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int);
188 static int      mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
189     /*const*/ struct mld_hdr *);
190 static int      mld_v1_input_report(struct ifnet *, struct mbuf *,
191     const struct ip6_hdr *, /*const*/ struct mld_hdr *);
192 static void     mld_v1_process_group_timer(struct in6_multi *, const int);
193 static void     mld_v1_process_querier_timers(struct mld_ifinfo *);
194 static int      mld_v1_transmit_report(struct in6_multi *, const uint8_t);
195 static uint32_t mld_v1_update_group(struct in6_multi *, const int);
196 static void     mld_v2_cancel_link_timers(struct mld_ifinfo *);
197 static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *);
198 static struct mbuf *
199 mld_v2_encap_report(struct ifnet *, struct mbuf *);
200 static int      mld_v2_enqueue_filter_change(struct ifqueue *,
201     struct in6_multi *);
202 static int      mld_v2_enqueue_group_record(struct ifqueue *,
203     struct in6_multi *, const int, const int, const int,
204     const int);
205 static int      mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
206     struct mbuf *, const int, const int);
207 static int      mld_v2_merge_state_changes(struct in6_multi *,
208     struct ifqueue *);
209 static void     mld_v2_process_group_timers(struct mld_ifinfo *,
210     struct ifqueue *, struct ifqueue *,
211     struct in6_multi *, const int);
212 static int      mld_v2_process_group_query(struct in6_multi *,
213     int, struct mbuf *, const int);
214 static int      sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
215 static int      sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
216 static int      sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;
217 
218 static const uint32_t mld_timeout_delay = 1000; /* in milliseconds */
 219 static const uint32_t mld_timeout_leeway = 500; /* in milliseconds */
220 static bool mld_timeout_run;             /* MLD timer is scheduled to run */
221 static bool mld_fast_timeout_run;        /* MLD fast timer is scheduled to run */
222 static void mld_timeout(thread_call_param_t, thread_call_param_t);
223 static void mld_sched_timeout(void);
224 static void mld_sched_fast_timeout(void);
225 
226 /*
227  * Normative references: RFC 2710, RFC 3590, RFC 3810.
228  */
229 static struct timeval mld_gsrdelay = {.tv_sec = 10, .tv_usec = 0};
230 static LIST_HEAD(, mld_ifinfo) mli_head;
231 
232 static int querier_present_timers_running6;
233 static int interface_timers_running6;
234 static int state_change_timers_running6;
235 static int current_state_timers_running6;
236 
237 static unsigned int mld_mli_list_genid;
238 /*
239  * Subsystem lock macros.
240  */
241 #define MLD_LOCK()                      \
242 	lck_mtx_lock(&mld_mtx)
243 #define MLD_LOCK_ASSERT_HELD()          \
244 	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED)
245 #define MLD_LOCK_ASSERT_NOTHELD()       \
246 	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
247 #define MLD_UNLOCK()                    \
248 	lck_mtx_unlock(&mld_mtx)
249 
250 #define MLD_ADD_DETACHED_IN6M(_head, _in6m) {                           \
251 	SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle);                     \
252 }
253 
254 #define MLD_REMOVE_DETACHED_IN6M(_head) {                               \
255 	struct in6_multi *_in6m, *_inm_tmp;                             \
256 	SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) {         \
257 	        SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle);       \
258 	        IN6M_REMREF(_in6m);                                     \
259 	}                                                               \
260 	VERIFY(SLIST_EMPTY(_head));                                     \
261 }
262 
263 static KALLOC_TYPE_DEFINE(mli_zone, struct mld_ifinfo, NET_KT_DEFAULT);
264 
265 SYSCTL_DECL(_net_inet6);        /* Note: Not in any common header. */
266 
267 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
268     "IPv6 Multicast Listener Discovery");
269 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
270     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
271     &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
272     "Rate limit for MLDv2 Group-and-Source queries in seconds");
273 
274 SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
275     sysctl_mld_ifinfo, "Per-interface MLDv2 state");
276 
277 static int      mld_v1enable = 1;
278 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
279     &mld_v1enable, 0, "Enable fallback to MLDv1");
280 
281 static int      mld_v2enable = 1;
282 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
283     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
284     &mld_v2enable, 0, sysctl_mld_v2enable, "I",
285     "Enable MLDv2 (debug purposes only)");
286 
287 static int      mld_use_allow = 1;
288 SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
289     &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
290 
291 #ifdef MLD_DEBUG
292 int mld_debug = 0;
293 SYSCTL_INT(_net_inet6_mld, OID_AUTO,
294     debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
295 #endif
296 /*
297  * Packed Router Alert option structure declaration.
298  */
299 struct mld_raopt {
300 	struct ip6_hbh          hbh;
301 	struct ip6_opt          pad;
302 	struct ip6_opt_router   ra;
303 } __packed;
304 
305 /*
306  * Router Alert hop-by-hop option header.
307  */
308 static struct mld_raopt mld_ra = {
309 	.hbh = { .ip6h_nxt = 0, .ip6h_len = 0 },
310 	.pad = { .ip6o_type = IP6OPT_PADN, .ip6o_len = 0 },
311 	.ra = {
312 		.ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
313 		.ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
314 		.ip6or_value =  {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
315 			         (IP6OPT_RTALERT_MLD & 0xFF) }
316 	}
317 };
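/*
 * For reference, the initializer above yields a single 8-octet hop-by-hop
 * options block on the wire: 2 bytes of ip6_hbh header (hence ip6h_len of 0,
 * counted in 8-octet units beyond the first), a 2-byte PadN option, and the
 * 4-byte Router Alert option carrying IP6OPT_RTALERT_MLD.
 */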
318 static struct ip6_pktopts mld_po;
319 
320 /* Store MLDv2 record count in the module private scratch space */
321 #define vt_nrecs        pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
322 
323 static __inline void
 324 mld_save_context(struct mbuf *m, struct ifnet *ifp)
325 {
326 	m->m_pkthdr.rcvif = ifp;
327 }
328 
329 static __inline void
 330 mld_scrub_context(struct mbuf *m)
331 {
332 	m->m_pkthdr.rcvif = NULL;
333 }
334 
335 /*
336  * Restore context from a queued output chain.
337  * Return saved ifp.
338  */
339 static __inline struct ifnet *
 340 mld_restore_context(struct mbuf *m)
341 {
342 	return m->m_pkthdr.rcvif;
343 }
344 
345 /*
346  * Retrieve or set threshold between group-source queries in seconds.
347  */
348 static int
349 sysctl_mld_gsr SYSCTL_HANDLER_ARGS
350 {
351 #pragma unused(arg1, arg2)
352 	int error;
353 	int i;
354 
355 	MLD_LOCK();
356 
357 	i = (int)mld_gsrdelay.tv_sec;
358 
359 	error = sysctl_handle_int(oidp, &i, 0, req);
360 	if (error || !req->newptr) {
361 		goto out_locked;
362 	}
363 
364 	if (i < -1 || i >= 60) {
365 		error = EINVAL;
366 		goto out_locked;
367 	}
368 
369 	mld_gsrdelay.tv_sec = i;
370 
371 out_locked:
372 	MLD_UNLOCK();
373 	return error;
374 }
375 /*
376  * Expose struct mld_ifinfo to userland, keyed by ifindex.
377  * For use by ifmcstat(8).
378  *
379  */
380 static int
381 sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
382 {
383 #pragma unused(oidp)
384 	DECLARE_SYSCTL_HANDLER_ARG_ARRAY(int, 1, name, namelen);
385 	int                      error;
386 	struct ifnet            *ifp;
387 	struct mld_ifinfo       *mli;
388 	struct mld_ifinfo_u     mli_u;
389 
390 	if (req->newptr != USER_ADDR_NULL) {
391 		return EPERM;
392 	}
393 
394 	MLD_LOCK();
395 
396 	if (name[0] <= 0 || name[0] > (u_int)if_index) {
397 		error = ENOENT;
398 		goto out_locked;
399 	}
400 
401 	error = ENOENT;
402 
403 	ifnet_head_lock_shared();
404 	ifp = ifindex2ifnet[name[0]];
405 	ifnet_head_done();
406 	if (ifp == NULL) {
407 		goto out_locked;
408 	}
409 
410 	bzero(&mli_u, sizeof(mli_u));
411 
412 	LIST_FOREACH(mli, &mli_head, mli_link) {
413 		MLI_LOCK(mli);
414 		if (ifp != mli->mli_ifp) {
415 			MLI_UNLOCK(mli);
416 			continue;
417 		}
418 
419 		mli_u.mli_ifindex = mli->mli_ifp->if_index;
420 		mli_u.mli_version = mli->mli_version;
421 		mli_u.mli_v1_timer = mli->mli_v1_timer;
422 		mli_u.mli_v2_timer = mli->mli_v2_timer;
423 		mli_u.mli_flags = mli->mli_flags;
424 		mli_u.mli_rv = mli->mli_rv;
425 		mli_u.mli_qi = mli->mli_qi;
426 		mli_u.mli_qri = mli->mli_qri;
427 		mli_u.mli_uri = mli->mli_uri;
428 		MLI_UNLOCK(mli);
429 
430 		error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u));
431 		break;
432 	}
433 
434 out_locked:
435 	MLD_UNLOCK();
436 	return error;
437 }
438 
439 static int
440 sysctl_mld_v2enable SYSCTL_HANDLER_ARGS
441 {
442 #pragma unused(arg1, arg2)
443 	int error;
444 	int i;
445 	struct mld_ifinfo *mli;
446 	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
447 
448 	MLD_LOCK();
449 
450 	i = mld_v2enable;
451 
452 	error = sysctl_handle_int(oidp, &i, 0, req);
453 	if (error || !req->newptr) {
454 		goto out_locked;
455 	}
456 
457 	if (i < 0 || i > 1) {
458 		error = EINVAL;
459 		goto out_locked;
460 	}
461 
462 	mld_v2enable = i;
463 	/*
464 	 * If we enabled v2, the state transition will take care of upgrading
465 	 * the MLD version back to v2. Otherwise, we have to explicitly
466 	 * downgrade. Note that this functionality is to be used for debugging.
467 	 */
468 	if (mld_v2enable == 1) {
469 		goto out_locked;
470 	}
471 
472 	LIST_FOREACH(mli, &mli_head, mli_link) {
473 		MLI_LOCK(mli);
474 		if (mld_set_version(mli, MLD_VERSION_1) > 0) {
475 			mtp.qpt = 1;
476 		}
477 		MLI_UNLOCK(mli);
478 	}
479 
480 out_locked:
481 	MLD_UNLOCK();
482 
483 	mld_set_timeout(&mtp);
484 
485 	return error;
486 }
487 
488 /*
489  * Dispatch an entire queue of pending packet chains.
490  *
491  * Must not be called with in6m_lock held.
 492  * XXX This routine unlocks the MLD global lock and also the mli lock.
 493  * Make sure that the calling routine takes a reference on the mli
 494  * before calling this routine.
 495  * Also, if we are traversing mli_head, remember to check the mli list
 496  * generation count and restart the loop if the generation count
 497  * has changed.
498  */
499 static void
 500 mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
501 {
502 	struct mbuf *m;
503 
504 	MLD_LOCK_ASSERT_HELD();
505 
506 	if (mli != NULL) {
507 		MLI_LOCK_ASSERT_HELD(mli);
508 	}
509 
510 	for (;;) {
511 		IF_DEQUEUE(ifq, m);
512 		if (m == NULL) {
513 			break;
514 		}
515 		MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
516 		    (uint64_t)VM_KERNEL_ADDRPERM(ifq),
517 		    (uint64_t)VM_KERNEL_ADDRPERM(m)));
518 
519 		if (mli != NULL) {
520 			MLI_UNLOCK(mli);
521 		}
522 		MLD_UNLOCK();
523 
524 		mld_dispatch_packet(m);
525 
526 		MLD_LOCK();
527 		if (mli != NULL) {
528 			MLI_LOCK(mli);
529 		}
530 
531 		if (--limit == 0) {
532 			break;
533 		}
534 	}
535 
536 	if (mli != NULL) {
537 		MLI_LOCK_ASSERT_HELD(mli);
538 	}
539 }
540 
541 /*
542  * Filter outgoing MLD report state by group.
543  *
544  * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
545  * and node-local addresses. However, kernel and socket consumers
546  * always embed the KAME scope ID in the address provided, so strip it
547  * when performing comparison.
548  * Note: This is not the same as the *multicast* scope.
549  *
550  * Return zero if the given group is one for which MLD reports
551  * should be suppressed, or non-zero if reports should be issued.
552  */
553 static __inline__ int
 554 mld_is_addr_reported(const struct in6_addr *addr)
555 {
556 	VERIFY(IN6_IS_ADDR_MULTICAST(addr));
557 
558 	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) {
559 		return 0;
560 	}
561 
562 	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL && !IN6_IS_ADDR_UNICAST_BASED_MULTICAST(addr)) {
563 		struct in6_addr tmp = *addr;
564 		in6_clearscope(&tmp);
565 		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) {
566 			return 0;
567 		}
568 	}
569 
570 	return 1;
571 }
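/*
 * Examples of the filter above (illustrative): ff01::1 (node-local scope)
 * and ff02::1 (all-nodes) return 0 and are suppressed, while a
 * solicited-node group such as ff02::1:ff00:1 returns 1 and is reported.
 */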
572 
573 /*
574  * Attach MLD when PF_INET6 is attached to an interface.
575  */
576 struct mld_ifinfo *
 577 mld_domifattach(struct ifnet *ifp, zalloc_flags_t how)
578 {
579 	struct mld_ifinfo *mli;
580 
581 	os_log_debug(OS_LOG_DEFAULT, "%s: called for ifp %s\n", __func__,
582 	    if_name(ifp));
583 
584 	mli = mli_alloc(how);
585 	if (mli == NULL) {
586 		return NULL;
587 	}
588 
589 	MLD_LOCK();
590 
591 	MLI_LOCK(mli);
592 	mli_initvar(mli, ifp, 0);
593 	mli->mli_debug |= IFD_ATTACHED;
594 	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
595 	MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
596 	MLI_UNLOCK(mli);
597 	ifnet_lock_shared(ifp);
598 	mld6_initsilent(ifp, mli);
599 	ifnet_lock_done(ifp);
600 
601 	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
602 	mld_mli_list_genid++;
603 
604 	MLD_UNLOCK();
605 
606 	os_log_info(OS_LOG_DEFAULT, "%s: allocated mld_ifinfo for ifp %s\n",
607 	    __func__, if_name(ifp));
608 
609 	return mli;
610 }
611 
612 /*
613  * Attach MLD when PF_INET6 is reattached to an interface.  Caller is
614  * expected to have an outstanding reference to the mli.
615  */
616 void
 617 mld_domifreattach(struct mld_ifinfo *mli)
618 {
619 	struct ifnet *ifp;
620 
621 	MLD_LOCK();
622 
623 	MLI_LOCK(mli);
624 	VERIFY(!(mli->mli_debug & IFD_ATTACHED));
625 	ifp = mli->mli_ifp;
626 	VERIFY(ifp != NULL);
627 	mli_initvar(mli, ifp, 1);
628 	mli->mli_debug |= IFD_ATTACHED;
629 	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
630 	MLI_UNLOCK(mli);
631 	ifnet_lock_shared(ifp);
632 	mld6_initsilent(ifp, mli);
633 	ifnet_lock_done(ifp);
634 
635 	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
636 	mld_mli_list_genid++;
637 
638 	MLD_UNLOCK();
639 
640 	os_log_info(OS_LOG_DEFAULT, "%s: reattached mld_ifinfo for ifp %s\n",
641 	    __func__, if_name(ifp));
642 }
643 
644 /*
645  * Hook for domifdetach.
646  */
647 void
 648 mld_domifdetach(struct ifnet *ifp)
649 {
650 	SLIST_HEAD(, in6_multi) in6m_dthead;
651 
652 	SLIST_INIT(&in6m_dthead);
653 
654 	os_log_info(OS_LOG_DEFAULT, "%s: called for ifp %s\n", __func__,
655 	    if_name(ifp));
656 
657 	MLD_LOCK();
658 	mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);
659 	MLD_UNLOCK();
660 
 661 	/* Now that we've dropped all locks, release detached records */
662 	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
663 }
664 
665 /*
666  * Called at interface detach time.  Note that we only flush all deferred
667  * responses and record releases; all remaining inm records and their source
668  * entries related to this interface are left intact, in order to handle
669  * the reattach case.
670  */
671 static void
 672 mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead)
673 {
674 	struct mld_ifinfo *mli, *tmli;
675 
676 	MLD_LOCK_ASSERT_HELD();
677 
678 	LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
679 		MLI_LOCK(mli);
680 		if (mli->mli_ifp == ifp) {
681 			/*
682 			 * Free deferred General Query responses.
683 			 */
684 			IF_DRAIN(&mli->mli_gq);
685 			IF_DRAIN(&mli->mli_v1q);
686 			mld_flush_relq(mli, in6m_dthead);
687 			mli->mli_debug &= ~IFD_ATTACHED;
688 			MLI_UNLOCK(mli);
689 
690 			LIST_REMOVE(mli, mli_link);
691 			MLI_REMREF(mli); /* release mli_head reference */
692 			mld_mli_list_genid++;
693 			return;
694 		}
695 		MLI_UNLOCK(mli);
696 	}
697 	panic("%s: mld_ifinfo not found for ifp %p(%s)", __func__,
698 	    ifp, ifp->if_xname);
699 }
700 
701 __private_extern__ void
 702 mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli)
703 {
704 	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);
705 
706 	MLI_LOCK_ASSERT_NOTHELD(mli);
707 	MLI_LOCK(mli);
708 	if (!(ifp->if_flags & IFF_MULTICAST) &&
709 	    (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) {
710 		mli->mli_flags |= MLIF_SILENT;
711 	} else {
712 		mli->mli_flags &= ~MLIF_SILENT;
713 	}
714 	MLI_UNLOCK(mli);
715 }
716 
717 static void
 718 mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
719 {
720 	MLI_LOCK_ASSERT_HELD(mli);
721 
722 	mli->mli_ifp = ifp;
723 	if (mld_v2enable) {
724 		mli->mli_version = MLD_VERSION_2;
725 	} else {
726 		mli->mli_version = MLD_VERSION_1;
727 	}
728 	mli->mli_flags = 0;
729 	mli->mli_rv = MLD_RV_INIT;
730 	mli->mli_qi = MLD_QI_INIT;
731 	mli->mli_qri = MLD_QRI_INIT;
732 	mli->mli_uri = MLD_URI_INIT;
733 
734 	if (mld_use_allow) {
735 		mli->mli_flags |= MLIF_USEALLOW;
736 	}
737 	if (!reattach) {
738 		SLIST_INIT(&mli->mli_relinmhead);
739 	}
740 
741 	/*
742 	 * Responses to general queries are subject to bounds.
743 	 */
744 	mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
745 	mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
746 }
747 
748 static struct mld_ifinfo *
 749 mli_alloc(zalloc_flags_t how)
750 {
751 	struct mld_ifinfo *mli = zalloc_flags(mli_zone, how | Z_ZERO);
752 	if (mli != NULL) {
753 		lck_mtx_init(&mli->mli_lock, &mld_mtx_grp, &mld_mtx_attr);
754 		mli->mli_debug |= IFD_ALLOC;
755 	}
756 	return mli;
757 }
758 
759 static void
 760 mli_free(struct mld_ifinfo *mli)
761 {
762 	MLI_LOCK(mli);
763 	if (mli->mli_debug & IFD_ATTACHED) {
764 		panic("%s: attached mli=%p is being freed", __func__, mli);
765 		/* NOTREACHED */
766 	} else if (mli->mli_ifp != NULL) {
767 		panic("%s: ifp not NULL for mli=%p", __func__, mli);
768 		/* NOTREACHED */
769 	} else if (!(mli->mli_debug & IFD_ALLOC)) {
770 		panic("%s: mli %p cannot be freed", __func__, mli);
771 		/* NOTREACHED */
772 	} else if (mli->mli_refcnt != 0) {
773 		panic("%s: non-zero refcnt mli=%p", __func__, mli);
774 		/* NOTREACHED */
775 	}
776 	mli->mli_debug &= ~IFD_ALLOC;
777 	MLI_UNLOCK(mli);
778 
779 	lck_mtx_destroy(&mli->mli_lock, &mld_mtx_grp);
780 	zfree(mli_zone, mli);
781 }
782 
783 void
 784 mli_addref(struct mld_ifinfo *mli, int locked)
785 {
786 	if (!locked) {
787 		MLI_LOCK_SPIN(mli);
788 	} else {
789 		MLI_LOCK_ASSERT_HELD(mli);
790 	}
791 
792 	if (++mli->mli_refcnt == 0) {
793 		panic("%s: mli=%p wraparound refcnt", __func__, mli);
794 		/* NOTREACHED */
795 	}
796 	if (!locked) {
797 		MLI_UNLOCK(mli);
798 	}
799 }
800 
801 void
 802 mli_remref(struct mld_ifinfo *mli)
803 {
804 	SLIST_HEAD(, in6_multi) in6m_dthead;
805 	struct ifnet *ifp;
806 
807 	MLI_LOCK_SPIN(mli);
808 
809 	if (mli->mli_refcnt == 0) {
810 		panic("%s: mli=%p negative refcnt", __func__, mli);
811 		/* NOTREACHED */
812 	}
813 
814 	--mli->mli_refcnt;
815 	if (mli->mli_refcnt > 0) {
816 		MLI_UNLOCK(mli);
817 		return;
818 	}
819 
820 	ifp = mli->mli_ifp;
821 	mli->mli_ifp = NULL;
822 	IF_DRAIN(&mli->mli_gq);
823 	IF_DRAIN(&mli->mli_v1q);
824 	SLIST_INIT(&in6m_dthead);
825 	mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
826 	MLI_UNLOCK(mli);
827 
 828 	/* Now that we've dropped all locks, release detached records */
829 	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
830 
831 	os_log(OS_LOG_DEFAULT, "%s: freeing mld_ifinfo for ifp %s\n",
832 	    __func__, if_name(ifp));
833 
834 	mli_free(mli);
835 }
836 
837 /*
838  * Process a received MLDv1 general or address-specific query.
839  * Assumes that the query header has been pulled up to sizeof(mld_hdr).
840  *
841  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
842  * mld_addr. This is OK as we own the mbuf chain.
843  */
844 static int
 845 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
846     /*const*/ struct mld_hdr *mld)
847 {
848 	struct mld_ifinfo       *mli;
849 	struct in6_multi        *inm;
850 	int                      err = 0, is_general_query;
851 	uint16_t                 timer;
852 	struct mld_tparams       mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
853 
854 	MLD_LOCK_ASSERT_NOTHELD();
855 
856 	is_general_query = 0;
857 
858 	if (!mld_v1enable) {
859 		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 query on ifp %s\n",
860 		    __func__, if_name(ifp));
861 		goto done;
862 	}
863 
864 	/*
865 	 * RFC3810 Section 6.2: MLD queries must originate from
866 	 * a router's link-local address.
867 	 */
868 	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
869 		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 query src %s on ifp %s\n",
870 		    __func__, ip6_sprintf(&ip6->ip6_src),
871 		    if_name(ifp));
872 		goto done;
873 	}
874 
875 	/*
876 	 * Do address field validation upfront before we accept
877 	 * the query.
878 	 */
879 	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
880 		/*
881 		 * MLDv1 General Query.
882 		 * If this was not sent to the all-nodes group, ignore it.
883 		 */
884 		struct in6_addr          dst;
885 
886 		dst = ip6->ip6_dst;
887 		in6_clearscope(&dst);
888 		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) {
889 			err = EINVAL;
890 			goto done;
891 		}
892 		is_general_query = 1;
893 	} else {
894 		/*
895 		 * Embed scope ID of receiving interface in MLD query for
896 		 * lookup whilst we don't hold other locks.
897 		 */
898 		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
899 	}
900 
901 	/*
902 	 * Switch to MLDv1 host compatibility mode.
903 	 */
904 	mli = MLD_IFINFO(ifp);
905 	VERIFY(mli != NULL);
906 
907 	MLI_LOCK(mli);
908 	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
909 	MLI_UNLOCK(mli);
910 
911 	timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
912 	if (timer == 0) {
913 		timer = 1;
914 	}
915 
916 	if (is_general_query) {
917 		struct in6_multistep step;
918 
919 		os_log_debug(OS_LOG_DEFAULT, "%s: process v1 general query on ifp %s\n",
920 		    __func__, if_name(ifp));
921 		/*
922 		 * For each reporting group joined on this
923 		 * interface, kick the report timer.
924 		 */
925 		in6_multihead_lock_shared();
926 		IN6_FIRST_MULTI(step, inm);
927 		while (inm != NULL) {
928 			IN6M_LOCK(inm);
929 			if (inm->in6m_ifp == ifp) {
930 				mtp.cst += mld_v1_update_group(inm, timer);
931 			}
932 			IN6M_UNLOCK(inm);
933 			IN6_NEXT_MULTI(step, inm);
934 		}
935 		in6_multihead_lock_done();
936 	} else {
937 		/*
938 		 * MLDv1 Group-Specific Query.
939 		 * If this is a group-specific MLDv1 query, we need only
940 		 * look up the single group to process it.
941 		 */
942 		in6_multihead_lock_shared();
943 		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
944 		in6_multihead_lock_done();
945 
946 		if (inm != NULL) {
947 			IN6M_LOCK(inm);
948 			os_log_debug(OS_LOG_DEFAULT, "%s: process v1 query %s on "
949 			    "ifp %s\n", __func__,
950 			    ip6_sprintf(&mld->mld_addr),
951 			    if_name(ifp));
952 			mtp.cst = mld_v1_update_group(inm, timer);
953 			IN6M_UNLOCK(inm);
954 			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
955 		}
956 		/* XXX Clear embedded scope ID as userland won't expect it. */
957 		in6_clearscope(&mld->mld_addr);
958 	}
959 done:
960 	mld_set_timeout(&mtp);
961 
962 	return err;
963 }
964 
965 /*
966  * Update the report timer on a group in response to an MLDv1 query.
967  *
968  * If we are becoming the reporting member for this group, start the timer.
969  * If we already are the reporting member for this group, and timer is
970  * below the threshold, reset it.
971  *
972  * We may be updating the group for the first time since we switched
973  * to MLDv2. If we are, then we must clear any recorded source lists,
974  * and transition to REPORTING state; the group timer is overloaded
975  * for group and group-source query responses.
976  *
977  * Unlike MLDv2, the delay per group should be jittered
978  * to avoid bursts of MLDv1 reports.
979  */
980 static uint32_t
 981 mld_v1_update_group(struct in6_multi *inm, const int timer)
982 {
983 	IN6M_LOCK_ASSERT_HELD(inm);
984 
985 	MLD_PRINTF(("%s: %s/%s timer=%d\n", __func__,
986 	    ip6_sprintf(&inm->in6m_addr),
987 	    if_name(inm->in6m_ifp), timer));
988 
989 	switch (inm->in6m_state) {
990 	case MLD_NOT_MEMBER:
991 	case MLD_SILENT_MEMBER:
992 		break;
993 	case MLD_REPORTING_MEMBER:
994 		if (inm->in6m_timer != 0 &&
995 		    inm->in6m_timer <= timer) {
996 			MLD_PRINTF(("%s: REPORTING and timer running, "
997 			    "skipping.\n", __func__));
998 			break;
999 		}
1000 		OS_FALLTHROUGH;
1001 	case MLD_SG_QUERY_PENDING_MEMBER:
1002 	case MLD_G_QUERY_PENDING_MEMBER:
1003 	case MLD_IDLE_MEMBER:
1004 	case MLD_LAZY_MEMBER:
1005 	case MLD_AWAKENING_MEMBER:
1006 		MLD_PRINTF(("%s: ->REPORTING\n", __func__));
1007 		inm->in6m_state = MLD_REPORTING_MEMBER;
1008 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1009 		break;
1010 	case MLD_SLEEPING_MEMBER:
1011 		MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
1012 		inm->in6m_state = MLD_AWAKENING_MEMBER;
1013 		break;
1014 	case MLD_LEAVING_MEMBER:
1015 		break;
1016 	}
1017 
1018 	return inm->in6m_timer;
1019 }
1020 
1021 /*
1022  * Process a received MLDv2 general, group-specific or
1023  * group-and-source-specific query.
1024  *
1025  * Assumes that the query header has been pulled up to sizeof(mldv2_query).
1026  *
1027  * Return 0 if successful, otherwise an appropriate error code is returned.
1028  */
1029 static int
 1030 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
1031     struct mbuf *m, const int off, const int icmp6len)
1032 {
1033 	struct mld_ifinfo       *mli;
1034 	struct mldv2_query      *mld;
1035 	struct in6_multi        *inm;
1036 	uint32_t                 maxdelay, nsrc, qqi, timer;
1037 	int                      err = 0, is_general_query;
1038 	uint8_t                  qrv;
1039 	struct mld_tparams       mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
1040 
1041 	MLD_LOCK_ASSERT_NOTHELD();
1042 
1043 	is_general_query = 0;
1044 
1045 	if (!mld_v2enable) {
1046 		os_log_info(OS_LOG_DEFAULT, "%s: ignore v2 query on ifp %s\n",
1047 		    __func__, if_name(ifp));
1048 		goto done;
1049 	}
1050 
1051 	/*
1052 	 * RFC3810 Section 6.2: MLD queries must originate from
1053 	 * a router's link-local address.
1054 	 */
1055 	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1056 		os_log_info(OS_LOG_DEFAULT,
 1057 		    "%s: ignore v2 query src %s on ifp %s\n",
1058 		    __func__, ip6_sprintf(&ip6->ip6_src),
1059 		    if_name(ifp));
1060 		goto done;
1061 	}
1062 
1063 	os_log_debug(OS_LOG_DEFAULT,
1064 	    "%s: input v2 query on ifp %s\n", __func__,
1065 	    if_name(ifp));
1066 
1067 	mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
1068 
 1069 	maxdelay = ntohs(mld->mld_maxdelay);    /* in milliseconds (RFC 3810) */
1070 	if (maxdelay > SHRT_MAX) {
1071 		maxdelay = (MLD_MRC_MANT((uint16_t)maxdelay) | 0x1000) <<
1072 		    (MLD_MRC_EXP((uint16_t)maxdelay) + 3);
1073 	}
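	/*
	 * Worked example (RFC 3810 section 5.1.3): an encoded value of 0x8000
	 * has exponent 0 and mantissa 0, decoding to (0x1000 | 0) << (0 + 3)
	 * = 32768 ms, i.e. 32 seconds assuming MLD_TIMER_SCALE is 1000.
	 */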
1074 	timer = maxdelay / MLD_TIMER_SCALE;
1075 	if (timer == 0) {
1076 		timer = 1;
1077 	}
1078 
1079 	qrv = MLD_QRV(mld->mld_misc);
1080 	if (qrv < 2) {
1081 		MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__,
1082 		    qrv, MLD_RV_INIT));
1083 		qrv = MLD_RV_INIT;
1084 	}
1085 
1086 	qqi = mld->mld_qqi;
1087 	if (qqi >= 128) {
1088 		qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
1089 		    (MLD_QQIC_EXP(mld->mld_qqi) + 3);
1090 	}
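	/*
	 * Worked example: a QQIC of 0x80 (exponent 0, mantissa 0) decodes to
	 * (0x10 | 0) << (0 + 3) = 128 seconds, using the same
	 * mantissa/exponent scheme as the response code above.
	 */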
1091 
1092 	nsrc = ntohs(mld->mld_numsrc);
1093 	if (nsrc > MLD_MAX_GS_SOURCES) {
1094 		err = EMSGSIZE;
1095 		goto done;
1096 	}
1097 	if (icmp6len < sizeof(struct mldv2_query) +
1098 	    (nsrc * sizeof(struct in6_addr))) {
1099 		err = EMSGSIZE;
1100 		goto done;
1101 	}
1102 
1103 	/*
1104 	 * Do further input validation upfront to avoid resetting timers
1105 	 * should we need to discard this query.
1106 	 */
1107 	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
1108 		/*
1109 		 * A general query with a source list has undefined
1110 		 * behaviour; discard it.
1111 		 */
1112 		if (nsrc > 0) {
1113 			err = EINVAL;
1114 			goto done;
1115 		}
1116 		is_general_query = 1;
1117 	} else {
1118 		/*
1119 		 * Embed scope ID of receiving interface in MLD query for
1120 		 * lookup whilst we don't hold other locks (due to KAME
1121 		 * locking lameness). We own this mbuf chain just now.
1122 		 */
1123 		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
1124 	}
1125 
1126 	mli = MLD_IFINFO(ifp);
1127 	VERIFY(mli != NULL);
1128 
1129 	MLI_LOCK(mli);
1130 	/*
1131 	 * Discard the v2 query if we're in Compatibility Mode.
1132 	 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
1133 	 * until the Old Version Querier Present timer expires.
1134 	 */
1135 	if (mli->mli_version != MLD_VERSION_2) {
1136 		MLI_UNLOCK(mli);
1137 		goto done;
1138 	}
1139 
1140 	mtp.qpt = mld_set_version(mli, MLD_VERSION_2);
1141 	mli->mli_rv = qrv;
1142 	mli->mli_qi = qqi;
1143 	mli->mli_qri = MAX(timer, MLD_QRI_MIN);
1144 
1145 	MLD_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, mli->mli_rv,
1146 	    mli->mli_qi, mli->mli_qri));
1147 
1148 	if (is_general_query) {
1149 		/*
1150 		 * MLDv2 General Query.
1151 		 *
1152 		 * Schedule a current-state report on this ifp for
1153 		 * all groups, possibly containing source lists.
1154 		 *
1155 		 * If there is a pending General Query response
1156 		 * scheduled earlier than the selected delay, do
1157 		 * not schedule any other reports.
1158 		 * Otherwise, reset the interface timer.
1159 		 */
1160 		os_log_debug(OS_LOG_DEFAULT, "%s: process v2 general query on ifp %s\n",
1161 		    __func__, if_name(ifp));
1162 		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
1163 			mtp.it = mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
1164 		}
1165 		MLI_UNLOCK(mli);
1166 	} else {
1167 		MLI_UNLOCK(mli);
1168 		/*
1169 		 * MLDv2 Group-specific or Group-and-source-specific Query.
1170 		 *
1171 		 * Group-source-specific queries are throttled on
1172 		 * a per-group basis to defeat denial-of-service attempts.
1173 		 * Queries for groups we are not a member of on this
1174 		 * link are simply ignored.
1175 		 */
1176 		in6_multihead_lock_shared();
1177 		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
1178 		in6_multihead_lock_done();
1179 		if (inm == NULL) {
1180 			goto done;
1181 		}
1182 
1183 		IN6M_LOCK(inm);
1184 		if (nsrc > 0) {
1185 			if (!ratecheck(&inm->in6m_lastgsrtv,
1186 			    &mld_gsrdelay)) {
1187 				os_log_info(OS_LOG_DEFAULT, "%s: GS query throttled\n",
1188 				    __func__);
1189 				IN6M_UNLOCK(inm);
1190 				IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1191 				goto done;
1192 			}
1193 		}
1194 		os_log_debug(OS_LOG_DEFAULT, "%s: process v2 group query on ifp %s\n",
1195 		    __func__, if_name(ifp));
1196 		/*
1197 		 * If there is a pending General Query response
1198 		 * scheduled sooner than the selected delay, no
1199 		 * further report need be scheduled.
1200 		 * Otherwise, prepare to respond to the
1201 		 * group-specific or group-and-source query.
1202 		 */
1203 		MLI_LOCK(mli);
1204 		mtp.it = mli->mli_v2_timer;
1205 		MLI_UNLOCK(mli);
1206 		if (mtp.it == 0 || mtp.it >= timer) {
1207 			(void) mld_v2_process_group_query(inm, timer, m, off);
1208 			mtp.cst = inm->in6m_timer;
1209 		}
1210 		IN6M_UNLOCK(inm);
1211 		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1212 		/* XXX Clear embedded scope ID as userland won't expect it. */
1213 		in6_clearscope(&mld->mld_addr);
1214 	}
1215 done:
1216 	if (mtp.it > 0) {
1217 		os_log_debug(OS_LOG_DEFAULT, "%s: v2 general query response scheduled in "
1218 		    "T+%d seconds on ifp %s\n", __func__, mtp.it,
1219 		    if_name(ifp));
1220 	}
1221 	mld_set_timeout(&mtp);
1222 
1223 	return err;
1224 }
1225 
1226 /*
 1227  * Process a received MLDv2 group-specific or group-and-source-specific
 1228  * query.
 1229  * Return <0 if any error occurred. Currently this is ignored.
1230  */
1231 static int
 1232 mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
1233     const int off)
1234 {
1235 	struct mldv2_query      *mld;
1236 	int                      retval;
1237 	uint16_t                 nsrc;
1238 
1239 	IN6M_LOCK_ASSERT_HELD(inm);
1240 
1241 	retval = 0;
1242 	mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);
1243 
1244 	switch (inm->in6m_state) {
1245 	case MLD_NOT_MEMBER:
1246 	case MLD_SILENT_MEMBER:
1247 	case MLD_SLEEPING_MEMBER:
1248 	case MLD_LAZY_MEMBER:
1249 	case MLD_AWAKENING_MEMBER:
1250 	case MLD_IDLE_MEMBER:
1251 	case MLD_LEAVING_MEMBER:
1252 		return retval;
1253 	case MLD_REPORTING_MEMBER:
1254 	case MLD_G_QUERY_PENDING_MEMBER:
1255 	case MLD_SG_QUERY_PENDING_MEMBER:
1256 		break;
1257 	}
1258 
1259 	nsrc = ntohs(mld->mld_numsrc);
1260 
1261 	/*
1262 	 * Deal with group-specific queries upfront.
1263 	 * If any group query is already pending, purge any recorded
1264 	 * source-list state if it exists, and schedule a query response
1265 	 * for this group-specific query.
1266 	 */
1267 	if (nsrc == 0) {
1268 		if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
1269 		    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
1270 			in6m_clear_recorded(inm);
1271 			timer = min(inm->in6m_timer, timer);
1272 		}
1273 		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
1274 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1275 		return retval;
1276 	}
1277 
1278 	/*
1279 	 * Deal with the case where a group-and-source-specific query has
1280 	 * been received but a group-specific query is already pending.
1281 	 */
1282 	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
1283 		timer = min(inm->in6m_timer, timer);
1284 		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1285 		return retval;
1286 	}
1287 
1288 	/*
1289 	 * Finally, deal with the case where a group-and-source-specific
1290 	 * query has been received, where a response to a previous g-s-r
1291 	 * query exists, or none exists.
1292 	 * In this case, we need to parse the source-list which the Querier
1293 	 * has provided us with and check if we have any source list filter
 1294  * entries at T1 for these sources. If we do not, there is no need to
 1295  * schedule a report and the query may be dropped.
1296 	 * If we do, we must record them and schedule a current-state
1297 	 * report for those sources.
1298 	 */
1299 	if (inm->in6m_nsrc > 0) {
1300 		struct mbuf             *m;
1301 		struct in6_addr          addr;
1302 		int                      i, nrecorded;
1303 		int                      soff;
1304 
1305 		m = m0;
1306 		soff = off + sizeof(struct mldv2_query);
1307 		nrecorded = 0;
1308 		for (i = 0; i < nsrc; i++) {
1309 			m_copydata(m, soff, sizeof(addr), &addr);
1310 			retval = in6m_record_source(inm, &addr);
1311 			if (retval < 0) {
1312 				break;
1313 			}
1314 			nrecorded += retval;
1315 			soff += sizeof(struct in6_addr);
1316 
1317 			while (m && (soff >= m->m_len)) {
1318 				soff -= m->m_len;
1319 				m = m->m_next;
1320 			}
1321 
1322 			/* should not be possible: */
1323 			if (m == NULL) {
1324 				break;
1325 			}
1326 		}
1327 		if (nrecorded > 0) {
1328 			MLD_PRINTF(("%s: schedule response to SG query\n",
1329 			    __func__));
1330 			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
1331 			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1332 		}
1333 	}
1334 
1335 	return retval;
1336 }
1337 
1338 /*
1339  * Process a received MLDv1 host membership report.
1340  * Assumes mld points to mld_hdr in pulled up mbuf chain.
1341  *
1342  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1343  * mld_addr. This is OK as we own the mbuf chain.
1344  */
1345 static int
 1346 mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
1347     const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
1348 {
1349 	struct in6_addr          src, dst;
1350 	struct in6_ifaddr       *ia;
1351 	struct in6_multi        *inm;
1352 
1353 	if (!mld_v1enable) {
1354 		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report on ifp %s\n",
1355 		    __func__, if_name(ifp));
1356 		return 0;
1357 	}
1358 
1359 	if ((ifp->if_flags & IFF_LOOPBACK) ||
1360 	    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1361 		return 0;
1362 	}
1363 
1364 	/*
1365 	 * MLDv1 reports must originate from a host's link-local address,
1366 	 * or the unspecified address (when booting).
1367 	 */
1368 	src = ip6->ip6_src;
1369 	in6_clearscope(&src);
1370 	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
 1371 		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report src %s on ifp %s\n",
1372 		    __func__, ip6_sprintf(&ip6->ip6_src),
1373 		    if_name(ifp));
1374 		return EINVAL;
1375 	}
1376 
1377 	/*
1378 	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1379 	 * group, and must be directed to the group itself.
1380 	 */
1381 	dst = ip6->ip6_dst;
1382 	in6_clearscope(&dst);
1383 	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1384 	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
 1385 		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report dst %s on ifp %s\n",
1386 		    __func__, ip6_sprintf(&ip6->ip6_dst),
1387 		    if_name(ifp));
1388 		return EINVAL;
1389 	}
1390 
1391 	/*
1392 	 * Make sure we don't hear our own membership report, as fast
1393 	 * leave requires knowing that we are the only member of a
1394 	 * group. Assume we used the link-local address if available,
1395 	 * otherwise look for ::.
1396 	 *
1397 	 * XXX Note that scope ID comparison is needed for the address
1398 	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1399 	 * performed for the on-wire address.
1400 	 */
1401 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
1402 	if (ia != NULL) {
1403 		IFA_LOCK(&ia->ia_ifa);
1404 		if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) {
1405 			IFA_UNLOCK(&ia->ia_ifa);
1406 			ifa_remref(&ia->ia_ifa);
1407 			return 0;
1408 		}
1409 		IFA_UNLOCK(&ia->ia_ifa);
1410 		ifa_remref(&ia->ia_ifa);
1411 	} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
1412 		return 0;
1413 	}
1414 
1415 	os_log_debug(OS_LOG_DEFAULT, "%s: process v1 report %s on ifp %s\n",
1416 	    __func__, ip6_sprintf(&mld->mld_addr),
1417 	    if_name(ifp));
1418 
1419 	/*
1420 	 * Embed scope ID of receiving interface in MLD query for lookup
1421 	 * whilst we don't hold other locks (due to KAME locking lameness).
1422 	 */
1423 	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
1424 		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
1425 	}
1426 
1427 	/*
1428 	 * MLDv1 report suppression.
1429 	 * If we are a member of this group, and our membership should be
1430 	 * reported, and our group timer is pending or about to be reset,
1431 	 * stop our group timer by transitioning to the 'lazy' state.
1432 	 */
1433 	in6_multihead_lock_shared();
1434 	IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
1435 	in6_multihead_lock_done();
1436 
1437 	if (inm != NULL) {
1438 		struct mld_ifinfo *mli;
1439 
1440 		IN6M_LOCK(inm);
1441 		mli = inm->in6m_mli;
1442 		VERIFY(mli != NULL);
1443 
1444 		MLI_LOCK(mli);
1445 		/*
1446 		 * If we are in MLDv2 host mode, do not allow the
1447 		 * other host's MLDv1 report to suppress our reports.
1448 		 */
1449 		if (mli->mli_version == MLD_VERSION_2) {
1450 			MLI_UNLOCK(mli);
1451 			IN6M_UNLOCK(inm);
1452 			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1453 			goto out;
1454 		}
1455 		MLI_UNLOCK(mli);
1456 
1457 		inm->in6m_timer = 0;
1458 
1459 		switch (inm->in6m_state) {
1460 		case MLD_NOT_MEMBER:
1461 		case MLD_SILENT_MEMBER:
1462 		case MLD_SLEEPING_MEMBER:
1463 			break;
1464 		case MLD_REPORTING_MEMBER:
1465 		case MLD_IDLE_MEMBER:
1466 		case MLD_AWAKENING_MEMBER:
1467 			MLD_PRINTF(("%s: report suppressed for %s on "
1468 			    "ifp 0x%llx(%s)\n", __func__,
1469 			    ip6_sprintf(&mld->mld_addr),
1470 			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1471 			OS_FALLTHROUGH;
1472 		case MLD_LAZY_MEMBER:
1473 			inm->in6m_state = MLD_LAZY_MEMBER;
1474 			break;
1475 		case MLD_G_QUERY_PENDING_MEMBER:
1476 		case MLD_SG_QUERY_PENDING_MEMBER:
1477 		case MLD_LEAVING_MEMBER:
1478 			break;
1479 		}
1480 		IN6M_UNLOCK(inm);
1481 		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1482 	}
1483 
1484 out:
1485 	/* XXX Clear embedded scope ID as userland won't expect it. */
1486 	in6_clearscope(&mld->mld_addr);
1487 
1488 	return 0;
1489 }
1490 
1491 /*
1492  * MLD input path.
1493  *
1494  * Assume query messages which fit in a single ICMPv6 message header
1495  * have been pulled up.
1496  * Assume that userland will want to see the message, even if it
1497  * otherwise fails kernel input validation; do not free it.
1498  * Pullup may however free the mbuf chain m if it fails.
1499  *
1500  * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1501  */
1502 int
 1503 mld_input(struct mbuf *m, int off, int icmp6len)
1504 {
1505 	struct ifnet    *ifp = NULL;
1506 	struct ip6_hdr  *ip6 = NULL;
1507 	struct mld_hdr  *mld = NULL;
1508 	int              mldlen = 0;
1509 
1510 	MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
1511 	    (uint64_t)VM_KERNEL_ADDRPERM(m), off));
1512 
1513 	ifp = m->m_pkthdr.rcvif;
1514 
1515 	/* Pullup to appropriate size. */
1516 	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1517 	if (mld->mld_type == MLD_LISTENER_QUERY &&
1518 	    icmp6len >= sizeof(struct mldv2_query)) {
1519 		mldlen = sizeof(struct mldv2_query);
1520 	} else {
1521 		mldlen = sizeof(struct mld_hdr);
1522 	}
1523 	// check if mldv2_query/mld_hdr fits in the first mbuf
1524 	IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE);
1525 	IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
1526 	if (mld == NULL) {
1527 		icmp6stat.icp6s_badlen++;
1528 		return IPPROTO_DONE;
1529 	}
1530 	ip6 = mtod(m, struct ip6_hdr *);
1531 
1532 	/*
1533 	 * Userland needs to see all of this traffic for implementing
1534 	 * the endpoint discovery portion of multicast routing.
1535 	 */
1536 	switch (mld->mld_type) {
1537 	case MLD_LISTENER_QUERY:
1538 		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1539 		if (icmp6len == sizeof(struct mld_hdr)) {
1540 			if (mld_v1_input_query(ifp, ip6, mld) != 0) {
1541 				return 0;
1542 			}
1543 		} else if (icmp6len >= sizeof(struct mldv2_query)) {
1544 			if (mld_v2_input_query(ifp, ip6, m, off,
1545 			    icmp6len) != 0) {
1546 				return 0;
1547 			}
1548 		}
1549 		break;
1550 	case MLD_LISTENER_REPORT:
1551 		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1552 		if (mld_v1_input_report(ifp, m, ip6, mld) != 0) {
1553 			return 0;
1554 		}
1555 		break;
1556 	case MLDV2_LISTENER_REPORT:
1557 		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1558 		break;
1559 	case MLD_LISTENER_DONE:
1560 		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1561 		break;
1562 	default:
1563 		break;
1564 	}
1565 
1566 	return 0;
1567 }
1568 
1569 /*
1570  * Schedule MLD timer based on various parameters; caller must ensure that
1571  * lock ordering is maintained as this routine acquires MLD global lock.
1572  */
1573 void
 1574 mld_set_timeout(struct mld_tparams *mtp)
1575 {
1576 	MLD_LOCK_ASSERT_NOTHELD();
1577 	VERIFY(mtp != NULL);
1578 
1579 	if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
1580 		MLD_LOCK();
1581 		if (mtp->qpt != 0) {
1582 			querier_present_timers_running6 = 1;
1583 		}
1584 		if (mtp->it != 0) {
1585 			interface_timers_running6 = 1;
1586 		}
1587 		if (mtp->cst != 0) {
1588 			current_state_timers_running6 = 1;
1589 		}
1590 		if (mtp->sct != 0) {
1591 			state_change_timers_running6 = 1;
1592 		}
1593 		if (mtp->fast) {
1594 			mld_sched_fast_timeout();
1595 		} else {
1596 			mld_sched_timeout();
1597 		}
1598 		MLD_UNLOCK();
1599 	}
1600 }
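/*
 * Usage sketch (mirrors callers elsewhere in this file): pending timer
 * state is gathered in a struct mld_tparams while the subsystem locks are
 * held, and mld_set_timeout() is invoked only after they are dropped:
 *
 *	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
 *	...
 *	mtp.cst = mld_v1_update_group(inm, timer);
 *	...
 *	mld_set_timeout(&mtp);
 */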
1601 
1602 void
 1603 mld_set_fast_timeout(struct mld_tparams *mtp)
1604 {
1605 	VERIFY(mtp != NULL);
1606 	mtp->fast = true;
1607 	mld_set_timeout(mtp);
1608 }
1609 
1610 /*
1611  * MLD6 timer handler (per 1 second).
1612  */
1613 static void
 1614 mld_timeout(thread_call_param_t arg0, thread_call_param_t arg1 __unused)
1615 {
1616 	struct ifqueue           scq;   /* State-change packets */
1617 	struct ifqueue           qrq;   /* Query response packets */
1618 	struct ifnet            *ifp;
1619 	struct mld_ifinfo       *mli;
1620 	struct in6_multi        *inm;
1621 	int                      uri_sec = 0;
1622 	unsigned int genid = mld_mli_list_genid;
1623 	bool                     fast = arg0 != NULL;
1624 
1625 	SLIST_HEAD(, in6_multi) in6m_dthead;
1626 
1627 	SLIST_INIT(&in6m_dthead);
1628 
1629 	/*
1630 	 * Update coarse-grained networking timestamp (in sec.); the idea
1631 	 * is to piggy-back on the timeout callout to update the counter
1632 	 * returnable via net_uptime().
1633 	 */
1634 	net_update_uptime();
1635 
1636 	MLD_LOCK();
1637 
1638 	MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d, fast %d\n", __func__,
1639 	    querier_present_timers_running6, interface_timers_running6,
1640 	    current_state_timers_running6, state_change_timers_running6, fast));
1641 
1642 	if (fast) {
1643 		/*
1644 		 * When running the fast timer, skip processing
1645 		 * of "querier present" timers since they are
1646 		 * based on 1-second intervals.
1647 		 */
1648 		goto skip_query_timers;
1649 	}
1650 	/*
1651 	 * MLDv1 querier present timer processing.
1652 	 */
1653 	if (querier_present_timers_running6) {
1654 		querier_present_timers_running6 = 0;
1655 		LIST_FOREACH(mli, &mli_head, mli_link) {
1656 			MLI_LOCK(mli);
1657 			mld_v1_process_querier_timers(mli);
1658 			if (mli->mli_v1_timer > 0) {
1659 				querier_present_timers_running6 = 1;
1660 			}
1661 			MLI_UNLOCK(mli);
1662 		}
1663 	}
1664 
1665 	/*
1666 	 * MLDv2 General Query response timer processing.
1667 	 */
1668 	if (interface_timers_running6) {
1669 		MLD_PRINTF(("%s: interface timers running\n", __func__));
1670 		interface_timers_running6 = 0;
1671 		mli = LIST_FIRST(&mli_head);
1672 
1673 		while (mli != NULL) {
1674 			if (mli->mli_flags & MLIF_PROCESSED) {
1675 				mli = LIST_NEXT(mli, mli_link);
1676 				continue;
1677 			}
1678 
1679 			MLI_LOCK(mli);
1680 			if (mli->mli_version != MLD_VERSION_2) {
1681 				MLI_UNLOCK(mli);
1682 				mli = LIST_NEXT(mli, mli_link);
1683 				continue;
1684 			}
1685 			/*
1686 			 * XXX The logic below ends up calling
1687 			 * mld_dispatch_packet which can unlock mli
1688 			 * and the global MLD lock.
1689 			 * Therefore grab a reference on MLI and also
 1690 			 * check the generation count to see if we should
1691 			 * iterate the list again.
1692 			 */
1693 			MLI_ADDREF_LOCKED(mli);
1694 
1695 			if (mli->mli_v2_timer == 0) {
1696 				/* Do nothing. */
1697 			} else if (--mli->mli_v2_timer == 0) {
1698 				if (mld_v2_dispatch_general_query(mli) > 0) {
1699 					interface_timers_running6 = 1;
1700 				}
1701 			} else {
1702 				interface_timers_running6 = 1;
1703 			}
1704 			mli->mli_flags |= MLIF_PROCESSED;
1705 			MLI_UNLOCK(mli);
1706 			MLI_REMREF(mli);
1707 
1708 			if (genid != mld_mli_list_genid) {
1709 				MLD_PRINTF(("%s: MLD information list changed "
1710 				    "in the middle of iteration! Restart iteration.\n",
1711 				    __func__));
1712 				mli = LIST_FIRST(&mli_head);
1713 				genid = mld_mli_list_genid;
1714 			} else {
1715 				mli = LIST_NEXT(mli, mli_link);
1716 			}
1717 		}
1718 
1719 		LIST_FOREACH(mli, &mli_head, mli_link)
1720 		mli->mli_flags &= ~MLIF_PROCESSED;
1721 	}
1722 
1723 skip_query_timers:
1724 	if (!current_state_timers_running6 &&
1725 	    !state_change_timers_running6) {
1726 		goto out_locked;
1727 	}
1728 
1729 	current_state_timers_running6 = 0;
1730 	state_change_timers_running6 = 0;
1731 
1732 	MLD_PRINTF(("%s: state change timers running\n", __func__));
1733 
1734 	memset(&qrq, 0, sizeof(struct ifqueue));
1735 	qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;
1736 
1737 	memset(&scq, 0, sizeof(struct ifqueue));
1738 	scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;
1739 
1740 	/*
1741 	 * MLD host report and state-change timer processing.
1742 	 * Note: Processing a v2 group timer may remove a node.
1743 	 */
1744 	mli = LIST_FIRST(&mli_head);
1745 
1746 	while (mli != NULL) {
1747 		struct in6_multistep step;
1748 
1749 		if (mli->mli_flags & MLIF_PROCESSED) {
1750 			mli = LIST_NEXT(mli, mli_link);
1751 			continue;
1752 		}
1753 
1754 		MLI_LOCK(mli);
1755 		ifp = mli->mli_ifp;
1756 		uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);
1757 		MLI_UNLOCK(mli);
1758 
1759 		in6_multihead_lock_shared();
1760 		IN6_FIRST_MULTI(step, inm);
1761 		while (inm != NULL) {
1762 			IN6M_LOCK(inm);
1763 			if (inm->in6m_ifp != ifp) {
1764 				goto next;
1765 			}
1766 
1767 			MLI_LOCK(mli);
1768 			switch (mli->mli_version) {
1769 			case MLD_VERSION_1:
1770 				mld_v1_process_group_timer(inm,
1771 				    mli->mli_version);
1772 				break;
1773 			case MLD_VERSION_2:
1774 				mld_v2_process_group_timers(mli, &qrq,
1775 				    &scq, inm, uri_sec);
1776 				break;
1777 			}
1778 			MLI_UNLOCK(mli);
1779 next:
1780 			IN6M_UNLOCK(inm);
1781 			IN6_NEXT_MULTI(step, inm);
1782 		}
1783 		in6_multihead_lock_done();
1784 
1785 		/*
1786 		 * XXX The logic below ends up calling
1787 		 * mld_dispatch_packet which can unlock mli
1788 		 * and the global MLD lock.
1789 		 * Therefore grab a reference on MLI and also
1790 		 * check for generation count to see if we should
1791 		 * iterate the list again.
1792 		 */
1793 		MLI_LOCK(mli);
1794 		MLI_ADDREF_LOCKED(mli);
1795 		if (mli->mli_version == MLD_VERSION_1) {
1796 			mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
1797 		} else if (mli->mli_version == MLD_VERSION_2) {
1798 			MLI_UNLOCK(mli);
1799 			mld_dispatch_queue_locked(NULL, &qrq, 0);
1800 			mld_dispatch_queue_locked(NULL, &scq, 0);
1801 			VERIFY(qrq.ifq_len == 0);
1802 			VERIFY(scq.ifq_len == 0);
1803 			MLI_LOCK(mli);
1804 		}
1805 		/*
1806 		 * In case there are still any pending membership reports
1807 		 * which didn't get drained at version change time.
1808 		 */
1809 		IF_DRAIN(&mli->mli_v1q);
1810 		/*
1811 		 * Release all deferred inm records, and drain any locally
1812 		 * enqueued packets; do it even if the current MLD version
1813 		 * for the link is no longer MLDv2, in order to handle the
1814 		 * version change case.
1815 		 */
1816 		mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
1817 		mli->mli_flags |= MLIF_PROCESSED;
1818 		MLI_UNLOCK(mli);
1819 		MLI_REMREF(mli);
1820 
1821 		IF_DRAIN(&qrq);
1822 		IF_DRAIN(&scq);
1823 
1824 		if (genid != mld_mli_list_genid) {
1825 			MLD_PRINTF(("%s: MLD information list changed "
1826 			    "in the middle of iteration! Restart iteration.\n",
1827 			    __func__));
1828 			mli = LIST_FIRST(&mli_head);
1829 			genid = mld_mli_list_genid;
1830 		} else {
1831 			mli = LIST_NEXT(mli, mli_link);
1832 		}
1833 	}
1834 
1835 	LIST_FOREACH(mli, &mli_head, mli_link)
1836 	mli->mli_flags &= ~MLIF_PROCESSED;
1837 
1838 out_locked:
1839 	/* re-arm the timer if there's work to do */
1840 	if (fast) {
1841 		mld_fast_timeout_run = false;
1842 	} else {
1843 		mld_timeout_run = false;
1844 	}
1845 	mld_sched_timeout();
1846 	MLD_UNLOCK();
1847 
1848 	/* Now that we've dropped all locks, release detached records */
1849 	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
1850 }
1851 
1852 static void
1853 mld_sched_timeout(void)
1854 {
1855 	static thread_call_t mld_timeout_tcall;
1856 	uint64_t deadline = 0, leeway = 0;
1857 
1858 	MLD_LOCK_ASSERT_HELD();
1859 	if (mld_timeout_tcall == NULL) {
1860 		mld_timeout_tcall =
1861 		    thread_call_allocate_with_options(mld_timeout,
1862 		    NULL,
1863 		    THREAD_CALL_PRIORITY_KERNEL,
1864 		    THREAD_CALL_OPTIONS_ONCE);
1865 	}
1866 
1867 	if (!mld_timeout_run &&
1868 	    (querier_present_timers_running6 || current_state_timers_running6 ||
1869 	    interface_timers_running6 || state_change_timers_running6)) {
1870 		mld_timeout_run = true;
1871 		clock_interval_to_deadline(mld_timeout_delay, NSEC_PER_MSEC,
1872 		    &deadline);
1873 		clock_interval_to_absolutetime_interval(mld_timeout_leeway,
1874 		    NSEC_PER_MSEC, &leeway);
1875 		thread_call_enter_delayed_with_leeway(mld_timeout_tcall, NULL,
1876 		    deadline, leeway,
1877 		    THREAD_CALL_DELAY_LEEWAY);
1878 	}
1879 }
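/*
 * Worked example (the millisecond values here are assumptions, not
 * taken from this file): with mld_timeout_delay = 1000 ms and
 * mld_timeout_leeway = 500 ms, the code above arms the thread call
 * roughly one second out and allows the scheduler to defer it by up
 * to another half second to coalesce timer wakeups:
 *
 *	clock_interval_to_deadline(1000, NSEC_PER_MSEC, &deadline);
 *	clock_interval_to_absolutetime_interval(500, NSEC_PER_MSEC,
 *	    &leeway);
 */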
1880 
1881 static void
1882 mld_sched_fast_timeout(void)
1883 {
1884 	static thread_call_t mld_fast_timeout_tcall;
1885 
1886 	MLD_LOCK_ASSERT_HELD();
1887 	if (mld_fast_timeout_tcall == NULL) {
1888 		mld_fast_timeout_tcall =
1889 		    thread_call_allocate_with_options(mld_timeout,
1890 		    mld_sched_fast_timeout,
1891 		    THREAD_CALL_PRIORITY_KERNEL,
1892 		    THREAD_CALL_OPTIONS_ONCE);
1893 	}
1894 	if (!mld_fast_timeout_run &&
1895 	    (current_state_timers_running6 || state_change_timers_running6)) {
1896 		mld_fast_timeout_run = true;
1897 		thread_call_enter(mld_fast_timeout_tcall);
1898 	}
1899 }
1900 
1901 /*
1902  * Appends an in6_multi to the list to be released later.
1903  *
1904  * Caller must be holding mli_lock.
1905  */
1906 static void
1907 mld_append_relq(struct mld_ifinfo *mli, struct in6_multi *inm)
1908 {
1909 	MLI_LOCK_ASSERT_HELD(mli);
1910 	if (inm->in6m_in_nrele) {
1911 		os_log_debug(OS_LOG_DEFAULT, "%s: inm %llx already on relq ifp %s\n",
1912 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm),
1913 		    mli->mli_ifp != NULL ? if_name(mli->mli_ifp) : "<null>");
1914 		return;
1915 	}
1916 	os_log_debug(OS_LOG_DEFAULT, "%s: adding inm %llx on relq ifp %s\n",
1917 	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm),
1918 	    mli->mli_ifp != NULL ? if_name(mli->mli_ifp) : "<null>");
1919 	inm->in6m_in_nrele = true;
1920 	SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, in6m_nrele);
1921 }
1922 
1923 /*
1924  * Free the in6_multi reference(s) for this MLD lifecycle.
1925  *
1926  * Caller must be holding mli_lock.
1927  */
1928 static void
1929 mld_flush_relq(struct mld_ifinfo *mli, struct mld_in6m_relhead *in6m_dthead)
1930 {
1931 	struct in6_multi *inm;
1932 	SLIST_HEAD(, in6_multi) temp_relinmhead;
1933 
1934 	/*
1935 	 * Before dropping the mli_lock, copy all the items in the
1936 	 * release list to a temporary list to prevent other threads
1937 	 * from changing mli_relinmhead while we are traversing it.
1938 	 */
1939 	MLI_LOCK_ASSERT_HELD(mli);
1940 	SLIST_INIT(&temp_relinmhead);
1941 	while ((inm = SLIST_FIRST(&mli->mli_relinmhead)) != NULL) {
1942 		SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
1943 		SLIST_INSERT_HEAD(&temp_relinmhead, inm, in6m_nrele);
1944 	}
1945 	MLI_UNLOCK(mli);
1946 	in6_multihead_lock_exclusive();
1947 	while ((inm = SLIST_FIRST(&temp_relinmhead)) != NULL) {
1948 		int lastref;
1949 
1950 		SLIST_REMOVE_HEAD(&temp_relinmhead, in6m_nrele);
1951 		IN6M_LOCK(inm);
1952 		os_log_debug(OS_LOG_DEFAULT, "%s: flushing inm %llx on relq ifp %s\n",
1953 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm),
1954 		    inm->in6m_ifp != NULL ? if_name(inm->in6m_ifp) : "<null>");
1955 		VERIFY(inm->in6m_in_nrele == true);
1956 		inm->in6m_in_nrele = false;
1957 		VERIFY(inm->in6m_nrelecnt != 0);
1958 		inm->in6m_nrelecnt--;
1959 		lastref = in6_multi_detach(inm);
1960 		VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
1961 		    inm->in6m_reqcnt == 0));
1962 		IN6M_UNLOCK(inm);
1963 		/* from mli_relinmhead */
1964 		IN6M_REMREF(inm);
1965 		/* from in6_multihead_list */
1966 		if (lastref) {
1967 			/*
1968 			 * Defer releasing our final reference, as we
1969 			 * are holding the MLD lock at this point, and
1970 			 * we could end up with locking issues later on
1971 			 * (while issuing SIOCDELMULTI) when this is the
1972 			 * final reference count.  Let the caller do it
1973 			 * when it is safe.
1974 			 */
1975 			MLD_ADD_DETACHED_IN6M(in6m_dthead, inm);
1976 		}
1977 	}
1978 	in6_multihead_lock_done();
1979 	MLI_LOCK(mli);
1980 }
1981 
1982 /*
1983  * Update host report group timer.
1984  * Will update the global pending timer flags.
1985  */
1986 static void
1987 mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
1988 {
1989 #pragma unused(mld_version)
1990 	int report_timer_expired;
1991 
1992 	MLD_LOCK_ASSERT_HELD();
1993 	IN6M_LOCK_ASSERT_HELD(inm);
1994 	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
1995 
1996 	if (inm->in6m_timer == 0) {
1997 		report_timer_expired = 0;
1998 	} else if (--inm->in6m_timer == 0) {
1999 		report_timer_expired = 1;
2000 	} else {
2001 		current_state_timers_running6 = 1;
2002 		/* caller will schedule timer */
2003 		return;
2004 	}
2005 
2006 	switch (inm->in6m_state) {
2007 	case MLD_NOT_MEMBER:
2008 	case MLD_SILENT_MEMBER:
2009 	case MLD_IDLE_MEMBER:
2010 	case MLD_LAZY_MEMBER:
2011 	case MLD_SLEEPING_MEMBER:
2012 	case MLD_AWAKENING_MEMBER:
2013 		break;
2014 	case MLD_REPORTING_MEMBER:
2015 		if (report_timer_expired) {
2016 			inm->in6m_state = MLD_IDLE_MEMBER;
2017 			(void) mld_v1_transmit_report(inm,
2018 			    MLD_LISTENER_REPORT);
2019 			IN6M_LOCK_ASSERT_HELD(inm);
2020 			MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
2021 		}
2022 		break;
2023 	case MLD_G_QUERY_PENDING_MEMBER:
2024 	case MLD_SG_QUERY_PENDING_MEMBER:
2025 	case MLD_LEAVING_MEMBER:
2026 		break;
2027 	}
2028 }
2029 
2030 /*
2031  * Update a group's timers for MLDv2.
2032  * Will update the global pending timer flags.
2033  * Note: Unlocked read from mli.
2034  */
2035 static void
2036 mld_v2_process_group_timers(struct mld_ifinfo *mli,
2037     struct ifqueue *qrq, struct ifqueue *scq,
2038     struct in6_multi *inm, const int uri_sec)
2039 {
2040 	int query_response_timer_expired;
2041 	int state_change_retransmit_timer_expired;
2042 
2043 	MLD_LOCK_ASSERT_HELD();
2044 	IN6M_LOCK_ASSERT_HELD(inm);
2045 	MLI_LOCK_ASSERT_HELD(mli);
2046 	VERIFY(mli == inm->in6m_mli);
2047 
2048 	query_response_timer_expired = 0;
2049 	state_change_retransmit_timer_expired = 0;
2050 
2051 	/*
2052 	 * During a transition from compatibility mode back to MLDv2,
2053 	 * a group record in REPORTING state may still have its group
2054 	 * timer active. This is a no-op in this function; it is easier
2055 	 * to deal with it here than to complicate the timeout path.
2056 	 */
2057 	if (inm->in6m_timer == 0) {
2058 		query_response_timer_expired = 0;
2059 	} else if (--inm->in6m_timer == 0) {
2060 		query_response_timer_expired = 1;
2061 	} else {
2062 		current_state_timers_running6 = 1;
2063 		/* caller will schedule timer */
2064 	}
2065 
2066 	if (inm->in6m_sctimer == 0) {
2067 		state_change_retransmit_timer_expired = 0;
2068 	} else if (--inm->in6m_sctimer == 0) {
2069 		state_change_retransmit_timer_expired = 1;
2070 	} else {
2071 		state_change_timers_running6 = 1;
2072 		/* caller will schedule timer */
2073 	}
2074 
2075 	/* We are in timer callback, so be quick about it. */
2076 	if (!state_change_retransmit_timer_expired &&
2077 	    !query_response_timer_expired) {
2078 		return;
2079 	}
2080 
2081 	switch (inm->in6m_state) {
2082 	case MLD_NOT_MEMBER:
2083 	case MLD_SILENT_MEMBER:
2084 	case MLD_SLEEPING_MEMBER:
2085 	case MLD_LAZY_MEMBER:
2086 	case MLD_AWAKENING_MEMBER:
2087 	case MLD_IDLE_MEMBER:
2088 		break;
2089 	case MLD_G_QUERY_PENDING_MEMBER:
2090 	case MLD_SG_QUERY_PENDING_MEMBER:
2091 		/*
2092 		 * Respond to a previously pending Group-Specific
2093 		 * or Group-and-Source-Specific query by enqueueing
2094 		 * the appropriate Current-State report for
2095 		 * immediate transmission.
2096 		 */
2097 		if (query_response_timer_expired) {
2098 			int retval;
2099 
2100 			retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
2101 			    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
2102 			    0);
2103 			MLD_PRINTF(("%s: enqueue record = %d\n",
2104 			    __func__, retval));
2105 			inm->in6m_state = MLD_REPORTING_MEMBER;
2106 			in6m_clear_recorded(inm);
2107 		}
2108 		OS_FALLTHROUGH;
2109 	case MLD_REPORTING_MEMBER:
2110 	case MLD_LEAVING_MEMBER:
2111 		if (state_change_retransmit_timer_expired) {
2112 			/*
2113 			 * State-change retransmission timer fired.
2114 			 * If there are any further pending retransmissions,
2115 			 * set the global pending state-change flag, and
2116 			 * reset the timer.
2117 			 */
2118 			if (--inm->in6m_scrv > 0) {
2119 				inm->in6m_sctimer = (uint16_t)uri_sec;
2120 				state_change_timers_running6 = 1;
2121 				/* caller will schedule timer */
2122 			}
2123 			/*
2124 			 * Retransmit the previously computed state-change
2125 			 * report. If there are no further pending
2126 			 * retransmissions, the mbuf queue will be consumed.
2127 			 * Update T0 state to T1 as we have now sent
2128 			 * a state-change.
2129 			 */
2130 			(void) mld_v2_merge_state_changes(inm, scq);
2131 
2132 			in6m_commit(inm);
2133 			MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2134 			    ip6_sprintf(&inm->in6m_addr),
2135 			    if_name(inm->in6m_ifp)));
2136 
2137 			/*
2138 			 * If we are leaving the group for good, make sure
2139 			 * we release MLD's reference to it.
2140 			 * This release must be deferred using a SLIST,
2141 			 * as we are called from a loop which traverses
2142 			 * the in_ifmultiaddr TAILQ.
2143 			 */
2144 			if (inm->in6m_state == MLD_LEAVING_MEMBER &&
2145 			    inm->in6m_scrv == 0) {
2146 				inm->in6m_state = MLD_NOT_MEMBER;
2147 				/*
2148 				 * A reference has already been held in
2149 				 * mld_final_leave() for this inm, so
2150 				 * no need to hold another one.  We also
2151 				 * bumped up its request count then, so
2152 				 * that it stays in in6_multihead.  Both
2153 				 * of them will be released when it is
2154 				 * dequeued later on.
2155 				 */
2156 				VERIFY(inm->in6m_nrelecnt != 0);
2157 				mld_append_relq(mli, inm);
2158 			}
2159 		}
2160 		break;
2161 	}
2162 }
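/*
 * Worked example of the countdown above (values illustrative): a leave
 * with mli_rv = 2 arms in6m_scrv = 2.  On the first in6m_sctimer expiry,
 * in6m_scrv drops to 1, one copy of the state-change report is merged
 * and sent, and in6m_sctimer is re-armed to uri_sec (a per-pass random
 * pick in [1, mli_uri] made by mld_timeout()).  On the second expiry,
 * in6m_scrv drops to 0, the final copy is sent, and a group in
 * MLD_LEAVING_MEMBER is handed to mld_append_relq() for deferred
 * release.
 */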
2163 
2164 /*
2165  * Switch to a different version on the given interface,
2166  * as per Section 9.12.
2167  */
2168 static uint32_t
2169 mld_set_version(struct mld_ifinfo *mli, const int mld_version)
2170 {
2171 	int old_version_timer;
2172 
2173 	MLI_LOCK_ASSERT_HELD(mli);
2174 
2175 	os_log(OS_LOG_DEFAULT, "%s: switching to v%d on ifp %s\n", __func__,
2176 	    mld_version, if_name(mli->mli_ifp));
2177 
2178 	if (mld_version == MLD_VERSION_1) {
2179 		/*
2180 		 * Compute the "Older Version Querier Present" timer as per
2181 		 * Section 9.12, in seconds.
2182 		 */
2183 		old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
2184 		mli->mli_v1_timer = old_version_timer;
2185 	}
2186 
2187 	if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
2188 		mli->mli_version = MLD_VERSION_1;
2189 		mld_v2_cancel_link_timers(mli);
2190 	}
2191 
2192 	MLI_LOCK_ASSERT_HELD(mli);
2193 
2194 	return mli->mli_v1_timer;
2195 }
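/*
 * Worked example: with the RFC 3810 defaults of Robustness Variable
 * (mli_rv) = 2, Query Interval (mli_qi) = 125 s and Query Response
 * Interval (mli_qri) = 10 s, the computation above yields
 * (2 * 125) + 10 = 260 seconds of MLDv1 compatibility mode.
 */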
2196 
2197 /*
2198  * Cancel pending MLDv2 timers for the given link and all groups
2199  * joined on it; state-change, general-query, and group-query timers.
2200  *
2201  * Only ever called on a transition from v2 to Compatibility mode. Kill
2202  * the timers stone dead (this may be expensive for large N groups), they
2203  * will be restarted if Compatibility Mode deems that they must be due to
2204  * query processing.
2205  */
2206 static void
2207 mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
2208 {
2209 	struct ifnet            *ifp;
2210 	struct in6_multi        *inm;
2211 	struct in6_multistep    step;
2212 
2213 	MLI_LOCK_ASSERT_HELD(mli);
2214 
2215 	MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n", __func__,
2216 	    (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp)));
2217 
2218 	/*
2219 	 * Stop the v2 General Query Response on this link stone dead.
2220 	 * If timer is woken up due to interface_timers_running6,
2221 	 * the flag will be cleared if there are no pending link timers.
2222 	 */
2223 	mli->mli_v2_timer = 0;
2224 
2225 	/*
2226 	 * Now clear the current-state and state-change report timers
2227 	 * for all memberships scoped to this link.
2228 	 */
2229 	ifp = mli->mli_ifp;
2230 	MLI_UNLOCK(mli);
2231 
2232 	in6_multihead_lock_shared();
2233 	IN6_FIRST_MULTI(step, inm);
2234 	while (inm != NULL) {
2235 		IN6M_LOCK(inm);
2236 		if (inm->in6m_ifp != ifp) {
2237 			goto next;
2238 		}
2239 
2240 		switch (inm->in6m_state) {
2241 		case MLD_NOT_MEMBER:
2242 		case MLD_SILENT_MEMBER:
2243 		case MLD_IDLE_MEMBER:
2244 		case MLD_LAZY_MEMBER:
2245 		case MLD_SLEEPING_MEMBER:
2246 		case MLD_AWAKENING_MEMBER:
2247 			/*
2248 			 * These states are either not relevant in v2 mode,
2249 			 * or are unreported. Do nothing.
2250 			 */
2251 			break;
2252 		case MLD_LEAVING_MEMBER:
2253 			/*
2254 			 * If we are leaving the group and switching
2255 			 * version, we need to release the final
2256 			 * reference held for issuing the INCLUDE {}.
2257 			 * During mld_final_leave(), we bumped up both the
2258 			 * request and reference counts.  Since we cannot
2259 			 * call in6_multi_detach() here, defer this task to
2260 			 * the timer routine.
2261 			 */
2262 			VERIFY(inm->in6m_nrelecnt != 0);
2263 			MLI_LOCK(mli);
2264 			mld_append_relq(mli, inm);
2265 			MLI_UNLOCK(mli);
2266 			OS_FALLTHROUGH;
2267 		case MLD_G_QUERY_PENDING_MEMBER:
2268 		case MLD_SG_QUERY_PENDING_MEMBER:
2269 			in6m_clear_recorded(inm);
2270 			OS_FALLTHROUGH;
2271 		case MLD_REPORTING_MEMBER:
2272 			inm->in6m_state = MLD_REPORTING_MEMBER;
2273 			break;
2274 		}
2275 		/*
2276 		 * Always clear state-change and group report timers.
2277 		 * Free any pending MLDv2 state-change records.
2278 		 */
2279 		inm->in6m_sctimer = 0;
2280 		inm->in6m_timer = 0;
2281 		IF_DRAIN(&inm->in6m_scq);
2282 next:
2283 		IN6M_UNLOCK(inm);
2284 		IN6_NEXT_MULTI(step, inm);
2285 	}
2286 	in6_multihead_lock_done();
2287 
2288 	MLI_LOCK(mli);
2289 }
2290 
2291 /*
2292  * Update the Older Version Querier Present timers for a link.
2293  * See Section 9.12 of RFC 3810.
2294  */
2295 static void
2296 mld_v1_process_querier_timers(struct mld_ifinfo *mli)
2297 {
2298 	MLI_LOCK_ASSERT_HELD(mli);
2299 
2300 	if (mld_v2enable && mli->mli_version != MLD_VERSION_2 &&
2301 	    --mli->mli_v1_timer == 0) {
2302 		/*
2303 		 * MLDv1 Querier Present timer expired; revert to MLDv2.
2304 		 */
2305 		os_log(OS_LOG_DEFAULT, "%s: transition from v%d -> v%d on %s\n",
2306 		    __func__, mli->mli_version, MLD_VERSION_2,
2307 		    if_name(mli->mli_ifp));
2308 		mli->mli_version = MLD_VERSION_2;
2309 	}
2310 }
2311 
2312 /*
2313  * Transmit an MLDv1 report immediately.
2314  */
2315 static int
2316 mld_v1_transmit_report(struct in6_multi *in6m, const uint8_t type)
2317 {
2318 	struct ifnet            *ifp;
2319 	struct in6_ifaddr       *ia;
2320 	struct ip6_hdr          *ip6;
2321 	struct mbuf             *mh, *md;
2322 	struct mld_hdr          *mld;
2323 	int                     error = 0;
2324 
2325 	IN6M_LOCK_ASSERT_HELD(in6m);
2326 	MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);
2327 
2328 	ifp = in6m->in6m_ifp;
2329 	/* ia may be NULL if link-local address is tentative. */
2330 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
2331 
2332 	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
2333 	if (mh == NULL) {
2334 		if (ia != NULL) {
2335 			ifa_remref(&ia->ia_ifa);
2336 		}
2337 		return ENOMEM;
2338 	}
2339 	MGET(md, M_DONTWAIT, MT_DATA);
2340 	if (md == NULL) {
2341 		m_free(mh);
2342 		if (ia != NULL) {
2343 			ifa_remref(&ia->ia_ifa);
2344 		}
2345 		return ENOMEM;
2346 	}
2347 	mh->m_next = md;
2348 
2349 	/*
2350 	 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
2351 	 * that ether_output() does not need to allocate another mbuf
2352 	 * for the header in the most common case.
2353 	 */
2354 	MH_ALIGN(mh, sizeof(struct ip6_hdr));
2355 	mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
2356 	mh->m_len = sizeof(struct ip6_hdr);
2357 
2358 	ip6 = mtod(mh, struct ip6_hdr *);
2359 	ip6->ip6_flow = 0;
2360 	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2361 	ip6->ip6_vfc |= IPV6_VERSION;
2362 	ip6->ip6_nxt = IPPROTO_ICMPV6;
2363 	if (ia != NULL) {
2364 		IFA_LOCK(&ia->ia_ifa);
2365 	}
2366 	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
2367 	ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia);
2368 	if (ia != NULL) {
2369 		IFA_UNLOCK(&ia->ia_ifa);
2370 		ifa_remref(&ia->ia_ifa);
2371 		ia = NULL;
2372 	}
2373 	ip6->ip6_dst = in6m->in6m_addr;
2374 	ip6_output_setdstifscope(mh, in6m->ifscope, NULL);
2375 
2376 	md->m_len = sizeof(struct mld_hdr);
2377 	mld = mtod(md, struct mld_hdr *);
2378 	mld->mld_type = type;
2379 	mld->mld_code = 0;
2380 	mld->mld_cksum = 0;
2381 	mld->mld_maxdelay = 0;
2382 	mld->mld_reserved = 0;
2383 	mld->mld_addr = in6m->in6m_addr;
2384 	in6_clearscope(&mld->mld_addr);
2385 	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
2386 	    sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
2387 
2388 	mld_save_context(mh, ifp);
2389 	mh->m_flags |= M_MLDV1;
2390 
2391 	/*
2392 	 * Due to the fact that at this point we are possibly holding
2393 	 * in6_multihead_lock in shared or exclusive mode, we can't call
2394 	 * mld_dispatch_packet() here since that will eventually call
2395 	 * ip6_output(), which will try to lock in6_multihead_lock and cause
2396 	 * a deadlock.
2397 	 * Instead we defer the work to the mld_timeout() thread, thus
2398 	 * avoiding unlocking in6_multihead_lock here.
2399 	 */
2400 	if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
2401 		os_log_error(OS_LOG_DEFAULT, "%s: v1 outbound queue full\n", __func__);
2402 		error = ENOMEM;
2403 		m_freem(mh);
2404 	} else {
2405 		IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
2406 		VERIFY(error == 0);
2407 	}
2408 
2409 	return error;
2410 }
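/*
 * Layout sketch of the two-mbuf chain built above:
 *
 *	mh: struct ip6_hdr, 40 bytes (version/flow, next header ICMPv6,
 *	    src = link-local address or ::, dst = the group address)
 *	md: struct mld_hdr, 24 bytes (type, code, checksum, maximum
 *	    response delay, reserved, 16-byte multicast address)
 *
 * in6_cksum() computes mld_cksum over the 24-byte MLD portion using
 * the IPv6 pseudo-header.
 */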
2411 
2412 /*
2413  * Process a state change from the upper layer for the given IPv6 group.
2414  *
2415  * Each socket holds a reference on the in6_multi in its own ip6_moptions.
2416  * The socket layer will have made the necessary updates to the group
2417  * state; it is now up to MLD to issue a state change report if there
2418  * has been any change between T0 (when the last state-change was issued)
2419  * and T1 (now).
2420  *
2421  * We use the MLDv2 state machine at group level. The MLD module
2422  * however makes the decision as to which MLD protocol version to speak.
2423  * A state change *from* INCLUDE {} always means an initial join.
2424  * A state change *to* INCLUDE {} always means a final leave.
2425  *
2426  * If delay is non-zero, and the state change is an initial multicast
2427  * join, the state change report will be delayed by 'delay' ticks
2428  * in units of seconds if MLDv1 is active on the link; otherwise
2429  * the initial MLDv2 state change report will be delayed by whichever
2430  * is sooner, a pending state-change timer or delay itself.
2431  */
2432 int
2433 mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp,
2434     const int delay)
2435 {
2436 	struct mld_ifinfo *mli;
2437 	struct ifnet *ifp;
2438 	int error = 0;
2439 
2440 	VERIFY(mtp != NULL);
2441 	bzero(mtp, sizeof(*mtp));
2442 
2443 	IN6M_LOCK_ASSERT_HELD(inm);
2444 	VERIFY(inm->in6m_mli != NULL);
2445 	MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);
2446 
2447 	/*
2448 	 * Try to detect if the upper layer just asked us to change state
2449 	 * for an interface which has now gone away.
2450 	 */
2451 	VERIFY(inm->in6m_ifma != NULL);
2452 	ifp = inm->in6m_ifma->ifma_ifp;
2453 	/*
2454 	 * Sanity check that netinet6's notion of ifp is the same as net's.
2455 	 */
2456 	VERIFY(inm->in6m_ifp == ifp);
2457 
2458 	mli = MLD_IFINFO(ifp);
2459 	VERIFY(mli != NULL);
2460 
2461 	/*
2462 	 * If we detect a state transition to or from MCAST_UNDEFINED
2463 	 * for this group, then we are starting or finishing an MLD
2464 	 * life cycle for this group.
2465 	 */
2466 	if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
2467 		MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__,
2468 		    inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
2469 		if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
2470 			MLD_PRINTF(("%s: initial join\n", __func__));
2471 			error = mld_initial_join(inm, mli, mtp, delay);
2472 			goto out;
2473 		} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
2474 			MLD_PRINTF(("%s: final leave\n", __func__));
2475 			mld_final_leave(inm, mli, mtp);
2476 			goto out;
2477 		}
2478 	} else {
2479 		MLD_PRINTF(("%s: filter set change\n", __func__));
2480 	}
2481 
2482 	error = mld_handle_state_change(inm, mli, mtp);
2483 out:
2484 	return error;
2485 }
2486 
2487 /*
2488  * Perform the initial join for an MLD group.
2489  *
2490  * When joining a group:
2491  *  If the group should have its MLD traffic suppressed, do nothing.
2492  *  MLDv1 starts sending MLDv1 host membership reports.
2493  *  MLDv2 will schedule an MLDv2 state-change report containing the
2494  *  initial state of the membership.
2495  *
2496  * If the delay argument is non-zero, then we must delay sending the
2497  * initial state change for delay ticks (in units of seconds).
2498  */
2499 static int
2500 mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
2501     struct mld_tparams *mtp, const int delay)
2502 {
2503 	struct ifnet            *ifp;
2504 	struct ifqueue          *ifq;
2505 	int                      error, retval, syncstates;
2506 	int                      odelay;
2507 
2508 	IN6M_LOCK_ASSERT_HELD(inm);
2509 	MLI_LOCK_ASSERT_NOTHELD(mli);
2510 	VERIFY(mtp != NULL);
2511 
2512 	MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n",
2513 	    __func__, ip6_sprintf(&inm->in6m_addr),
2514 	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2515 	    if_name(inm->in6m_ifp)));
2516 
2517 	error = 0;
2518 	syncstates = 1;
2519 
2520 	ifp = inm->in6m_ifp;
2521 
2522 	MLI_LOCK(mli);
2523 	VERIFY(mli->mli_ifp == ifp);
2524 
2525 	/*
2526 	 * Avoid MLD if group is:
2527 	 * 1. Joined on loopback, OR
2528 	 * 2. On a link that is marked MLIF_SILENT, OR
2529 	 * 3. rdar://problem/19227650 Is link local scoped and
2530 	 *    on cellular interface, OR
2531 	 * 4. Is a type that should not be reported (node local
2532 	 *    or all node link local multicast).
2533 	 * All other groups enter the appropriate state machine
2534 	 * for the version in use on this link.
2535 	 */
2536 	if ((ifp->if_flags & IFF_LOOPBACK) ||
2537 	    (mli->mli_flags & MLIF_SILENT) ||
2538 	    (IFNET_IS_CELLULAR(ifp) &&
2539 	    (IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr) || IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(&inm->in6m_addr))) ||
2540 	    !mld_is_addr_reported(&inm->in6m_addr)) {
2541 		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2542 		    __func__));
2543 		inm->in6m_state = MLD_SILENT_MEMBER;
2544 		inm->in6m_timer = 0;
2545 	} else {
2546 		/*
2547 		 * Deal with overlapping in6_multi lifecycle.
2548 		 * If this group was LEAVING, then make sure
2549 		 * we drop the reference we picked up to keep the
2550 		 * group around for the final INCLUDE {} enqueue.
2551 		 * Since we cannot call in6_multi_detach() here,
2552 		 * defer this task to the timer routine.
2553 		 */
2554 		if (mli->mli_version == MLD_VERSION_2 &&
2555 		    inm->in6m_state == MLD_LEAVING_MEMBER) {
2556 			VERIFY(inm->in6m_nrelecnt != 0);
2557 			mld_append_relq(mli, inm);
2558 		}
2559 
2560 		inm->in6m_state = MLD_REPORTING_MEMBER;
2561 
2562 		switch (mli->mli_version) {
2563 		case MLD_VERSION_1:
2564 			/*
2565 			 * If a delay was provided, only use it if
2566 			 * it is greater than the delay normally
2567 			 * used for an MLDv1 state change report,
2568 			 * and delay sending the initial MLDv1 report
2569 			 * by not transitioning to the IDLE state.
2570 			 */
2571 			odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI);
2572 			if (delay) {
2573 				inm->in6m_timer = max(delay, odelay);
2574 				mtp->cst = 1;
2575 			} else {
2576 				inm->in6m_state = MLD_IDLE_MEMBER;
2577 				error = mld_v1_transmit_report(inm,
2578 				    MLD_LISTENER_REPORT);
2579 
2580 				IN6M_LOCK_ASSERT_HELD(inm);
2581 				MLI_LOCK_ASSERT_HELD(mli);
2582 
2583 				if (error == 0) {
2584 					inm->in6m_timer = odelay;
2585 					mtp->cst = 1;
2586 				}
2587 			}
2588 			break;
2589 
2590 		case MLD_VERSION_2:
2591 			/*
2592 			 * Defer update of T0 to T1, until the first copy
2593 			 * of the state change has been transmitted.
2594 			 */
2595 			syncstates = 0;
2596 
2597 			/*
2598 			 * Immediately enqueue a State-Change Report for
2599 			 * this interface, freeing any previous reports.
2600 			 * Don't kick the timers if there is nothing to do,
2601 			 * or if an error occurred.
2602 			 */
2603 			ifq = &inm->in6m_scq;
2604 			IF_DRAIN(ifq);
2605 			retval = mld_v2_enqueue_group_record(ifq, inm, 1,
2606 			    0, 0, (mli->mli_flags & MLIF_USEALLOW));
2607 			mtp->cst = (ifq->ifq_len > 0);
2608 			MLD_PRINTF(("%s: enqueue record = %d\n",
2609 			    __func__, retval));
2610 			if (retval <= 0) {
2611 				error = retval * -1;
2612 				break;
2613 			}
2614 
2615 			/*
2616 			 * Schedule transmission of pending state-change
2617 			 * report up to RV times for this link. The timer
2618 			 * will fire at the next mld_timeout (1 second),
2619 			 * giving us an opportunity to merge the reports.
2620 			 *
2621 			 * If a delay was provided to this function, only
2622 			 * use this delay if sooner than the existing one.
2623 			 */
2624 			VERIFY(mli->mli_rv > 1);
2625 			inm->in6m_scrv = (uint16_t)mli->mli_rv;
2626 			if (delay) {
2627 				if (inm->in6m_sctimer > 1) {
2628 					inm->in6m_sctimer =
2629 					    MIN(inm->in6m_sctimer, (uint16_t)delay);
2630 				} else {
2631 					inm->in6m_sctimer = (uint16_t)delay;
2632 				}
2633 			} else {
2634 				inm->in6m_sctimer = 1;
2635 			}
2636 			mtp->sct = 1;
2637 			error = 0;
2638 			break;
2639 		}
2640 	}
2641 	MLI_UNLOCK(mli);
2642 
2643 	/*
2644 	 * Only update the T0 state if state change is atomic,
2645 	 * i.e. we don't need to wait for a timer to fire before we
2646 	 * can consider the state change to have been communicated.
2647 	 */
2648 	if (syncstates) {
2649 		in6m_commit(inm);
2650 		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2651 		    ip6_sprintf(&inm->in6m_addr),
2652 		    if_name(inm->in6m_ifp)));
2653 	}
2654 
2655 	return error;
2656 }
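/*
 * Example of the MLDv2 delay handling above (numbers illustrative):
 * an initial join with delay = 3 on a group with no pending
 * state-change timer sets in6m_sctimer = 3 and in6m_scrv = mli_rv,
 * so the first of the merged state-change reports goes out about
 * three mld_timeout() ticks (seconds) later; with delay = 0,
 * in6m_sctimer = 1 sends it on the next tick.
 */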
2657 
2658 /*
2659  * Issue an intermediate state change during the life-cycle.
2660  */
2661 static int
2662 mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli,
2663     struct mld_tparams *mtp)
2664 {
2665 	struct ifnet            *ifp;
2666 	int                      retval = 0;
2667 
2668 	IN6M_LOCK_ASSERT_HELD(inm);
2669 	MLI_LOCK_ASSERT_NOTHELD(mli);
2670 	VERIFY(mtp != NULL);
2671 
2672 	MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n",
2673 	    __func__, ip6_sprintf(&inm->in6m_addr),
2674 	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2675 	    if_name(inm->in6m_ifp)));
2676 
2677 	ifp = inm->in6m_ifp;
2678 
2679 	MLI_LOCK(mli);
2680 	VERIFY(mli->mli_ifp == ifp);
2681 
2682 	if ((ifp->if_flags & IFF_LOOPBACK) ||
2683 	    (mli->mli_flags & MLIF_SILENT) ||
2684 	    !mld_is_addr_reported(&inm->in6m_addr) ||
2685 	    (mli->mli_version != MLD_VERSION_2)) {
2686 		MLI_UNLOCK(mli);
2687 		if (!mld_is_addr_reported(&inm->in6m_addr)) {
2688 			MLD_PRINTF(("%s: not kicking state machine for silent "
2689 			    "group\n", __func__));
2690 		}
2691 		MLD_PRINTF(("%s: nothing to do\n", __func__));
2692 		in6m_commit(inm);
2693 		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2694 		    ip6_sprintf(&inm->in6m_addr),
2695 		    if_name(inm->in6m_ifp)));
2696 		goto done;
2697 	}
2698 
2699 	IF_DRAIN(&inm->in6m_scq);
2700 
2701 	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2702 	    (mli->mli_flags & MLIF_USEALLOW));
2703 	mtp->cst = (inm->in6m_scq.ifq_len > 0);
2704 	MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
2705 	if (retval <= 0) {
2706 		MLI_UNLOCK(mli);
2707 		retval *= -1;
2708 		goto done;
2709 	} else {
2710 		retval = 0;
2711 	}
2712 
2713 	/*
2714 	 * If record(s) were enqueued, start the state-change
2715 	 * report timer for this group.
2716 	 */
2717 	inm->in6m_scrv = (uint16_t)mli->mli_rv;
2718 	inm->in6m_sctimer = 1;
2719 	mtp->sct = 1;
2720 	MLI_UNLOCK(mli);
2721 
2722 done:
2723 	return retval;
2724 }
2725 
2726 /*
2727  * Perform the final leave for a multicast address.
2728  *
2729  * When leaving a group:
2730  *  MLDv1 sends a DONE message, if and only if we are the reporter.
2731  *  MLDv2 enqueues a state-change report containing a transition
2732  *  to INCLUDE {} for immediate transmission.
2733  */
2734 static void
2735 mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli,
2736     struct mld_tparams *mtp)
2737 {
2738 	int syncstates = 1;
2739 
2740 	IN6M_LOCK_ASSERT_HELD(inm);
2741 	MLI_LOCK_ASSERT_NOTHELD(mli);
2742 	VERIFY(mtp != NULL);
2743 
2744 	MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n",
2745 	    __func__, ip6_sprintf(&inm->in6m_addr),
2746 	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2747 	    if_name(inm->in6m_ifp)));
2748 
2749 	switch (inm->in6m_state) {
2750 	case MLD_NOT_MEMBER:
2751 	case MLD_SILENT_MEMBER:
2752 	case MLD_LEAVING_MEMBER:
2753 		/* Already leaving or left; do nothing. */
2754 		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2755 		    __func__));
2756 		break;
2757 	case MLD_REPORTING_MEMBER:
2758 	case MLD_IDLE_MEMBER:
2759 	case MLD_G_QUERY_PENDING_MEMBER:
2760 	case MLD_SG_QUERY_PENDING_MEMBER:
2761 		MLI_LOCK(mli);
2762 		if (mli->mli_version == MLD_VERSION_1) {
2763 			if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2764 			    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
2765 				panic("%s: MLDv2 state reached, not MLDv2 "
2766 				    "mode\n", __func__);
2767 				/* NOTREACHED */
2768 			}
2769 			/* schedule timer if enqueue is successful */
2770 			mtp->cst = (mld_v1_transmit_report(inm,
2771 			    MLD_LISTENER_DONE) == 0);
2772 
2773 			IN6M_LOCK_ASSERT_HELD(inm);
2774 			MLI_LOCK_ASSERT_HELD(mli);
2775 
2776 			inm->in6m_state = MLD_NOT_MEMBER;
2777 		} else if (mli->mli_version == MLD_VERSION_2) {
2778 			/*
2779 			 * Stop group timer and all pending reports.
2780 			 * Immediately enqueue a state-change report
2781 			 * TO_IN {} to be sent on the next timeout,
2782 			 * giving us an opportunity to merge reports.
2783 			 */
2784 			IF_DRAIN(&inm->in6m_scq);
2785 			inm->in6m_timer = 0;
2786 			inm->in6m_scrv = (uint16_t)mli->mli_rv;
2787 			MLD_PRINTF(("%s: Leaving %s/%s with %d "
2788 			    "pending retransmissions.\n", __func__,
2789 			    ip6_sprintf(&inm->in6m_addr),
2790 			    if_name(inm->in6m_ifp),
2791 			    inm->in6m_scrv));
2792 			if (inm->in6m_scrv == 0) {
2793 				inm->in6m_state = MLD_NOT_MEMBER;
2794 				inm->in6m_sctimer = 0;
2795 			} else {
2796 				int retval;
2797 				/*
2798 				 * Stick around in the in6_multihead list;
2799 				 * the final detach will be issued by
2800 				 * mld_v2_process_group_timers() when
2801 				 * the retransmit timer expires.
2802 				 */
2803 				IN6M_ADDREF_LOCKED(inm);
2804 				VERIFY(inm->in6m_debug & IFD_ATTACHED);
2805 				inm->in6m_reqcnt++;
2806 				VERIFY(inm->in6m_reqcnt >= 1);
2807 				inm->in6m_nrelecnt++;
2808 				VERIFY(inm->in6m_nrelecnt != 0);
2809 
2810 				retval = mld_v2_enqueue_group_record(
2811 					&inm->in6m_scq, inm, 1, 0, 0,
2812 					(mli->mli_flags & MLIF_USEALLOW));
2813 				mtp->cst = (inm->in6m_scq.ifq_len > 0);
2814 				KASSERT(retval != 0,
2815 				    ("%s: enqueue record = %d\n", __func__,
2816 				    retval));
2817 
2818 				inm->in6m_state = MLD_LEAVING_MEMBER;
2819 				inm->in6m_sctimer = 1;
2820 				mtp->sct = 1;
2821 				syncstates = 0;
2822 			}
2823 		}
2824 		MLI_UNLOCK(mli);
2825 		break;
2826 	case MLD_LAZY_MEMBER:
2827 	case MLD_SLEEPING_MEMBER:
2828 	case MLD_AWAKENING_MEMBER:
2829 		/* Our reports are suppressed; do nothing. */
2830 		break;
2831 	}
2832 
2833 	if (syncstates) {
2834 		in6m_commit(inm);
2835 		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2836 		    ip6_sprintf(&inm->in6m_addr),
2837 		    if_name(inm->in6m_ifp)));
2838 		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2839 		MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n",
2840 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr),
2841 		    if_name(inm->in6m_ifp)));
2842 	}
2843 }
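/*
 * Reference accounting sketch for the MLDv2 leave path above: the
 * IN6M_ADDREF_LOCKED() and the in6m_reqcnt/in6m_nrelecnt bumps keep
 * the group alive and in in6_multihead while the in6m_scrv
 * retransmissions drain; mld_v2_process_group_timers() later moves
 * the group to MLD_NOT_MEMBER and queues it via mld_append_relq(),
 * and mld_flush_relq() drops the deferred references.
 */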
2844 
2845 /*
2846  * Enqueue an MLDv2 group record to the given output queue.
2847  *
2848  * If is_state_change is zero, a current-state record is appended.
2849  * If is_state_change is non-zero, a state-change report is appended.
2850  *
2851  * If is_group_query is non-zero, an mbuf packet chain is allocated.
2852  * If is_group_query is zero, and if there is a packet with free space
2853  * at the tail of the queue, the record will be appended to it, provided
2854  * there is enough free space.
2855  * Otherwise a new mbuf packet chain is allocated.
2856  *
2857  * If is_source_query is non-zero, each source is checked to see if
2858  * it was recorded for a Group-Source query, and will be omitted if
2859  * it is not both in-mode and recorded.
2860  *
2861  * If use_block_allow is non-zero, state change reports for initial join
2862  * and final leave, on an inclusive mode group with a source list, will be
2863  * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2864  *
2865  * The function will attempt to allocate leading space in the packet
2866  * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2867  *
2868  * If successful the size of all data appended to the queue is returned,
2869  * otherwise an error code less than zero is returned, or zero if
2870  * no record(s) were appended.
2871  */
2872 static int
2873 mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
2874     const int is_state_change, const int is_group_query,
2875     const int is_source_query, const int use_block_allow)
2876 {
2877 	struct mldv2_record      mr;
2878 	struct mldv2_record     *pmr;
2879 	struct ifnet            *ifp;
2880 	struct ip6_msource      *ims, *nims;
2881 	mbuf_ref_t               m0, m, md;
2882 	int                      error, is_filter_list_change;
2883 	int                      minrec0len, m0srcs, msrcs, nbytes, off;
2884 	int                      record_has_sources;
2885 	int                      now;
2886 	uint8_t                  type;
2887 	uint8_t                  mode;
2888 
2889 	IN6M_LOCK_ASSERT_HELD(inm);
2890 	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
2891 
2892 	error = 0;
2893 	ifp = inm->in6m_ifp;
2894 	is_filter_list_change = 0;
2895 	m = NULL;
2896 	m0 = NULL;
2897 	m0srcs = 0;
2898 	msrcs = 0;
2899 	nbytes = 0;
2900 	nims = NULL;
2901 	record_has_sources = 1;
2902 	pmr = NULL;
2903 	type = MLD_DO_NOTHING;
2904 	mode = (uint8_t)inm->in6m_st[1].iss_fmode;
2905 
2906 	/*
2907 	 * If we did not transition out of ASM mode during t0->t1,
2908 	 * and there are no source nodes to process, we can skip
2909 	 * the generation of source records.
2910 	 */
2911 	if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2912 	    inm->in6m_nsrc == 0) {
2913 		record_has_sources = 0;
2914 	}
2915 
2916 	if (is_state_change) {
2917 		/*
2918 		 * Queue a state change record.
2919 		 * If the mode did not change, and there are non-ASM
2920 		 * listeners or source filters present,
2921 		 * we potentially need to issue two records for the group.
2922 		 * If there are ASM listeners, and there was no filter
2923 		 * mode transition of any kind, do nothing.
2924 		 *
2925 		 * If we are transitioning to MCAST_UNDEFINED, we need
2926 		 * not send any sources. A transition to/from this state is
2927 		 * considered inclusive with some special treatment.
2928 		 *
2929 		 * If we are rewriting initial joins/leaves to use
2930 		 * ALLOW/BLOCK, and the group's membership is inclusive,
2931 		 * we need to send sources in all cases.
2932 		 */
2933 		if (mode != inm->in6m_st[0].iss_fmode) {
2934 			if (mode == MCAST_EXCLUDE) {
2935 				MLD_PRINTF(("%s: change to EXCLUDE\n",
2936 				    __func__));
2937 				type = MLD_CHANGE_TO_EXCLUDE_MODE;
2938 			} else {
2939 				MLD_PRINTF(("%s: change to INCLUDE\n",
2940 				    __func__));
2941 				if (use_block_allow) {
2942 					/*
2943 					 * XXX
2944 					 * Here we're interested in state
2945 					 * edges either direction between
2946 					 * MCAST_UNDEFINED and MCAST_INCLUDE.
2947 					 * Perhaps we should just check
2948 					 * the group state, rather than
2949 					 * the filter mode.
2950 					 */
2951 					if (mode == MCAST_UNDEFINED) {
2952 						type = MLD_BLOCK_OLD_SOURCES;
2953 					} else {
2954 						type = MLD_ALLOW_NEW_SOURCES;
2955 					}
2956 				} else {
2957 					type = MLD_CHANGE_TO_INCLUDE_MODE;
2958 					if (mode == MCAST_UNDEFINED) {
2959 						record_has_sources = 0;
2960 					}
2961 				}
2962 			}
2963 		} else {
2964 			if (record_has_sources) {
2965 				is_filter_list_change = 1;
2966 			} else {
2967 				type = MLD_DO_NOTHING;
2968 			}
2969 		}
2970 	} else {
2971 		/*
2972 		 * Queue a current state record.
2973 		 */
2974 		if (mode == MCAST_EXCLUDE) {
2975 			type = MLD_MODE_IS_EXCLUDE;
2976 		} else if (mode == MCAST_INCLUDE) {
2977 			type = MLD_MODE_IS_INCLUDE;
2978 			VERIFY(inm->in6m_st[1].iss_asm == 0);
2979 		}
2980 	}
2981 
2982 	/*
2983 	 * Generate the filter list changes using a separate function.
2984 	 */
2985 	if (is_filter_list_change) {
2986 		return mld_v2_enqueue_filter_change(ifq, inm);
2987 	}
2988 
2989 	if (type == MLD_DO_NOTHING) {
2990 		MLD_PRINTF(("%s: nothing to do for %s/%s\n",
2991 		    __func__, ip6_sprintf(&inm->in6m_addr),
2992 		    if_name(inm->in6m_ifp)));
2993 		return 0;
2994 	}
2995 
2996 	/*
2997 	 * If any sources are present, we must be able to fit at least
2998 	 * one in the trailing space of the tail packet's mbuf,
2999 	 * ideally more.
3000 	 */
3001 	minrec0len = sizeof(struct mldv2_record);
3002 	if (record_has_sources) {
3003 		minrec0len += sizeof(struct in6_addr);
3004 	}
3005 	MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__,
3006 	    mld_rec_type_to_str(type),
3007 	    ip6_sprintf(&inm->in6m_addr),
3008 	    if_name(inm->in6m_ifp)));
3009 
3010 	/*
3011 	 * Check if we have a packet in the tail of the queue for this
3012 	 * group into which the first group record for this group will fit.
3013 	 * Otherwise allocate a new packet.
3014 	 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
3015 	 * Note: Group records for G/GSR query responses MUST be sent
3016 	 * in their own packet.
3017 	 */
3018 	m0 = ifq->ifq_tail;
3019 	if (!is_group_query &&
3020 	    m0 != NULL &&
3021 	    (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
3022 	    (m0->m_pkthdr.len + minrec0len) <
3023 	    (ifp->if_mtu - MLD_MTUSPACE)) {
3024 		m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3025 		    sizeof(struct mldv2_record)) /
3026 		    sizeof(struct in6_addr);
3027 		m = m0;
3028 		MLD_PRINTF(("%s: use existing packet\n", __func__));
3029 	} else {
3030 		if (IF_QFULL(ifq)) {
3031 			os_log_error(OS_LOG_DEFAULT,
3032 			    "%s: outbound queue full\n", __func__);
3033 			return -ENOMEM;
3034 		}
3035 		m = NULL;
3036 		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3037 		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
3038 		if (!is_state_change && !is_group_query) {
3039 			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3040 		}
3041 		if (m == NULL) {
3042 			m = m_gethdr(M_DONTWAIT, MT_DATA);
3043 		}
3044 		if (m == NULL) {
3045 			return -ENOMEM;
3046 		}
3047 
3048 		mld_save_context(m, ifp);
3049 
3050 		MLD_PRINTF(("%s: allocated first packet\n", __func__));
3051 	}
3052 
3053 	/*
3054 	 * Append group record.
3055 	 * If we have sources, we don't know how many yet.
3056 	 */
3057 	mr.mr_type = type;
3058 	mr.mr_datalen = 0;
3059 	mr.mr_numsrc = 0;
3060 	mr.mr_addr = inm->in6m_addr;
3061 	in6_clearscope(&mr.mr_addr);
3062 	if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
3063 		if (m != m0) {
3064 			m_freem(m);
3065 		}
3066 		os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed.\n", __func__);
3067 		return -ENOMEM;
3068 	}
3069 	nbytes += sizeof(struct mldv2_record);
3070 
3071 	/*
3072 	 * Append as many sources as will fit in the first packet.
3073 	 * If we are appending to a new packet, the chain allocation
3074 	 * may potentially use clusters; use m_getptr() in this case.
3075 	 * If we are appending to an existing packet, we need to obtain
3076 	 * a pointer to the group record after m_append(), in case a new
3077 	 * mbuf was allocated.
3078 	 *
3079 	 * Only append sources which are in-mode at t1. If we are
3080 	 * transitioning to MCAST_UNDEFINED state on the group, and
3081 	 * use_block_allow is zero, do not include source entries.
3082 	 * Otherwise, we need to include this source in the report.
3083 	 *
3084 	 * Only report recorded sources in our filter set when responding
3085 	 * to a group-source query.
3086 	 */
3087 	if (record_has_sources) {
3088 		if (m == m0) {
3089 			md = m_last(m);
3090 			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
3091 			    md->m_len - nbytes);
3092 		} else {
3093 			md = m_getptr(m, 0, &off);
3094 			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
3095 			    off);
3096 		}
3097 		msrcs = 0;
3098 		RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
3099 		    nims) {
3100 			MLD_PRINTF(("%s: visit node %s\n", __func__,
3101 			    ip6_sprintf(&ims->im6s_addr)));
3102 			now = im6s_get_mode(inm, ims, 1);
3103 			MLD_PRINTF(("%s: node is %d\n", __func__, now));
3104 			if ((now != mode) ||
3105 			    (now == mode &&
3106 			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
3107 				MLD_PRINTF(("%s: skip node\n", __func__));
3108 				continue;
3109 			}
3110 			if (is_source_query && ims->im6s_stp == 0) {
3111 				MLD_PRINTF(("%s: skip unrecorded node\n",
3112 				    __func__));
3113 				continue;
3114 			}
3115 			MLD_PRINTF(("%s: append node\n", __func__));
3116 			if (!m_append(m, sizeof(struct in6_addr),
3117 			    (void *)&ims->im6s_addr)) {
3118 				if (m != m0) {
3119 					m_freem(m);
3120 				}
3121 				os_log_error(OS_LOG_DEFAULT,
3122 				    "%s: m_append() failed\n",
3123 				    __func__);
3124 				return -ENOMEM;
3125 			}
3126 			nbytes += sizeof(struct in6_addr);
3127 			++msrcs;
3128 			if (msrcs == m0srcs) {
3129 				break;
3130 			}
3131 		}
3132 		MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
3133 		    msrcs));
3134 		pmr->mr_numsrc = htons((uint16_t)msrcs);
3135 		nbytes += (msrcs * sizeof(struct in6_addr));
3136 	}
3137 
3138 	if (is_source_query && msrcs == 0) {
3139 		MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
3140 		if (m != m0) {
3141 			m_freem(m);
3142 		}
3143 		return 0;
3144 	}
3145 
3146 	/*
3147 	 * We are good to go with first packet.
3148 	 */
3149 	if (m != m0) {
3150 		MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
3151 		m->m_pkthdr.vt_nrecs = 1;
3152 		IF_ENQUEUE(ifq, m);
3153 	} else {
3154 		m->m_pkthdr.vt_nrecs++;
3155 	}
3156 	/*
3157 	 * No further work needed if no source list in packet(s).
3158 	 */
3159 	if (!record_has_sources) {
3160 		return nbytes;
3161 	}
3162 
3163 	/*
3164 	 * Whilst sources remain to be announced, we need to allocate
3165 	 * a new packet and fill out as many sources as will fit.
3166 	 * Always try for a cluster first.
3167 	 */
3168 	while (nims != NULL) {
3169 		if (IF_QFULL(ifq)) {
3170 			os_log_error(OS_LOG_DEFAULT, "%s: outbound queue full\n", __func__);
3171 			return -ENOMEM;
3172 		}
3173 		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3174 		if (m == NULL) {
3175 			m = m_gethdr(M_DONTWAIT, MT_DATA);
3176 		}
3177 		if (m == NULL) {
3178 			return -ENOMEM;
3179 		}
3180 		mld_save_context(m, ifp);
3181 		md = m_getptr(m, 0, &off);
3182 		pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
3183 		MLD_PRINTF(("%s: allocated next packet\n", __func__));
3184 
3185 		if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
3186 			if (m != m0) {
3187 				m_freem(m);
3188 			}
3189 			os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed.\n", __func__);
3190 			return -ENOMEM;
3191 		}
3192 		m->m_pkthdr.vt_nrecs = 1;
3193 		nbytes += sizeof(struct mldv2_record);
3194 
3195 		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3196 		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
3197 
3198 		msrcs = 0;
3199 		RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
3200 			MLD_PRINTF(("%s: visit node %s\n",
3201 			    __func__, ip6_sprintf(&ims->im6s_addr)));
3202 			now = im6s_get_mode(inm, ims, 1);
3203 			if ((now != mode) ||
3204 			    (now == mode &&
3205 			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
3206 				MLD_PRINTF(("%s: skip node\n", __func__));
3207 				continue;
3208 			}
3209 			if (is_source_query && ims->im6s_stp == 0) {
3210 				MLD_PRINTF(("%s: skip unrecorded node\n",
3211 				    __func__));
3212 				continue;
3213 			}
3214 			MLD_PRINTF(("%s: append node\n", __func__));
3215 			if (!m_append(m, sizeof(struct in6_addr),
3216 			    (void *)&ims->im6s_addr)) {
3217 				if (m != m0) {
3218 					m_freem(m);
3219 				}
3220 				os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3221 				    __func__);
3222 				return -ENOMEM;
3223 			}
3224 			++msrcs;
3225 			if (msrcs == m0srcs) {
3226 				break;
3227 			}
3228 		}
3229 		pmr->mr_numsrc = htons((uint16_t)msrcs);
3230 		nbytes += (msrcs * sizeof(struct in6_addr));
3231 
3232 		MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
3233 		IF_ENQUEUE(ifq, m);
3234 	}
3235 
3236 	return nbytes;
3237 }
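/*
 * Packing example for the m0srcs math above (assuming MLD_MTUSPACE is
 * 56 bytes: IPv6 header + hop-by-hop router alert + report header):
 * on a 1500-byte MTU link, a fresh packet has room for
 * (1500 - 56 - 20) / 16 = 89 sources after the 20-byte mldv2_record
 * header, subject to the MLD_V2_REPORT_MAXRECS cap on group records.
 */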
3238 
3239 /*
3240  * Type used to mark record pass completion.
3241  * We exploit the fact that we can cast to this easily from the
3242  * current filter modes on each ip6_msource node.
3243  */
3244 typedef enum {
3245 	REC_NONE = 0x00,        /* MCAST_UNDEFINED */
3246 	REC_ALLOW = 0x01,       /* MCAST_INCLUDE */
3247 	REC_BLOCK = 0x02,       /* MCAST_EXCLUDE */
3248 	REC_FULL = REC_ALLOW | REC_BLOCK
3249 } rectype_t;
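/*
 * The cast relies on the numeric filter-mode encoding (standard BSD
 * values assumed):
 *
 *	MCAST_UNDEFINED = 0 -> REC_NONE
 *	MCAST_INCLUDE   = 1 -> REC_ALLOW  (source newly permitted)
 *	MCAST_EXCLUDE   = 2 -> REC_BLOCK  (source newly blocked)
 */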
3250 
3251 /*
3252  * Enqueue an MLDv2 filter list change to the given output queue.
3253  *
3254  * Source list filter state is held in an RB-tree. When the filter list
3255  * for a group is changed without changing its mode, we need to compute
3256  * the deltas between T0 and T1 for each source in the filter set,
3257  * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3258  *
3259  * As we may potentially queue two record types, and the entire RB-tree
3260  * needs to be walked at once, we break this out into its own function
3261  * so we can generate a tightly packed queue of packets.
3262  *
3263  * XXX This could be written to only use one tree walk, although that makes
3264  * serializing into the mbuf chains a bit harder. For now we do two walks
3265  * which makes things easier on us, and it may or may not be harder on
3266  * the L2 cache.
3267  *
3268  * If successful the size of all data appended to the queue is returned,
3269  * otherwise an error code less than zero is returned, or zero if
3270  * no record(s) were appended.
3271  */
3272 static int
3273 mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
3274 {
3275 	static const int MINRECLEN =
3276 	    sizeof(struct mldv2_record) + sizeof(struct in6_addr);
3277 	struct ifnet            *ifp;
3278 	struct mldv2_record      mr;
3279 	struct mldv2_record     *pmr;
3280 	struct ip6_msource      *ims, *nims;
3281 	mbuf_ref_t               m, m0, md;
3282 	int                      m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3283 	int                      nallow, nblock;
3284 	uint8_t                  mode, now, then;
3285 	rectype_t                crt, drt, nrt;
3286 
3287 	IN6M_LOCK_ASSERT_HELD(inm);
3288 
3289 	if (inm->in6m_nsrc == 0 ||
3290 	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) {
3291 		return 0;
3292 	}
3293 
3294 	ifp = inm->in6m_ifp;                    /* interface */
3295 	mode = (uint8_t)inm->in6m_st[1].iss_fmode;       /* filter mode at t1 */
3296 	crt = REC_NONE; /* current group record type */
3297 	drt = REC_NONE; /* mask of completed group record types */
3298 	nrt = REC_NONE; /* record type for current node */
3299 	m0srcs = 0;     /* # source which will fit in current mbuf chain */
3300 	npbytes = 0;    /* # of bytes appended this packet */
3301 	nbytes = 0;     /* # of bytes appended to group's state-change queue */
3302 	rsrcs = 0;      /* # sources encoded in current record */
3303 	schanged = 0;   /* # nodes encoded in overall filter change */
3304 	nallow = 0;     /* # of source entries in ALLOW_NEW */
3305 	nblock = 0;     /* # of source entries in BLOCK_OLD */
3306 	nims = NULL;    /* next tree node pointer */
3307 
3308 	/*
3309 	 * For each possible filter record mode.
3310 	 * The first kind of source we encounter tells us which
3311 	 * is the first kind of record we start appending.
3312 	 * If a node transitioned to UNDEFINED at t1, its mode is treated
3313 	 * as the inverse of the group's filter mode.
3314 	 */
3315 	while (drt != REC_FULL) {
3316 		do {
3317 			m0 = ifq->ifq_tail;
3318 			if (m0 != NULL &&
3319 			    (m0->m_pkthdr.vt_nrecs + 1 <=
3320 			    MLD_V2_REPORT_MAXRECS) &&
3321 			    (m0->m_pkthdr.len + MINRECLEN) <
3322 			    (ifp->if_mtu - MLD_MTUSPACE)) {
3323 				m = m0;
3324 				m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3325 				    sizeof(struct mldv2_record)) /
3326 				    sizeof(struct in6_addr);
3327 				MLD_PRINTF(("%s: use previous packet\n",
3328 				    __func__));
3329 			} else {
3330 				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3331 				if (m == NULL) {
3332 					m = m_gethdr(M_DONTWAIT, MT_DATA);
3333 				}
3334 				if (m == NULL) {
3335 					os_log_error(OS_LOG_DEFAULT, "%s: m_get*() failed\n",
3336 					    __func__);
3337 					return -ENOMEM;
3338 				}
3339 				m->m_pkthdr.vt_nrecs = 0;
3340 				mld_save_context(m, ifp);
3341 				m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3342 				    sizeof(struct mldv2_record)) /
3343 				    sizeof(struct in6_addr);
3344 				npbytes = 0;
3345 				MLD_PRINTF(("%s: allocated new packet\n",
3346 				    __func__));
3347 			}
3348 			/*
3349 			 * Append the MLD group record header to the
3350 			 * current packet's data area.
3351 			 * Recalculate pointer to free space for next
3352 			 * group record, in case m_append() allocated
3353 			 * a new mbuf or cluster.
3354 			 */
3355 			memset(&mr, 0, sizeof(mr));
3356 			mr.mr_addr = inm->in6m_addr;
3357 			in6_clearscope(&mr.mr_addr);
3358 			if (!m_append(m, sizeof(mr), (void *)&mr)) {
3359 				if (m != m0) {
3360 					m_freem(m);
3361 				}
3362 				os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3363 				    __func__);
3364 				return -ENOMEM;
3365 			}
3366 			npbytes += sizeof(struct mldv2_record);
3367 			if (m != m0) {
3368 				/* new packet; offset in chain */
3369 				md = m_getptr(m, npbytes -
3370 				    sizeof(struct mldv2_record), &off);
3371 				pmr = (struct mldv2_record *)(mtod(md,
3372 				    uint8_t *) + off);
3373 			} else {
3374 				/* current packet; offset from last append */
3375 				md = m_last(m);
3376 				pmr = (struct mldv2_record *)(mtod(md,
3377 				    uint8_t *) + md->m_len -
3378 				    sizeof(struct mldv2_record));
3379 			}
3380 			/*
3381 			 * Begin walking the tree for this record type
3382 			 * pass, or continue from where we left off
3383 			 * previously if we had to allocate a new packet.
3384 			 * Only report deltas in-mode at t1.
3385 	 * We need not report included sources as allowed
3386 	 * if we are in inclusive mode on the group;
3387 	 * the converse, however, is not true.
3388 			 */
3389 			rsrcs = 0;
3390 			if (nims == NULL) {
3391 				nims = RB_MIN(ip6_msource_tree,
3392 				    &inm->in6m_srcs);
3393 			}
3394 			RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
3395 				MLD_PRINTF(("%s: visit node %s\n", __func__,
3396 				    ip6_sprintf(&ims->im6s_addr)));
3397 				now = im6s_get_mode(inm, ims, 1);
3398 				then = im6s_get_mode(inm, ims, 0);
3399 				MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
3400 				    __func__, then, now));
3401 				if (now == then) {
3402 					MLD_PRINTF(("%s: skip unchanged\n",
3403 					    __func__));
3404 					continue;
3405 				}
3406 				if (mode == MCAST_EXCLUDE &&
3407 				    now == MCAST_INCLUDE) {
3408 					MLD_PRINTF(("%s: skip IN src on EX "
3409 					    "group\n", __func__));
3410 					continue;
3411 				}
3412 				nrt = (rectype_t)now;
3413 				if (nrt == REC_NONE) {
3414 					nrt = (rectype_t)(~mode & REC_FULL);
3415 				}
3416 				if (schanged++ == 0) {
3417 					crt = nrt;
3418 				} else if (crt != nrt) {
3419 					continue;
3420 				}
3421 				if (!m_append(m, sizeof(struct in6_addr),
3422 				    (void *)&ims->im6s_addr)) {
3423 					if (m != m0) {
3424 						m_freem(m);
3425 					}
3426 					os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3427 					    __func__);
3428 					return -ENOMEM;
3429 				}
3430 				nallow += !!(crt == REC_ALLOW);
3431 				nblock += !!(crt == REC_BLOCK);
3432 				if (++rsrcs == m0srcs) {
3433 					break;
3434 				}
3435 			}
3436 			/*
3437 			 * If we did not append any tree nodes on this
3438 			 * pass, back out of allocations.
3439 			 */
3440 			if (rsrcs == 0) {
3441 				npbytes -= sizeof(struct mldv2_record);
3442 				if (m != m0) {
3443 					MLD_PRINTF(("%s: m_free(m)\n",
3444 					    __func__));
3445 					m_freem(m);
3446 				} else {
3447 					MLD_PRINTF(("%s: m_adj(m, -mr)\n",
3448 					    __func__));
3449 					m_adj(m, -((int)sizeof(
3450 						    struct mldv2_record)));
3451 				}
3452 				continue;
3453 			}
3454 			npbytes += (rsrcs * sizeof(struct in6_addr));
3455 			if (crt == REC_ALLOW) {
3456 				pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
3457 			} else if (crt == REC_BLOCK) {
3458 				pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
3459 			}
3460 			pmr->mr_numsrc = htons((uint16_t)rsrcs);
3461 			/*
3462 			 * Count the new group record, and enqueue this
3463 			 * packet if it wasn't already queued.
3464 			 */
3465 			m->m_pkthdr.vt_nrecs++;
3466 			if (m != m0) {
3467 				IF_ENQUEUE(ifq, m);
3468 			}
3469 			nbytes += npbytes;
3470 		} while (nims != NULL);
3471 		drt |= crt;
3472 		crt = (~crt & REC_FULL);
3473 	}
3474 
3475 	MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
3476 	    nallow, nblock));
3477 
3478 	return nbytes;
3479 }
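/*
 * Editor's sketch: the two-pass structure of the loop above, in miniature.
 * This is an illustrative standalone fragment, not kernel code; it assumes
 * only the rectype_t encoding used in this file (REC_ALLOW | REC_BLOCK ==
 * REC_FULL).  The first in-mode delta fixes the current record type crt;
 * when a pass over the source tree completes, that type is recorded in drt
 * and crt flips to the complementary type, so both ALLOW_NEW and BLOCK_OLD
 * records are emitted before the outer loop exits.
 *
 *	rectype_t crt = REC_NONE;	// fixed by the first delta seen
 *	rectype_t drt = REC_NONE;	// mask of completed passes
 *
 *	while (drt != REC_FULL) {
 *		// ... walk the source tree, appending only sources whose
 *		// record type matches crt ...
 *		drt |= crt;				// mark pass done
 *		crt = (rectype_t)(~crt & REC_FULL);	// ALLOW <-> BLOCK
 *	}
 */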
3480 
3481 static int
3482 mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
3483 {
3484 	struct ifqueue  *gq;
3485 	mbuf_ref_t       m;    /* pending state-change */
3486 	mbuf_ref_t       m0;   /* copy of pending state-change */
3487 	mbuf_ref_t       mt;   /* last state-change in packet */
3488 	mbuf_ref_t       n;
3489 	int              docopy, domerge;
3490 	u_int            recslen;
3491 
3492 	IN6M_LOCK_ASSERT_HELD(inm);
3493 
3494 	docopy = 0;
3495 	domerge = 0;
3496 	recslen = 0;
3497 
3498 	/*
3499 	 * If there are further pending retransmissions, make a writable
3500 	 * copy of each queued state-change message before merging.
3501 	 */
3502 	if (inm->in6m_scrv > 0) {
3503 		docopy = 1;
3504 	}
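	/*
	 * Editor's note: when docopy is set, the loop below duplicates
	 * each pending packet with m_dup() and leaves the original queued
	 * on in6m_scq for the remaining retransmissions; otherwise the
	 * original is unlinked with IF_REMQUEUE() and handed off directly.
	 */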
3505 
3506 	gq = &inm->in6m_scq;
3507 #ifdef MLD_DEBUG
3508 	if (gq->ifq_head == NULL) {
3509 		MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
3510 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
3511 	}
3512 #endif
3513 
3514 	/*
3515 	 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
3516 	 * packet might not always be at the head of the ifqueue.
3517 	 */
3518 	m = gq->ifq_head;
3519 	while (m != NULL) {
3520 		/*
3521 		 * Only merge the report into the current packet if
3522 		 * there is sufficient space to do so; an MLDv2 report
3523 		 * packet may only contain 65,535 group records.
3524 	 * Always use a simple mbuf chain concatenation to do this,
3525 		 * as large state changes for single groups may have
3526 		 * allocated clusters.
3527 		 */
3528 		domerge = 0;
3529 		mt = ifscq->ifq_tail;
3530 		if (mt != NULL) {
3531 			recslen = m_length(m);
3532 
3533 			if ((mt->m_pkthdr.vt_nrecs +
3534 			    m->m_pkthdr.vt_nrecs <=
3535 			    MLD_V2_REPORT_MAXRECS) &&
3536 			    (mt->m_pkthdr.len + recslen <=
3537 			    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) {
3538 				domerge = 1;
3539 			}
3540 		}
3541 
3542 		if (!domerge && IF_QFULL(gq)) {
3543 			os_log_info(OS_LOG_DEFAULT, "%s: outbound queue full\n",
3544 			    __func__);
3545 			n = m->m_nextpkt;
3546 			if (!docopy) {
3547 				IF_REMQUEUE(gq, m);
3548 				m_freem(m);
3549 			}
3550 			m = n;
3551 			continue;
3552 		}
3553 
3554 		if (!docopy) {
3555 			MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
3556 			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3557 			n = m->m_nextpkt;
3558 			IF_REMQUEUE(gq, m);
3559 			m0 = m;
3560 			m = n;
3561 		} else {
3562 			MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
3563 			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3564 			m0 = m_dup(m, M_NOWAIT);
3565 			if (m0 == NULL) {
3566 				return ENOMEM;
3567 			}
3568 			m0->m_nextpkt = NULL;
3569 			m = m->m_nextpkt;
3570 		}
3571 
3572 		if (!domerge) {
3573 			MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx\n",
3574 			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
3575 			    (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
3576 			IF_ENQUEUE(ifscq, m0);
3577 		} else {
3578 			struct mbuf *mtl;       /* last mbuf of packet mt */
3579 
3580 			MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
3581 			    "0x%llx\n", __func__,
3582 			    (uint64_t)VM_KERNEL_ADDRPERM(m0),
3583 			    (uint64_t)VM_KERNEL_ADDRPERM(mt)));
3584 
3585 			mtl = m_last(mt);
3586 			m0->m_flags &= ~M_PKTHDR;
3587 			mt->m_pkthdr.len += recslen;
3588 			mt->m_pkthdr.vt_nrecs +=
3589 			    m0->m_pkthdr.vt_nrecs;
3590 
3591 			mtl->m_next = m0;
3592 		}
3593 	}
3594 
3595 	return 0;
3596 }
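/*
 * Editor's sketch: the merge admission test above, reduced to its two
 * constraints.  Illustrative only; maxrecs stands in for
 * MLD_V2_REPORT_MAXRECS and mtuspace for MLD_MTUSPACE.  A pending
 * state-change packet is merged into the tail of ifscq only if both the
 * combined record count and the combined length remain within bounds:
 *
 *	static int
 *	can_merge(u_int tail_nrecs, u_int m_nrecs, u_int maxrecs,
 *	    u_int tail_len, u_int m_len, u_int if_mtu, u_int mtuspace)
 *	{
 *		return (tail_nrecs + m_nrecs <= maxrecs) &&
 *		    (tail_len + m_len <= if_mtu - mtuspace);
 *	}
 */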
3597 
3598 /*
3599  * Respond to a pending MLDv2 General Query.
3600  */
3601 static uint32_t
3602 mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
3603 {
3604 	struct ifnet            *ifp;
3605 	struct in6_multi        *inm;
3606 	struct in6_multistep    step;
3607 	int                      retval;
3608 
3609 	MLI_LOCK_ASSERT_HELD(mli);
3610 
3611 	VERIFY(mli->mli_version == MLD_VERSION_2);
3612 
3613 	ifp = mli->mli_ifp;
3614 	MLI_UNLOCK(mli);
3615 
3616 	in6_multihead_lock_shared();
3617 	IN6_FIRST_MULTI(step, inm);
3618 	while (inm != NULL) {
3619 		IN6M_LOCK(inm);
3620 		if (inm->in6m_ifp != ifp) {
3621 			goto next;
3622 		}
3623 
3624 		switch (inm->in6m_state) {
3625 		case MLD_NOT_MEMBER:
3626 		case MLD_SILENT_MEMBER:
3627 			break;
3628 		case MLD_REPORTING_MEMBER:
3629 		case MLD_IDLE_MEMBER:
3630 		case MLD_LAZY_MEMBER:
3631 		case MLD_SLEEPING_MEMBER:
3632 		case MLD_AWAKENING_MEMBER:
3633 			inm->in6m_state = MLD_REPORTING_MEMBER;
3634 			MLI_LOCK(mli);
3635 			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3636 			    inm, 0, 0, 0, 0);
3637 			MLI_UNLOCK(mli);
3638 			MLD_PRINTF(("%s: enqueue record = %d\n",
3639 			    __func__, retval));
3640 			break;
3641 		case MLD_G_QUERY_PENDING_MEMBER:
3642 		case MLD_SG_QUERY_PENDING_MEMBER:
3643 		case MLD_LEAVING_MEMBER:
3644 			break;
3645 		}
3646 next:
3647 		IN6M_UNLOCK(inm);
3648 		IN6_NEXT_MULTI(step, inm);
3649 	}
3650 	in6_multihead_lock_done();
3651 
3652 	MLI_LOCK(mli);
3653 	mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3654 	MLI_LOCK_ASSERT_HELD(mli);
3655 
3656 	/*
3657 	 * Slew transmission of bursts over 1-second intervals.
3658 	 */
3659 	if (mli->mli_gq.ifq_head != NULL) {
3660 		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3661 			MLD_RESPONSE_BURST_INTERVAL);
3662 	}
3663 
3664 	return mli->mli_v2_timer;
3665 }
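/*
 * Editor's note: mld_dispatch_queue_locked() above transmits at most
 * MLD_MAX_RESPONSE_BURST packets per call.  If the general-query queue is
 * still non-empty afterwards, mli_v2_timer is re-armed with a randomized
 * delay derived from MLD_RESPONSE_BURST_INTERVAL, so a large response is
 * paced across successive timer ticks rather than sent back-to-back.
 */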
3666 
3667 /*
3668  * Transmit the next pending message in the output queue.
3669  *
3670  * Must not be called with in6m_lock or mli_lock held.
3671  */
3672 __attribute__((noinline))
3673 static void
3674 mld_dispatch_packet(struct mbuf *m)
3675 {
3676 	struct ip6_moptions     *im6o;
3677 	struct ifnet            *ifp;
3678 	struct ifnet            *__single oifp = NULL;
3679 	mbuf_ref_t               m0, md;
3680 	struct ip6_hdr          *ip6;
3681 	struct icmp6_hdr        *icmp6;
3682 	int                      error;
3683 	int                      off;
3684 	int                      type;
3685 
3686 	MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
3687 	    (uint64_t)VM_KERNEL_ADDRPERM(m)));
3688 
3689 	/*
3690 	 * Check if the ifnet is still attached.
3691 	 */
3692 	ifp = mld_restore_context(m);
3693 	if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
3694 		os_log_error(OS_LOG_DEFAULT, "%s: dropped 0x%llx as interface went away\n",
3695 		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
3696 		m_freem(m);
3697 		ip6stat.ip6s_noroute++;
3698 		return;
3699 	}
3700 	im6o = ip6_allocmoptions(Z_WAITOK);
3701 	if (im6o == NULL) {
3702 		m_freem(m);
3703 		return;
3704 	}
3705 
3706 	im6o->im6o_multicast_hlim  = 1;
3707 	im6o->im6o_multicast_loop = 0;
3708 	im6o->im6o_multicast_ifp = ifp;
3709 	if (m->m_flags & M_MLDV1) {
3710 		m0 = m;
3711 	} else {
3712 		m0 = mld_v2_encap_report(ifp, m);
3713 		if (m0 == NULL) {
3714 			os_log_error(OS_LOG_DEFAULT, "%s: dropped 0x%llx\n", __func__,
3715 			    (uint64_t)VM_KERNEL_ADDRPERM(m));
3716 			/*
3717 			 * mld_v2_encap_report() has already freed our mbuf.
3718 			 */
3719 			IM6O_REMREF(im6o);
3720 			ip6stat.ip6s_odropped++;
3721 			return;
3722 		}
3723 	}
3724 	mld_scrub_context(m0);
3725 	m->m_flags &= ~(M_PROTOFLAGS);
3726 	m0->m_pkthdr.rcvif = lo_ifp;
3727 
3728 	ip6 = mtod(m0, struct ip6_hdr *);
3729 	(void)in6_setscope(&ip6->ip6_dst, ifp, NULL);
3730 	ip6_output_setdstifscope(m0, ifp->if_index, NULL);
3731 	/*
3732 	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3733 	 * so we can bump the stats.
3734 	 */
3735 	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3736 	icmp6 = (struct icmp6_hdr *)(mtod(md, uint8_t *) + off);
3737 	type = icmp6->icmp6_type;
3738 
3739 	if (ifp->if_eflags & IFEF_TXSTART) {
3740 		/*
3741 		 * Use control service class if the outgoing
3742 		 * interface supports transmit-start model.
3743 		 */
3744 		(void) m_set_service_class(m0, MBUF_SC_CTL);
3745 	}
3746 
3747 	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
3748 	    &oifp, NULL);
3749 
3750 	IM6O_REMREF(im6o);
3751 
3752 	if (error) {
3753 		os_log_error(OS_LOG_DEFAULT, "%s: ip6_output(0x%llx) = %d\n", __func__,
3754 		    (uint64_t)VM_KERNEL_ADDRPERM(m0), error);
3755 		if (oifp != NULL) {
3756 			ifnet_release(oifp);
3757 		}
3758 		return;
3759 	}
3760 
3761 	icmp6stat.icp6s_outhist[type]++;
3762 	if (oifp != NULL) {
3763 		icmp6_ifstat_inc(oifp, ifs6_out_msg);
3764 		switch (type) {
3765 		case MLD_LISTENER_REPORT:
3766 		case MLDV2_LISTENER_REPORT:
3767 			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3768 			break;
3769 		case MLD_LISTENER_DONE:
3770 			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3771 			break;
3772 		}
3773 		ifnet_release(oifp);
3774 	}
3775 }
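/*
 * Editor's note: the ICMPv6 type must be read out before the chain is
 * handed to ip6_output(), which consumes the mbuf; only the saved type
 * remains available for the icmp6stat/ifstat accounting above.
 * m_getptr() returns the mbuf holding the byte at the given chain offset
 * and sets *off to that byte's offset within it, e.g. (a sketch, assuming
 * the ICMPv6 header bytes are contiguous within that mbuf):
 *
 *	int off;
 *	struct mbuf *md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
 *	int type = ((struct icmp6_hdr *)
 *	    (mtod(md, uint8_t *) + off))->icmp6_type;
 */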
3776 
3777 /*
3778  * Encapsulate an MLDv2 report.
3779  *
3780  * KAME IPv6 requires that hop-by-hop options be passed separately,
3781  * and that the IPv6 header be prepended in a separate mbuf.
3782  *
3783  * Returns a pointer to the new mbuf chain head, or NULL if the
3784  * allocation failed.
3785  */
3786 static struct mbuf *
3787 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3788 {
3789 	struct mbuf             *mh;
3790 	struct mldv2_report     *mld;
3791 	struct ip6_hdr          *ip6;
3792 	struct in6_ifaddr       *ia;
3793 	int                      mldreclen;
3794 
3795 	VERIFY(m->m_flags & M_PKTHDR);
3796 
3797 	/*
3798 	 * RFC3590: OK to send as :: or tentative during DAD.
3799 	 */
3800 	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
3801 	if (ia == NULL) {
3802 		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
3803 	}
3804 
3805 	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
3806 	if (mh == NULL) {
3807 		if (ia != NULL) {
3808 			ifa_remref(&ia->ia_ifa);
3809 		}
3810 		m_freem(m);
3811 		return NULL;
3812 	}
3813 	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3814 
3815 	mldreclen = m_length(m);
3816 	MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));
3817 
3818 	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3819 	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3820 	    sizeof(struct mldv2_report) + mldreclen;
3821 
3822 	ip6 = mtod(mh, struct ip6_hdr *);
3823 	ip6->ip6_flow = 0;
3824 	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3825 	ip6->ip6_vfc |= IPV6_VERSION;
3826 	ip6->ip6_nxt = IPPROTO_ICMPV6;
3827 	if (ia != NULL) {
3828 		IFA_LOCK(&ia->ia_ifa);
3829 	}
3830 	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3831 	ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia);
3832 
3833 	if (ia != NULL) {
3834 		IFA_UNLOCK(&ia->ia_ifa);
3835 		ifa_remref(&ia->ia_ifa);
3836 		ia = NULL;
3837 	}
3838 	ip6->ip6_dst = in6addr_linklocal_allv2routers;
3839 	ip6_output_setdstifscope(mh, ifp->if_index, NULL);
3840 	/* scope ID will be set in netisr */
3841 
3842 	mld = (struct mldv2_report *)(ip6 + 1);
3843 	mld->mld_type = MLDV2_LISTENER_REPORT;
3844 	mld->mld_code = 0;
3845 	mld->mld_cksum = 0;
3846 	mld->mld_v2_reserved = 0;
3847 	mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
3848 	m->m_pkthdr.vt_nrecs = 0;
3849 	m->m_flags &= ~M_PKTHDR;
3850 
3851 	mh->m_next = m;
3852 	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3853 	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3854 	return mh;
3855 }
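/*
 * Editor's note: the resulting chain carries the headers in their own
 * mbuf, as the KAME note above requires:
 *
 *	mh: [ ip6_hdr | mldv2_report ] --> m: [ group records, mldreclen bytes ]
 *
 * mh->m_pkthdr.len covers the entire report, and the ICMPv6 checksum is
 * computed over the report header plus the records, starting just past
 * the IPv6 header.
 */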
3856 
3857 #ifdef MLD_DEBUG
3858 static const char *
3859 mld_rec_type_to_str(const int type)
3860 {
3861 	switch (type) {
3862 	case MLD_CHANGE_TO_EXCLUDE_MODE:
3863 		return "TO_EX";
3864 	case MLD_CHANGE_TO_INCLUDE_MODE:
3865 		return "TO_IN";
3866 	case MLD_MODE_IS_EXCLUDE:
3867 		return "MODE_EX";
3868 	case MLD_MODE_IS_INCLUDE:
3869 		return "MODE_IN";
3870 	case MLD_ALLOW_NEW_SOURCES:
3871 		return "ALLOW_NEW";
3872 	case MLD_BLOCK_OLD_SOURCES:
3873 		return "BLOCK_OLD";
3874 	default:
3875 		break;
3876 	}
3877 	return "unknown";
3878 }
3879 #endif
3880 
3881 void
3882 mld_init(void)
3883 {
3884 	os_log(OS_LOG_DEFAULT, "%s: initializing\n", __func__);
3885 
3886 	ip6_initpktopts(&mld_po);
3887 	mld_po.ip6po_hlim = 1;
3888 	mld_po.ip6po_hbh = &mld_ra.hbh;
3889 	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3890 	mld_po.ip6po_flags = IP6PO_DONTFRAG;
3891 	LIST_INIT(&mli_head);
3892 }
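/*
 * Editor's note: mld_po is the packet-option template passed to
 * ip6_output() by mld_dispatch_packet().  The values mirror the MLD
 * transmission rules of RFC 2710/RFC 3810: hop limit 1, an IPv6 Router
 * Alert hop-by-hop option (mld_ra.hbh, set up earlier in this file), no
 * fragmentation, and no preference for temporary source addresses.
 */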
3893