/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 2009 Bruce Simpson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mcache.h>

#include <dev/random/randomdev.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>

#include <os/log.h>

/* Lock group and attribute for mld_mtx */
static LCK_ATTR_DECLARE(mld_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(mld_mtx_grp, "mld_mtx");

/*
 * Locking and reference counting:
 *
 * mld_mtx mainly protects mli_head. In cases where both mld_mtx and
 * in6_multihead_lock must be held, the former must be acquired first in order
 * to maintain lock ordering. It is not a requirement that mld_mtx be
 * acquired first before in6_multihead_lock, but in case both must be acquired
 * in succession, the correct lock ordering must be followed.
 *
 * Instead of walking the if_multiaddrs list at the interface and returning
 * the ifma_protospec value of a matching entry, we search the global list
 * of in6_multi records and find it that way; this is done with in6_multihead
 * lock held. Doing so avoids the race condition issues that many other BSDs
 * suffer from (therefore in our implementation, ifma_protospec will never be
 * NULL for as long as the in6_multi is valid.)
 *
 * The above creates a requirement for the in6_multi to stay in the
 * in6_multihead list even after the final MLD leave (in MLDv2 mode), until
 * the leave no longer needs to be retransmitted (this is not required for
 * MLDv1.) In order to handle this, the request and reference counts of the
 * in6_multi are bumped up when the state changes to MLD_LEAVING_MEMBER,
 * and later dropped in the timeout handler. Each in6_multi holds a
 * reference to the underlying mld_ifinfo.
 *
 * Thus, the permitted lock order is:
 *
 *	mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
 *
 * Any may be taken independently, but if any are held at the same time,
 * the above lock order must be followed.
 */
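/*
 * For example, a path that needs both the global list and a record must
 * take mld_mtx first, then in6_multihead_lock, then the inm6_lock of the
 * record it found, releasing in the reverse order.
 */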
static LCK_MTX_DECLARE_ATTR(mld_mtx, &mld_mtx_grp, &mld_mtx_attr);

SLIST_HEAD(mld_in6m_relhead, in6_multi);

static void	mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
static struct mld_ifinfo *mli_alloc(zalloc_flags_t);
static void	mli_free(struct mld_ifinfo *);
static void	mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
static void	mld_dispatch_packet(struct mbuf *);
static void	mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
    struct mld_tparams *);
static int	mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
    struct mld_tparams *);
static int	mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
    struct mld_tparams *, const int);
#ifdef MLD_DEBUG
static const char *	mld_rec_type_to_str(const int);
#endif
static uint32_t	mld_set_version(struct mld_ifinfo *, const int);
static void	mld_append_relq(struct mld_ifinfo *, struct in6_multi *);
static void	mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
static void	mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int);
static int	mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
    /*const*/ struct mld_hdr *);
static int	mld_v1_input_report(struct ifnet *, struct mbuf *,
    const struct ip6_hdr *, /*const*/ struct mld_hdr *);
static void	mld_v1_process_group_timer(struct in6_multi *, const int);
static void	mld_v1_process_querier_timers(struct mld_ifinfo *);
static int	mld_v1_transmit_report(struct in6_multi *, const uint8_t);
static uint32_t	mld_v1_update_group(struct in6_multi *, const int);
static void	mld_v2_cancel_link_timers(struct mld_ifinfo *);
static uint32_t	mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
    mld_v2_encap_report(struct ifnet *, struct mbuf *);
static int	mld_v2_enqueue_filter_change(struct ifqueue *,
    struct in6_multi *);
static int	mld_v2_enqueue_group_record(struct ifqueue *,
    struct in6_multi *, const int, const int, const int,
    const int);
static int	mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
    struct mbuf *, const int, const int);
static int	mld_v2_merge_state_changes(struct in6_multi *,
    struct ifqueue *);
static void	mld_v2_process_group_timers(struct mld_ifinfo *,
    struct ifqueue *, struct ifqueue *,
    struct in6_multi *, const int);
static int	mld_v2_process_group_query(struct in6_multi *,
    int, struct mbuf *, const int);
static int	sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;

static const uint32_t mld_timeout_delay = 1000; /* in milliseconds */
static const uint32_t mld_timeout_leeway = 500; /* in milliseconds */
static bool mld_timeout_run;		/* MLD timer is scheduled to run */
static bool mld_fast_timeout_run;	/* MLD fast timer is scheduled to run */
static void mld_timeout(thread_call_param_t, thread_call_param_t);
static void mld_sched_timeout(void);
static void mld_sched_fast_timeout(void);

/*
 * Normative references: RFC 2710, RFC 3590, RFC 3810.
 */
static struct timeval mld_gsrdelay = {.tv_sec = 10, .tv_usec = 0};
static LIST_HEAD(, mld_ifinfo) mli_head;

static int querier_present_timers_running6;
static int interface_timers_running6;
static int state_change_timers_running6;
static int current_state_timers_running6;

static unsigned int mld_mli_list_genid;
/*
 * Subsystem lock macros.
 */
#define MLD_LOCK() \
	lck_mtx_lock(&mld_mtx)
#define MLD_LOCK_ASSERT_HELD() \
	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED)
#define MLD_LOCK_ASSERT_NOTHELD() \
	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
#define MLD_UNLOCK() \
	lck_mtx_unlock(&mld_mtx)

#define MLD_ADD_DETACHED_IN6M(_head, _in6m) {				\
	SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle);			\
}

#define MLD_REMOVE_DETACHED_IN6M(_head) {				\
	struct in6_multi *_in6m, *_inm_tmp;				\
	SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) {		\
		SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle);	\
		IN6M_REMREF(_in6m);					\
	}								\
	VERIFY(SLIST_EMPTY(_head));					\
}
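/*
 * Detached in6_multi records collected on a caller-local list are only
 * released via MLD_REMOVE_DETACHED_IN6M() once every other lock has been
 * dropped, since the final IN6M_REMREF() may itself need to take locks.
 */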

static KALLOC_TYPE_DEFINE(mli_zone, struct mld_ifinfo, NET_KT_DEFAULT);

SYSCTL_DECL(_net_inet6);	/* Note: Not in any common header. */

SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "IPv6 Multicast Listener Discovery");
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
    "Rate limit for MLDv2 Group-and-Source queries in seconds");

SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_mld_ifinfo, "Per-interface MLDv2 state");

static int mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v1enable, 0, "Enable fallback to MLDv1");

static int mld_v2enable = 1;
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v2enable, 0, sysctl_mld_v2enable, "I",
    "Enable MLDv2 (debug purposes only)");

static int mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
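/*
 * The knobs above surface through sysctl(8), e.g. "net.inet6.mld.v1enable".
 * v2enable is a SYSCTL_PROC rather than a plain SYSCTL_INT so that clearing
 * it can immediately downgrade all interfaces to MLDv1 (see
 * sysctl_mld_v2enable() below).
 */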

#ifdef MLD_DEBUG
int mld_debug = 0;
SYSCTL_INT(_net_inet6_mld, OID_AUTO,
    debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
#endif
/*
 * Packed Router Alert option structure declaration.
 */
struct mld_raopt {
	struct ip6_hbh		hbh;
	struct ip6_opt		pad;
	struct ip6_opt_router	ra;
} __packed;

/*
 * Router Alert hop-by-hop option header.
 */
static struct mld_raopt mld_ra = {
	.hbh = { .ip6h_nxt = 0, .ip6h_len = 0 },
	.pad = { .ip6o_type = IP6OPT_PADN, .ip6o_len = 0 },
	.ra = {
		.ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
		.ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
		.ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
		    (IP6OPT_RTALERT_MLD & 0xFF) }
	}
};
static struct ip6_pktopts mld_po;

/* Store MLDv2 record count in the module private scratch space */
#define	vt_nrecs	pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
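/*
 * As noted above, vt_nrecs keeps a per-packet count of MLDv2 group
 * records in the mbuf packet header's private scratch area, so the
 * enqueue and merge paths need not reparse the chain to know how many
 * records a pending report already carries.
 */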

static __inline void
mld_save_context(struct mbuf *m, struct ifnet *ifp)
{
	m->m_pkthdr.rcvif = ifp;
}

static __inline void
mld_scrub_context(struct mbuf *m)
{
	m->m_pkthdr.rcvif = NULL;
}

/*
 * Restore context from a queued output chain.
 * Return saved ifp.
 */
static __inline struct ifnet *
mld_restore_context(struct mbuf *m)
{
	return m->m_pkthdr.rcvif;
}
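/*
 * The save/scrub/restore helpers above borrow m_pkthdr.rcvif to remember
 * which interface an enqueued report belongs to until it is dispatched;
 * the field is scrubbed again so the borrowed pointer is not mistaken
 * for a genuine receive interface downstream.
 */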

/*
 * Retrieve or set threshold between group-source queries in seconds.
 */
static int
sysctl_mld_gsr SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	int i;

	MLD_LOCK();

	i = (int)mld_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr) {
		goto out_locked;
	}

	if (i < -1 || i >= 60) {
		error = EINVAL;
		goto out_locked;
	}

	mld_gsrdelay.tv_sec = i;

out_locked:
	MLD_UNLOCK();
	return error;
}
/*
 * Expose struct mld_ifinfo to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 */
static int
sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int *name;
	int error;
	u_int namelen;
	struct ifnet *ifp;
	struct mld_ifinfo *mli;
	struct mld_ifinfo_u mli_u;

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	if (namelen != 1) {
		return EINVAL;
	}

	MLD_LOCK();

	if (name[0] <= 0 || name[0] > (u_int)if_index) {
		error = ENOENT;
		goto out_locked;
	}

	error = ENOENT;

	ifnet_head_lock_shared();
	ifp = ifindex2ifnet[name[0]];
	ifnet_head_done();
	if (ifp == NULL) {
		goto out_locked;
	}

	bzero(&mli_u, sizeof(mli_u));

	LIST_FOREACH(mli, &mli_head, mli_link) {
		MLI_LOCK(mli);
		if (ifp != mli->mli_ifp) {
			MLI_UNLOCK(mli);
			continue;
		}

		mli_u.mli_ifindex = mli->mli_ifp->if_index;
		mli_u.mli_version = mli->mli_version;
		mli_u.mli_v1_timer = mli->mli_v1_timer;
		mli_u.mli_v2_timer = mli->mli_v2_timer;
		mli_u.mli_flags = mli->mli_flags;
		mli_u.mli_rv = mli->mli_rv;
		mli_u.mli_qi = mli->mli_qi;
		mli_u.mli_qri = mli->mli_qri;
		mli_u.mli_uri = mli->mli_uri;
		MLI_UNLOCK(mli);

		error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u));
		break;
	}

out_locked:
	MLD_UNLOCK();
	return error;
}

static int
sysctl_mld_v2enable SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	int i;
	struct mld_ifinfo *mli;
	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };

	MLD_LOCK();

	i = mld_v2enable;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr) {
		goto out_locked;
	}

	if (i < 0 || i > 1) {
		error = EINVAL;
		goto out_locked;
	}

	mld_v2enable = i;
	/*
	 * If we enabled v2, the state transition will take care of upgrading
	 * the MLD version back to v2. Otherwise, we have to explicitly
	 * downgrade. Note that this functionality is to be used for debugging.
	 */
	if (mld_v2enable == 1) {
		goto out_locked;
	}

	LIST_FOREACH(mli, &mli_head, mli_link) {
		MLI_LOCK(mli);
		if (mld_set_version(mli, MLD_VERSION_1) > 0) {
			mtp.qpt = 1;
		}
		MLI_UNLOCK(mli);
	}

out_locked:
	MLD_UNLOCK();

	mld_set_timeout(&mtp);

	return error;
}

/*
 * Dispatch an entire queue of pending packet chains.
 *
 * Must not be called with in6m_lock held.
 * XXX This routine unlocks the MLD global lock and also mli locks.
 * Make sure that the calling routine takes a reference on the mli
 * before calling this routine.
 * Also, if we are traversing mli_head, remember to check the mli list
 * generation count and restart the loop if the generation count has
 * changed.
 */
static void
mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
{
	struct mbuf *m;

	MLD_LOCK_ASSERT_HELD();

	if (mli != NULL) {
		MLI_LOCK_ASSERT_HELD(mli);
	}

	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m == NULL) {
			break;
		}
		MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(ifq),
		    (uint64_t)VM_KERNEL_ADDRPERM(m)));

		if (mli != NULL) {
			MLI_UNLOCK(mli);
		}
		MLD_UNLOCK();

		mld_dispatch_packet(m);

		MLD_LOCK();
		if (mli != NULL) {
			MLI_LOCK(mli);
		}

		if (--limit == 0) {
			break;
		}
	}

	if (mli != NULL) {
		MLI_LOCK_ASSERT_HELD(mli);
	}
}

/*
 * Filter outgoing MLD report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
 * and node-local addresses. However, kernel and socket consumers
 * always embed the KAME scope ID in the address provided, so strip it
 * when performing comparison.
 * Note: This is not the same as the *multicast* scope.
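 * (For example, reports for ff02::1 and for any node-local group such
 * as ff01::2 are suppressed, while solicited-node groups like
 * ff02::1:ffxx:xxxx are reported.)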
 *
 * Return zero if the given group is one for which MLD reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline__ int
mld_is_addr_reported(const struct in6_addr *addr)
{
	VERIFY(IN6_IS_ADDR_MULTICAST(addr));

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) {
		return 0;
	}

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL &&
	    !IN6_IS_ADDR_UNICAST_BASED_MULTICAST(addr)) {
		struct in6_addr tmp = *addr;
		in6_clearscope(&tmp);
		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) {
			return 0;
		}
	}

	return 1;
}

/*
 * Attach MLD when PF_INET6 is attached to an interface.
 */
struct mld_ifinfo *
mld_domifattach(struct ifnet *ifp, zalloc_flags_t how)
{
	struct mld_ifinfo *mli;

	os_log_debug(OS_LOG_DEFAULT, "%s: called for ifp %s\n", __func__,
	    if_name(ifp));

	mli = mli_alloc(how);
	if (mli == NULL) {
		return NULL;
	}

	MLD_LOCK();

	MLI_LOCK(mli);
	mli_initvar(mli, ifp, 0);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
	MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
	MLI_UNLOCK(mli);
	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
	mld_mli_list_genid++;

	MLD_UNLOCK();

	os_log_info(OS_LOG_DEFAULT, "%s: allocated mld_ifinfo for ifp %s\n",
	    __func__, if_name(ifp));

	return mli;
}

/*
 * Attach MLD when PF_INET6 is reattached to an interface. Caller is
 * expected to have an outstanding reference to the mli.
 */
void
mld_domifreattach(struct mld_ifinfo *mli)
{
	struct ifnet *ifp;

	MLD_LOCK();

	MLI_LOCK(mli);
	VERIFY(!(mli->mli_debug & IFD_ATTACHED));
	ifp = mli->mli_ifp;
	VERIFY(ifp != NULL);
	mli_initvar(mli, ifp, 1);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
	MLI_UNLOCK(mli);
	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
	mld_mli_list_genid++;

	MLD_UNLOCK();

	os_log_info(OS_LOG_DEFAULT, "%s: reattached mld_ifinfo for ifp %s\n",
	    __func__, if_name(ifp));
}

/*
 * Hook for domifdetach.
 */
void
mld_domifdetach(struct ifnet *ifp)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	os_log_info(OS_LOG_DEFAULT, "%s: called for ifp %s\n", __func__,
	    if_name(ifp));

	MLD_LOCK();
	mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);
	MLD_UNLOCK();

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
}

/*
 * Called at interface detach time. Note that we only flush all deferred
 * responses and record releases; all remaining inm records and their source
 * entries related to this interface are left intact, in order to handle
 * the reattach case.
 */
static void
mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead)
{
	struct mld_ifinfo *mli, *tmli;

	MLD_LOCK_ASSERT_HELD();

	LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
		MLI_LOCK(mli);
		if (mli->mli_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			IF_DRAIN(&mli->mli_gq);
			IF_DRAIN(&mli->mli_v1q);
			mld_flush_relq(mli, in6m_dthead);
			mli->mli_debug &= ~IFD_ATTACHED;
			MLI_UNLOCK(mli);

			LIST_REMOVE(mli, mli_link);
			MLI_REMREF(mli); /* release mli_head reference */
			mld_mli_list_genid++;
			return;
		}
		MLI_UNLOCK(mli);
	}
	panic("%s: mld_ifinfo not found for ifp %p(%s)", __func__,
	    ifp, ifp->if_xname);
}

__private_extern__ void
mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli)
{
	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);

	MLI_LOCK_ASSERT_NOTHELD(mli);
	MLI_LOCK(mli);
	if (!(ifp->if_flags & IFF_MULTICAST) &&
	    (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) {
		mli->mli_flags |= MLIF_SILENT;
	} else {
		mli->mli_flags &= ~MLIF_SILENT;
	}
	MLI_UNLOCK(mli);
}

static void
mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
{
	MLI_LOCK_ASSERT_HELD(mli);

	mli->mli_ifp = ifp;
	if (mld_v2enable) {
		mli->mli_version = MLD_VERSION_2;
	} else {
		mli->mli_version = MLD_VERSION_1;
	}
	mli->mli_flags = 0;
	mli->mli_rv = MLD_RV_INIT;
	mli->mli_qi = MLD_QI_INIT;
	mli->mli_qri = MLD_QRI_INIT;
	mli->mli_uri = MLD_URI_INIT;

	if (mld_use_allow) {
		mli->mli_flags |= MLIF_USEALLOW;
	}
	if (!reattach) {
		SLIST_INIT(&mli->mli_relinmhead);
	}

	/*
	 * Responses to general queries are subject to bounds.
	 */
	mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
	mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
}

static struct mld_ifinfo *
mli_alloc(zalloc_flags_t how)
{
	struct mld_ifinfo *mli = zalloc_flags(mli_zone, how | Z_ZERO);
	if (mli != NULL) {
		lck_mtx_init(&mli->mli_lock, &mld_mtx_grp, &mld_mtx_attr);
		mli->mli_debug |= IFD_ALLOC;
	}
	return mli;
}

static void
mli_free(struct mld_ifinfo *mli)
{
	MLI_LOCK(mli);
	if (mli->mli_debug & IFD_ATTACHED) {
		panic("%s: attached mli=%p is being freed", __func__, mli);
		/* NOTREACHED */
	} else if (mli->mli_ifp != NULL) {
		panic("%s: ifp not NULL for mli=%p", __func__, mli);
		/* NOTREACHED */
	} else if (!(mli->mli_debug & IFD_ALLOC)) {
		panic("%s: mli %p cannot be freed", __func__, mli);
		/* NOTREACHED */
	} else if (mli->mli_refcnt != 0) {
		panic("%s: non-zero refcnt mli=%p", __func__, mli);
		/* NOTREACHED */
	}
	mli->mli_debug &= ~IFD_ALLOC;
	MLI_UNLOCK(mli);

	lck_mtx_destroy(&mli->mli_lock, &mld_mtx_grp);
	zfree(mli_zone, mli);
}

void
mli_addref(struct mld_ifinfo *mli, int locked)
{
	if (!locked) {
		MLI_LOCK_SPIN(mli);
	} else {
		MLI_LOCK_ASSERT_HELD(mli);
	}

	if (++mli->mli_refcnt == 0) {
		panic("%s: mli=%p wraparound refcnt", __func__, mli);
		/* NOTREACHED */
	}
	if (!locked) {
		MLI_UNLOCK(mli);
	}
}

void
mli_remref(struct mld_ifinfo *mli)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;
	struct ifnet *ifp;

	MLI_LOCK_SPIN(mli);

	if (mli->mli_refcnt == 0) {
		panic("%s: mli=%p negative refcnt", __func__, mli);
		/* NOTREACHED */
	}

	--mli->mli_refcnt;
	if (mli->mli_refcnt > 0) {
		MLI_UNLOCK(mli);
		return;
	}

	ifp = mli->mli_ifp;
	mli->mli_ifp = NULL;
	IF_DRAIN(&mli->mli_gq);
	IF_DRAIN(&mli->mli_v1q);
	SLIST_INIT(&in6m_dthead);
	mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
	MLI_UNLOCK(mli);

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);

	os_log(OS_LOG_DEFAULT, "%s: freeing mld_ifinfo for ifp %s\n",
	    __func__, if_name(ifp));

	mli_free(mli);
}

/*
 * Process a received MLDv1 general or address-specific query.
 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	int err = 0, is_general_query;
	uint16_t timer;
	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

	if (!mld_v1enable) {
		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 query on ifp %s\n",
		    __func__, if_name(ifp));
		goto done;
	}

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 query src %s on ifp %s\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    if_name(ifp));
		goto done;
	}

	/*
	 * Do address field validation upfront before we accept
	 * the query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * MLDv1 General Query.
		 * If this was not sent to the all-nodes group, ignore it.
		 */
		struct in6_addr dst;

		dst = ip6->ip6_dst;
		in6_clearscope(&dst);
		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) {
			err = EINVAL;
			goto done;
		}
		is_general_query = 1;
	} else {
		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks.
		 */
		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	/*
	 * Switch to MLDv1 host compatibility mode.
	 */
	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	MLI_LOCK(mli);
	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
	MLI_UNLOCK(mli);

	timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
	if (timer == 0) {
		timer = 1;
	}

	if (is_general_query) {
		struct in6_multistep step;

		os_log_debug(OS_LOG_DEFAULT, "%s: process v1 general query on ifp %s\n",
		    __func__, if_name(ifp));
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			IN6M_LOCK(inm);
			if (inm->in6m_ifp == ifp) {
				mtp.cst += mld_v1_update_group(inm, timer);
			}
			IN6M_UNLOCK(inm);
			IN6_NEXT_MULTI(step, inm);
		}
		in6_multihead_lock_done();
	} else {
		/*
		 * MLDv1 Group-Specific Query.
		 * If this is a group-specific MLDv1 query, we need only
		 * look up the single group to process it.
		 */
		in6_multihead_lock_shared();
		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
		in6_multihead_lock_done();

		if (inm != NULL) {
			IN6M_LOCK(inm);
			os_log_debug(OS_LOG_DEFAULT, "%s: process v1 query %s on "
			    "ifp %s\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    if_name(ifp));
			mtp.cst = mld_v1_update_group(inm, timer);
			IN6M_UNLOCK(inm);
			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
		}
		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);
	}
done:
	mld_set_timeout(&mtp);

	return err;
}

/*
 * Update the report timer on a group in response to an MLDv1 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to MLDv2. If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike MLDv2, the delay per group should be jittered
 * to avoid bursts of MLDv1 reports.
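 * (MLD_RANDOM_DELAY() supplies that jitter; it is expected to pick a
 * uniformly distributed delay in [1, timer].)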
 */
static uint32_t
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
	IN6M_LOCK_ASSERT_HELD(inm);

	MLD_PRINTF(("%s: %s/%s timer=%d\n", __func__,
	    ip6_sprintf(&inm->in6m_addr),
	    if_name(inm->in6m_ifp), timer));

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
		break;
	case MLD_REPORTING_MEMBER:
		if (inm->in6m_timer != 0 &&
		    inm->in6m_timer <= timer) {
			MLD_PRINTF(("%s: REPORTING and timer running, "
			    "skipping.\n", __func__));
			break;
		}
		OS_FALLTHROUGH;
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
		MLD_PRINTF(("%s: ->REPORTING\n", __func__));
		inm->in6m_state = MLD_REPORTING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		break;
	case MLD_SLEEPING_MEMBER:
		MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
		inm->in6m_state = MLD_AWAKENING_MEMBER;
		break;
	case MLD_LEAVING_MEMBER:
		break;
	}

	return inm->in6m_timer;
}

/*
 * Process a received MLDv2 general, group-specific or
 * group-and-source-specific query.
 *
 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
 *
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    struct mbuf *m, const int off, const int icmp6len)
{
	struct mld_ifinfo *mli;
	struct mldv2_query *mld;
	struct in6_multi *inm;
	uint32_t maxdelay, nsrc, qqi, timer;
	int err = 0, is_general_query;
	uint8_t qrv;
	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

	if (!mld_v2enable) {
		os_log_info(OS_LOG_DEFAULT, "%s: ignore v2 query on ifp %s\n",
		    __func__, if_name(ifp));
		goto done;
	}

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		os_log_info(OS_LOG_DEFAULT,
		    "%s: ignore v2 query src %s on ifp %s\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    if_name(ifp));
		goto done;
	}

	os_log_debug(OS_LOG_DEFAULT,
	    "%s: input v2 query on ifp %s\n", __func__,
	    if_name(ifp));

	mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);

	maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
	if (maxdelay > SHRT_MAX) {
		maxdelay = (MLD_MRC_MANT((uint16_t)maxdelay) | 0x1000) <<
		    (MLD_MRC_EXP((uint16_t)maxdelay) + 3);
	}
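	/*
	 * The decode above follows RFC 3810 Section 5.1.3: a Maximum
	 * Response Code of 32768 or more is floating point, with the
	 * exponent in bits 12-14 and the mantissa in bits 0-11, i.e.
	 * MRD = (mant | 0x1000) << (exp + 3).
	 */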
	timer = maxdelay / MLD_TIMER_SCALE;
	if (timer == 0) {
		timer = 1;
	}

	qrv = MLD_QRV(mld->mld_misc);
	if (qrv < 2) {
		MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__,
		    qrv, MLD_RV_INIT));
		qrv = MLD_RV_INIT;
	}
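	/*
	 * Per RFC 3810 Section 9.1, the Robustness Variable MUST NOT be
	 * zero and SHOULD NOT be one; hence the clamp to MLD_RV_INIT above.
	 */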

	qqi = mld->mld_qqi;
	if (qqi >= 128) {
		qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
		    (MLD_QQIC_EXP(mld->mld_qqi) + 3);
	}
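	/*
	 * Similarly, RFC 3810 Section 5.1.9: QQIC values of 128 and above
	 * encode the Querier's Query Interval in mantissa/exponent form,
	 * while smaller values are taken literally (in seconds).
	 */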

	nsrc = ntohs(mld->mld_numsrc);
	if (nsrc > MLD_MAX_GS_SOURCES) {
		err = EMSGSIZE;
		goto done;
	}
	if (icmp6len < sizeof(struct mldv2_query) +
	    (nsrc * sizeof(struct in6_addr))) {
		err = EMSGSIZE;
		goto done;
	}

	/*
	 * Do further input validation upfront to avoid resetting timers
	 * should we need to discard this query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * A general query with a source list has undefined
		 * behaviour; discard it.
		 */
		if (nsrc > 0) {
			err = EINVAL;
			goto done;
		}
		is_general_query = 1;
	} else {
		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks (due to KAME
		 * locking lameness). We own this mbuf chain just now.
		 */
		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	MLI_LOCK(mli);
	/*
	 * Discard the v2 query if we're in Compatibility Mode.
	 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
	 * until the Old Version Querier Present timer expires.
	 */
	if (mli->mli_version != MLD_VERSION_2) {
		MLI_UNLOCK(mli);
		goto done;
	}

	mtp.qpt = mld_set_version(mli, MLD_VERSION_2);
	mli->mli_rv = qrv;
	mli->mli_qi = qqi;
	mli->mli_qri = MAX(timer, MLD_QRI_MIN);

	MLD_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, mli->mli_rv,
	    mli->mli_qi, mli->mli_qri));

	if (is_general_query) {
		/*
		 * MLDv2 General Query.
		 *
		 * Schedule a current-state report on this ifp for
		 * all groups, possibly containing source lists.
		 *
		 * If there is a pending General Query response
		 * scheduled earlier than the selected delay, do
		 * not schedule any other reports.
		 * Otherwise, reset the interface timer.
		 */
		os_log_debug(OS_LOG_DEFAULT, "%s: process v2 general query on ifp %s\n",
		    __func__, if_name(ifp));
		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
			mtp.it = mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
		}
		MLI_UNLOCK(mli);
	} else {
		MLI_UNLOCK(mli);
		/*
		 * MLDv2 Group-specific or Group-and-source-specific Query.
		 *
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		in6_multihead_lock_shared();
		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
		in6_multihead_lock_done();
		if (inm == NULL) {
			goto done;
		}

		IN6M_LOCK(inm);
		if (nsrc > 0) {
			if (!ratecheck(&inm->in6m_lastgsrtv,
			    &mld_gsrdelay)) {
				os_log_info(OS_LOG_DEFAULT, "%s: GS query throttled\n",
				    __func__);
				IN6M_UNLOCK(inm);
				IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
				goto done;
			}
		}
		os_log_debug(OS_LOG_DEFAULT, "%s: process v2 group query on ifp %s\n",
		    __func__, if_name(ifp));
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		MLI_LOCK(mli);
		mtp.it = mli->mli_v2_timer;
		MLI_UNLOCK(mli);
		if (mtp.it == 0 || mtp.it >= timer) {
			(void) mld_v2_process_group_query(inm, timer, m, off);
			mtp.cst = inm->in6m_timer;
		}
		IN6M_UNLOCK(inm);
		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);
	}
done:
	if (mtp.it > 0) {
		os_log_debug(OS_LOG_DEFAULT, "%s: v2 general query response scheduled in "
		    "T+%d seconds on ifp %s\n", __func__, mtp.it,
		    if_name(ifp));
	}
	mld_set_timeout(&mtp);

	return err;
}

/*
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
    const int off)
{
	struct mldv2_query *mld;
	int retval;
	uint16_t nsrc;

	IN6M_LOCK_ASSERT_HELD(inm);

	retval = 0;
	mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LEAVING_MEMBER:
		return retval;
	case MLD_REPORTING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(mld->mld_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
		    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
			in6m_clear_recorded(inm);
			timer = min(inm->in6m_timer, timer);
		}
		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		return retval;
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->in6m_timer, timer);
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		return retval;
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources. If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 */
	if (inm->in6m_nsrc > 0) {
		struct mbuf *m;
		struct in6_addr addr;
		int i, nrecorded;
		int soff;

		m = m0;
		soff = off + sizeof(struct mldv2_query);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++) {
			m_copydata(m, soff, sizeof(addr), &addr);
			retval = in6m_record_source(inm, &addr);
			if (retval < 0) {
				break;
			}
			nrecorded += retval;
			soff += sizeof(struct in6_addr);

			while (m && (soff >= m->m_len)) {
				soff -= m->m_len;
				m = m->m_next;
			}

			/* should not be possible: */
			if (m == NULL) {
				break;
			}
		}
		if (nrecorded > 0) {
			MLD_PRINTF(("%s: schedule response to SG query\n",
			    __func__));
			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		}
	}

	return retval;
}

/*
 * Process a received MLDv1 host membership report.
 * Assumes mld points to mld_hdr in pulled up mbuf chain.
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
    const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
{
	struct in6_addr src, dst;
	struct in6_ifaddr *ia;
	struct in6_multi *inm;

	if (!mld_v1enable) {
		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report on ifp %s\n",
		    __func__, if_name(ifp));
		return 0;
	}

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		return 0;
	}

	/*
	 * MLDv1 reports must originate from a host's link-local address,
	 * or the unspecified address (when booting).
	 */
	src = ip6->ip6_src;
	in6_clearscope(&src);
	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report src %s on ifp %s\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    if_name(ifp));
		return EINVAL;
	}

	/*
	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
	 * group, and must be directed to the group itself.
	 */
	dst = ip6->ip6_dst;
	in6_clearscope(&dst);
	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
		os_log_info(OS_LOG_DEFAULT, "%s: ignore v1 report dst %s on ifp %s\n",
		    __func__, ip6_sprintf(&ip6->ip6_dst),
		    if_name(ifp));
		return EINVAL;
	}

	/*
	 * Make sure we don't hear our own membership report, as fast
	 * leave requires knowing that we are the only member of a
	 * group. Assume we used the link-local address if available,
	 * otherwise look for ::.
	 *
	 * XXX Note that scope ID comparison is needed for the address
	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
	 * performed for the on-wire address.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
	if (ia != NULL) {
		IFA_LOCK(&ia->ia_ifa);
		if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) {
			IFA_UNLOCK(&ia->ia_ifa);
			IFA_REMREF(&ia->ia_ifa);
			return 0;
		}
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
	} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
		return 0;
	}

	os_log_debug(OS_LOG_DEFAULT, "%s: process v1 report %s on ifp %s\n",
	    __func__, ip6_sprintf(&mld->mld_addr),
	    if_name(ifp));

	/*
	 * Embed scope ID of receiving interface in MLD query for lookup
	 * whilst we don't hold other locks (due to KAME locking lameness).
	 */
	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		(void)in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	/*
	 * MLDv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	in6_multihead_lock_shared();
	IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
	in6_multihead_lock_done();

	if (inm != NULL) {
		struct mld_ifinfo *mli;

		IN6M_LOCK(inm);
		mli = inm->in6m_mli;
		VERIFY(mli != NULL);

		MLI_LOCK(mli);
		/*
		 * If we are in MLDv2 host mode, do not allow the
		 * other host's MLDv1 report to suppress our reports.
		 */
		if (mli->mli_version == MLD_VERSION_2) {
			MLI_UNLOCK(mli);
			IN6M_UNLOCK(inm);
			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
			goto out;
		}
		MLI_UNLOCK(mli);

		inm->in6m_timer = 0;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_SLEEPING_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_AWAKENING_MEMBER:
			MLD_PRINTF(("%s: report suppressed for %s on "
			    "ifp 0x%llx(%s)\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
			OS_FALLTHROUGH;
		case MLD_LAZY_MEMBER:
			inm->in6m_state = MLD_LAZY_MEMBER;
			break;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:
			break;
		}
		IN6M_UNLOCK(inm);
		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
	}

out:
	/* XXX Clear embedded scope ID as userland won't expect it. */
	in6_clearscope(&mld->mld_addr);

	return 0;
}

/*
 * MLD input path.
 *
 * Assume query messages which fit in a single ICMPv6 message header
 * have been pulled up.
 * Assume that userland will want to see the message, even if it
 * otherwise fails kernel input validation; do not free it.
 * Pullup may however free the mbuf chain m if it fails.
 *
 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
 */
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
	struct ifnet *ifp = NULL;
	struct ip6_hdr *ip6 = NULL;
	struct mld_hdr *mld = NULL;
	int mldlen = 0;

	MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(m), off));

	ifp = m->m_pkthdr.rcvif;

	/* Pullup to appropriate size. */
	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
	if (mld->mld_type == MLD_LISTENER_QUERY &&
	    icmp6len >= sizeof(struct mldv2_query)) {
		mldlen = sizeof(struct mldv2_query);
	} else {
		mldlen = sizeof(struct mld_hdr);
	}
	/* Check that the mldv2_query/mld_hdr fits in the first mbuf. */
	IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE);
	IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
	if (mld == NULL) {
		icmp6stat.icp6s_badlen++;
		return IPPROTO_DONE;
	}
	ip6 = mtod(m, struct ip6_hdr *);

	/*
	 * Userland needs to see all of this traffic for implementing
	 * the endpoint discovery portion of multicast routing.
	 */
	switch (mld->mld_type) {
	case MLD_LISTENER_QUERY:
		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
		if (icmp6len == sizeof(struct mld_hdr)) {
			if (mld_v1_input_query(ifp, ip6, mld) != 0) {
				return 0;
			}
		} else if (icmp6len >= sizeof(struct mldv2_query)) {
			if (mld_v2_input_query(ifp, ip6, m, off,
			    icmp6len) != 0) {
				return 0;
			}
		}
		break;
	case MLD_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
		if (mld_v1_input_report(ifp, m, ip6, mld) != 0) {
			return 0;
		}
		break;
	case MLDV2_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
		break;
	case MLD_LISTENER_DONE:
		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Schedule MLD timer based on various parameters; caller must ensure that
 * lock ordering is maintained as this routine acquires the MLD global lock.
 */
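/*
 * The fields of struct mld_tparams map onto the global timer flags
 * below: qpt -> querier present, it -> interface (general query
 * response), cst -> current state, sct -> state change.
 */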
void
mld_set_timeout(struct mld_tparams *mtp)
{
	MLD_LOCK_ASSERT_NOTHELD();
	VERIFY(mtp != NULL);

	if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
		MLD_LOCK();
		if (mtp->qpt != 0) {
			querier_present_timers_running6 = 1;
		}
		if (mtp->it != 0) {
			interface_timers_running6 = 1;
		}
		if (mtp->cst != 0) {
			current_state_timers_running6 = 1;
		}
		if (mtp->sct != 0) {
			state_change_timers_running6 = 1;
		}
		if (mtp->fast) {
			mld_sched_fast_timeout();
		} else {
			mld_sched_timeout();
		}
		MLD_UNLOCK();
	}
}

void
mld_set_fast_timeout(struct mld_tparams *mtp)
{
	VERIFY(mtp != NULL);
	mtp->fast = true;
	mld_set_timeout(mtp);
}

/*
 * MLD6 timer handler (per 1 second).
 */
static void
mld_timeout(thread_call_param_t arg0, thread_call_param_t arg1 __unused)
{
	struct ifqueue scq;     /* State-change packets */
	struct ifqueue qrq;     /* Query response packets */
	struct ifnet *ifp;
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	int uri_sec = 0;
	unsigned int genid = mld_mli_list_genid;
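	/*
	 * arg0 doubles as the fast/slow discriminator: the fast thread
	 * call is allocated with a non-NULL parameter (see
	 * mld_sched_fast_timeout()), the regular one with NULL.
	 */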
	bool fast = arg0 != NULL;

	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	MLD_LOCK();

	MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d, fast %d\n", __func__,
	    querier_present_timers_running6, interface_timers_running6,
	    current_state_timers_running6, state_change_timers_running6, fast));

	if (fast) {
		/*
		 * When running the fast timer, skip processing
		 * of "querier present" timers since they are
		 * based on 1-second intervals.
		 */
		goto skip_query_timers;
	}
	/*
	 * MLDv1 querier present timer processing.
	 */
	if (querier_present_timers_running6) {
		querier_present_timers_running6 = 0;
		LIST_FOREACH(mli, &mli_head, mli_link) {
			MLI_LOCK(mli);
			mld_v1_process_querier_timers(mli);
			if (mli->mli_v1_timer > 0) {
				querier_present_timers_running6 = 1;
			}
			MLI_UNLOCK(mli);
		}
	}

	/*
	 * MLDv2 General Query response timer processing.
	 */
	if (interface_timers_running6) {
		MLD_PRINTF(("%s: interface timers running\n", __func__));
		interface_timers_running6 = 0;
		mli = LIST_FIRST(&mli_head);

		while (mli != NULL) {
			if (mli->mli_flags & MLIF_PROCESSED) {
				mli = LIST_NEXT(mli, mli_link);
				continue;
			}

			MLI_LOCK(mli);
			if (mli->mli_version != MLD_VERSION_2) {
				MLI_UNLOCK(mli);
				mli = LIST_NEXT(mli, mli_link);
				continue;
			}
			/*
			 * XXX The logic below ends up calling
			 * mld_dispatch_packet which can unlock mli
			 * and the global MLD lock.
			 * Therefore grab a reference on MLI and also
			 * check for generation count to see if we should
			 * iterate the list again.
			 */
			MLI_ADDREF_LOCKED(mli);

			if (mli->mli_v2_timer == 0) {
				/* Do nothing. */
			} else if (--mli->mli_v2_timer == 0) {
				if (mld_v2_dispatch_general_query(mli) > 0) {
					interface_timers_running6 = 1;
				}
			} else {
				interface_timers_running6 = 1;
			}
			mli->mli_flags |= MLIF_PROCESSED;
			MLI_UNLOCK(mli);
			MLI_REMREF(mli);

			if (genid != mld_mli_list_genid) {
				MLD_PRINTF(("%s: MLD information list changed "
				    "in the middle of iteration! Restart iteration.\n",
				    __func__));
				mli = LIST_FIRST(&mli_head);
				genid = mld_mli_list_genid;
			} else {
				mli = LIST_NEXT(mli, mli_link);
			}
		}

		LIST_FOREACH(mli, &mli_head, mli_link)
			mli->mli_flags &= ~MLIF_PROCESSED;
	}

skip_query_timers:
	if (!current_state_timers_running6 &&
	    !state_change_timers_running6) {
		goto out_locked;
	}

	current_state_timers_running6 = 0;
	state_change_timers_running6 = 0;

	MLD_PRINTF(("%s: state change timers running\n", __func__));

	memset(&qrq, 0, sizeof(struct ifqueue));
	qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;

	memset(&scq, 0, sizeof(struct ifqueue));
	scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;

	/*
	 * MLD host report and state-change timer processing.
	 * Note: Processing a v2 group timer may remove a node.
	 */
	mli = LIST_FIRST(&mli_head);

	while (mli != NULL) {
		struct in6_multistep step;

		if (mli->mli_flags & MLIF_PROCESSED) {
			mli = LIST_NEXT(mli, mli_link);
			continue;
		}

		MLI_LOCK(mli);
		ifp = mli->mli_ifp;
		uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);
		MLI_UNLOCK(mli);

		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			IN6M_LOCK(inm);
			if (inm->in6m_ifp != ifp) {
				goto next;
			}

			MLI_LOCK(mli);
			switch (mli->mli_version) {
			case MLD_VERSION_1:
				mld_v1_process_group_timer(inm,
				    mli->mli_version);
				break;
			case MLD_VERSION_2:
				mld_v2_process_group_timers(mli, &qrq,
				    &scq, inm, uri_sec);
				break;
			}
			MLI_UNLOCK(mli);
next:
			IN6M_UNLOCK(inm);
			IN6_NEXT_MULTI(step, inm);
		}
		in6_multihead_lock_done();

		/*
		 * XXX The logic below ends up calling
		 * mld_dispatch_packet which can unlock mli
		 * and the global MLD lock.
		 * Therefore grab a reference on MLI and also
		 * check for generation count to see if we should
		 * iterate the list again.
		 */
		MLI_LOCK(mli);
		MLI_ADDREF_LOCKED(mli);
		if (mli->mli_version == MLD_VERSION_1) {
			mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
		} else if (mli->mli_version == MLD_VERSION_2) {
			MLI_UNLOCK(mli);
			mld_dispatch_queue_locked(NULL, &qrq, 0);
			mld_dispatch_queue_locked(NULL, &scq, 0);
			VERIFY(qrq.ifq_len == 0);
			VERIFY(scq.ifq_len == 0);
			MLI_LOCK(mli);
		}
		/*
		 * In case there are still any pending membership reports
		 * which didn't get drained at version change time.
		 */
		IF_DRAIN(&mli->mli_v1q);
		/*
		 * Release all deferred inm records, and drain any locally
		 * enqueued packets; do it even if the current MLD version
		 * for the link is no longer MLDv2, in order to handle the
		 * version change case.
		 */
		mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
		mli->mli_flags |= MLIF_PROCESSED;
		MLI_UNLOCK(mli);
		MLI_REMREF(mli);

		IF_DRAIN(&qrq);
		IF_DRAIN(&scq);

		if (genid != mld_mli_list_genid) {
			MLD_PRINTF(("%s: MLD information list changed "
			    "in the middle of iteration! Restart iteration.\n",
			    __func__));
			mli = LIST_FIRST(&mli_head);
			genid = mld_mli_list_genid;
		} else {
			mli = LIST_NEXT(mli, mli_link);
		}
	}

	LIST_FOREACH(mli, &mli_head, mli_link)
		mli->mli_flags &= ~MLIF_PROCESSED;

out_locked:
	/* re-arm the timer if there's work to do */
	if (fast) {
		mld_fast_timeout_run = false;
	} else {
		mld_timeout_run = false;
	}
	mld_sched_timeout();
	MLD_UNLOCK();

1856 /* Now that we've dropped all locks, release detached records */
1857 MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
1858 }
1859
1860 static void
1861 mld_sched_timeout(void)
1862 {
1863 static thread_call_t mld_timeout_tcall;
1864 uint64_t deadline = 0, leeway = 0;
1865
1866 MLD_LOCK_ASSERT_HELD();
1867 if (mld_timeout_tcall == NULL) {
1868 mld_timeout_tcall =
1869 thread_call_allocate_with_options(mld_timeout,
1870 NULL,
1871 THREAD_CALL_PRIORITY_KERNEL,
1872 THREAD_CALL_OPTIONS_ONCE);
1873 }
1874
1875 if (!mld_timeout_run &&
1876 (querier_present_timers_running6 || current_state_timers_running6 ||
1877 interface_timers_running6 || state_change_timers_running6)) {
1878 mld_timeout_run = true;
1879 clock_interval_to_deadline(mld_timeout_delay, NSEC_PER_MSEC,
1880 &deadline);
1881 clock_interval_to_absolutetime_interval(mld_timeout_leeway,
1882 NSEC_PER_MSEC, &leeway);
1883 thread_call_enter_delayed_with_leeway(mld_timeout_tcall, NULL,
1884 deadline, leeway,
1885 THREAD_CALL_DELAY_LEEWAY);
1886 }
1887 }
1888
1889 static void
1890 mld_sched_fast_timeout(void)
1891 {
1892 static thread_call_t mld_fast_timeout_tcall;
1893
1894 MLD_LOCK_ASSERT_HELD();
1895 if (mld_fast_timeout_tcall == NULL) {
1896 mld_fast_timeout_tcall =
1897 thread_call_allocate_with_options(mld_timeout,
1898 mld_sched_fast_timeout,
1899 THREAD_CALL_PRIORITY_KERNEL,
1900 THREAD_CALL_OPTIONS_ONCE);
1901 }
1902 if (!mld_fast_timeout_run &&
1903 (current_state_timers_running6 || state_change_timers_running6)) {
1904 mld_fast_timeout_run = true;
1905 thread_call_enter(mld_fast_timeout_tcall);
1906 }
1907 }
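/*
 * (Contrast between the two schedulers above: mld_sched_timeout() arms a
 * one-shot thread call with a millisecond deadline plus leeway, letting the
 * kernel coalesce wakeups, while mld_sched_fast_timeout() fires the thread
 * call immediately for latency-sensitive current-state/state-change work.)
 */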
1908
1909 /*
1910 * Appends an in6_multi to the list to be released later.
1911 *
1912 * Caller must be holding mli_lock.
1913 */
1914 static void
1915 mld_append_relq(struct mld_ifinfo *mli, struct in6_multi *inm)
1916 {
1917 MLI_LOCK_ASSERT_HELD(mli);
1918 os_log(OS_LOG_DEFAULT, "%s: adding inm %llx on relq ifp %s\n",
1919 __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm),
1920 mli->mli_ifp != NULL ? if_name(mli->mli_ifp) : "<null>");
1921 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, in6m_nrele);
1922 }
1923
1924 /*
1925 * Free the in6_multi reference(s) for this MLD lifecycle.
1926 *
1927 * Caller must be holding mli_lock.
1928 */
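/*
 * (Flow, for orientation: the release list is spliced onto a local list
 * under mli_lock, the lock is dropped, and each inm is then detached under
 * the exclusive in6_multihead lock; any final references are parked on
 * in6m_dthead so the caller can release them once all locks are dropped.)
 */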
1929 static void
1930 mld_flush_relq(struct mld_ifinfo *mli, struct mld_in6m_relhead *in6m_dthead)
1931 {
1932 struct in6_multi *inm;
1933 SLIST_HEAD(, in6_multi) temp_relinmhead;
1934
1935 /*
1936 * Before dropping the mli_lock, copy all the items in the
1937 * release list to a temporary list to prevent other threads
1938 * from changing mli_relinmhead while we are traversing it.
1939 */
1940 MLI_LOCK_ASSERT_HELD(mli);
1941 SLIST_INIT(&temp_relinmhead);
1942 while ((inm = SLIST_FIRST(&mli->mli_relinmhead)) != NULL) {
1943 SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
1944 SLIST_INSERT_HEAD(&temp_relinmhead, inm, in6m_nrele);
1945 }
1946 MLI_UNLOCK(mli);
1947 in6_multihead_lock_exclusive();
1948 while ((inm = SLIST_FIRST(&temp_relinmhead)) != NULL) {
1949 int lastref;
1950
1951 SLIST_REMOVE_HEAD(&temp_relinmhead, in6m_nrele);
1952 IN6M_LOCK(inm);
1953 os_log(OS_LOG_DEFAULT, "%s: flushing inm %llx on relq ifp %s\n",
1954 __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm),
1955 inm->in6m_ifp != NULL ? if_name(inm->in6m_ifp) : "<null>");
1956 VERIFY(inm->in6m_nrelecnt != 0);
1957 inm->in6m_nrelecnt--;
1958 lastref = in6_multi_detach(inm);
1959 VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
1960 inm->in6m_reqcnt == 0));
1961 IN6M_UNLOCK(inm);
1962 /* from mli_relinmhead */
1963 IN6M_REMREF(inm);
1964 /* from in6_multihead_list */
1965 if (lastref) {
1966 /*
1967 * Defer releasing our final reference, as we
1968 * are holding the MLD lock at this point, and
1969 * we could end up with locking issues later on
1970 * (while issuing SIOCDELMULTI) when this is the
1971 * final reference count. Let the caller do it
1972 * when it is safe.
1973 */
1974 MLD_ADD_DETACHED_IN6M(in6m_dthead, inm);
1975 }
1976 }
1977 in6_multihead_lock_done();
1978 MLI_LOCK(mli);
1979 }
1980
1981 /*
1982 * Update host report group timer.
1983 * Will update the global pending timer flags.
1984 */
1985 static void
1986 mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
1987 {
1988 #pragma unused(mld_version)
1989 int report_timer_expired;
1990
1991 MLD_LOCK_ASSERT_HELD();
1992 IN6M_LOCK_ASSERT_HELD(inm);
1993 MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
1994
1995 if (inm->in6m_timer == 0) {
1996 report_timer_expired = 0;
1997 } else if (--inm->in6m_timer == 0) {
1998 report_timer_expired = 1;
1999 } else {
2000 current_state_timers_running6 = 1;
2001 /* caller will schedule timer */
2002 return;
2003 }
2004
2005 switch (inm->in6m_state) {
2006 case MLD_NOT_MEMBER:
2007 case MLD_SILENT_MEMBER:
2008 case MLD_IDLE_MEMBER:
2009 case MLD_LAZY_MEMBER:
2010 case MLD_SLEEPING_MEMBER:
2011 case MLD_AWAKENING_MEMBER:
2012 break;
2013 case MLD_REPORTING_MEMBER:
2014 if (report_timer_expired) {
2015 inm->in6m_state = MLD_IDLE_MEMBER;
2016 (void) mld_v1_transmit_report(inm,
2017 MLD_LISTENER_REPORT);
2018 IN6M_LOCK_ASSERT_HELD(inm);
2019 MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
2020 }
2021 break;
2022 case MLD_G_QUERY_PENDING_MEMBER:
2023 case MLD_SG_QUERY_PENDING_MEMBER:
2024 case MLD_LEAVING_MEMBER:
2025 break;
2026 }
2027 }
2028
2029 /*
2030 * Update a group's timers for MLDv2.
2031 * Will update the global pending timer flags.
2032 * Note: Unlocked read from mli.
2033 */
2034 static void
2035 mld_v2_process_group_timers(struct mld_ifinfo *mli,
2036 struct ifqueue *qrq, struct ifqueue *scq,
2037 struct in6_multi *inm, const int uri_sec)
2038 {
2039 int query_response_timer_expired;
2040 int state_change_retransmit_timer_expired;
2041
2042 MLD_LOCK_ASSERT_HELD();
2043 IN6M_LOCK_ASSERT_HELD(inm);
2044 MLI_LOCK_ASSERT_HELD(mli);
2045 VERIFY(mli == inm->in6m_mli);
2046
2047 query_response_timer_expired = 0;
2048 state_change_retransmit_timer_expired = 0;
2049
2050 /*
2051 * During a transition from compatibility mode back to MLDv2,
2052 * a group record in REPORTING state may still have its group
2053 * timer active. This is a no-op in this function; it is easier
2054 * to deal with it here than to complicate the timeout path.
2055 */
2056 if (inm->in6m_timer == 0) {
2057 query_response_timer_expired = 0;
2058 } else if (--inm->in6m_timer == 0) {
2059 query_response_timer_expired = 1;
2060 } else {
2061 current_state_timers_running6 = 1;
2062 /* caller will schedule timer */
2063 }
2064
2065 if (inm->in6m_sctimer == 0) {
2066 state_change_retransmit_timer_expired = 0;
2067 } else if (--inm->in6m_sctimer == 0) {
2068 state_change_retransmit_timer_expired = 1;
2069 } else {
2070 state_change_timers_running6 = 1;
2071 /* caller will schedule timer */
2072 }
2073
2074 /* We are in timer callback, so be quick about it. */
2075 if (!state_change_retransmit_timer_expired &&
2076 !query_response_timer_expired) {
2077 return;
2078 }
2079
2080 switch (inm->in6m_state) {
2081 case MLD_NOT_MEMBER:
2082 case MLD_SILENT_MEMBER:
2083 case MLD_SLEEPING_MEMBER:
2084 case MLD_LAZY_MEMBER:
2085 case MLD_AWAKENING_MEMBER:
2086 case MLD_IDLE_MEMBER:
2087 break;
2088 case MLD_G_QUERY_PENDING_MEMBER:
2089 case MLD_SG_QUERY_PENDING_MEMBER:
2090 /*
2091 * Respond to a previously pending Group-Specific
2092 * or Group-and-Source-Specific query by enqueueing
2093 * the appropriate Current-State report for
2094 * immediate transmission.
2095 */
2096 if (query_response_timer_expired) {
2097 int retval;
2098
2099 retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
2100 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
2101 0);
2102 MLD_PRINTF(("%s: enqueue record = %d\n",
2103 __func__, retval));
2104 inm->in6m_state = MLD_REPORTING_MEMBER;
2105 in6m_clear_recorded(inm);
2106 }
2107 OS_FALLTHROUGH;
2108 case MLD_REPORTING_MEMBER:
2109 case MLD_LEAVING_MEMBER:
2110 if (state_change_retransmit_timer_expired) {
2111 /*
2112 * State-change retransmission timer fired.
2113 * If there are any further pending retransmissions,
2114 * set the global pending state-change flag, and
2115 * reset the timer.
2116 */
2117 if (--inm->in6m_scrv > 0) {
2118 inm->in6m_sctimer = (uint16_t)uri_sec;
2119 state_change_timers_running6 = 1;
2120 /* caller will schedule timer */
2121 }
2122 /*
2123 * Retransmit the previously computed state-change
2124 * report. If there are no further pending
2125 * retransmissions, the mbuf queue will be consumed.
2126 * Update T0 state to T1 as we have now sent
2127 * a state-change.
2128 */
2129 (void) mld_v2_merge_state_changes(inm, scq);
2130
2131 in6m_commit(inm);
2132 MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2133 ip6_sprintf(&inm->in6m_addr),
2134 if_name(inm->in6m_ifp)));
2135
2136 /*
2137 * If we are leaving the group for good, make sure
2138 * we release MLD's reference to it.
2139 * This release must be deferred using a SLIST,
2140 * as we are called from a loop which traverses
2141 * the in_ifmultiaddr TAILQ.
2142 */
2143 if (inm->in6m_state == MLD_LEAVING_MEMBER &&
2144 inm->in6m_scrv == 0) {
2145 inm->in6m_state = MLD_NOT_MEMBER;
2146 /*
2147 * A reference has already been held in
2148 * mld_final_leave() for this inm, so
2149 * no need to hold another one. We also
2150 * bumped up its request count then, so
2151 * that it stays in in6_multihead. Both
2152 * of them will be released when it is
2153 * dequeued later on.
2154 */
2155 VERIFY(inm->in6m_nrelecnt != 0);
2156 mld_append_relq(mli, inm);
2157 }
2158 }
2159 break;
2160 }
2161 }
2162
2163 /*
2164 * Switch to a different version on the given interface,
2165 * as per Section 9.12 of RFC 3810.
2166 */
2167 static uint32_t
2168 mld_set_version(struct mld_ifinfo *mli, const int mld_version)
2169 {
2170 int old_version_timer;
2171
2172 MLI_LOCK_ASSERT_HELD(mli);
2173
2174 os_log(OS_LOG_DEFAULT, "%s: switching to v%d on ifp %s\n", __func__,
2175 mld_version, if_name(mli->mli_ifp));
2176
2177 if (mld_version == MLD_VERSION_1) {
2178 /*
2179 * Compute the "Older Version Querier Present" timer as per
2180 * Section 9.12, in seconds.
2181 */
2182 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
2183 mli->mli_v1_timer = old_version_timer;
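/*
 * Illustration: with the RFC 3810 defaults of Robustness Variable 2,
 * Query Interval 125s and Query Response Interval 10s, this yields
 * (2 * 125) + 10 = 260 seconds.
 */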
2184 }
2185
2186 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
2187 mli->mli_version = MLD_VERSION_1;
2188 mld_v2_cancel_link_timers(mli);
2189 }
2190
2191 MLI_LOCK_ASSERT_HELD(mli);
2192
2193 return mli->mli_v1_timer;
2194 }
2195
2196 /*
2197 * Cancel pending MLDv2 timers for the given link and all groups
2198 * joined on it; state-change, general-query, and group-query timers.
2199 *
2200 * Only ever called on a transition from v2 to Compatibility mode. Kill
2201 * the timers stone dead (this may be expensive for large N groups); they
2202 * will be restarted if Compatibility Mode query processing deems it
2203 * necessary.
2204 */
2205 static void
2206 mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
2207 {
2208 struct ifnet *ifp;
2209 struct in6_multi *inm;
2210 struct in6_multistep step;
2211
2212 MLI_LOCK_ASSERT_HELD(mli);
2213
2214 MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n", __func__,
2215 (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp)));
2216
2217 /*
2218 * Stop the v2 General Query Response on this link stone dead.
2219 * If timer is woken up due to interface_timers_running6,
2220 * the flag will be cleared if there are no pending link timers.
2221 */
2222 mli->mli_v2_timer = 0;
2223
2224 /*
2225 * Now clear the current-state and state-change report timers
2226 * for all memberships scoped to this link.
2227 */
2228 ifp = mli->mli_ifp;
2229 MLI_UNLOCK(mli);
2230
2231 in6_multihead_lock_shared();
2232 IN6_FIRST_MULTI(step, inm);
2233 while (inm != NULL) {
2234 IN6M_LOCK(inm);
2235 if (inm->in6m_ifp != ifp) {
2236 goto next;
2237 }
2238
2239 switch (inm->in6m_state) {
2240 case MLD_NOT_MEMBER:
2241 case MLD_SILENT_MEMBER:
2242 case MLD_IDLE_MEMBER:
2243 case MLD_LAZY_MEMBER:
2244 case MLD_SLEEPING_MEMBER:
2245 case MLD_AWAKENING_MEMBER:
2246 /*
2247 * These states are either not relevant in v2 mode,
2248 * or are unreported. Do nothing.
2249 */
2250 break;
2251 case MLD_LEAVING_MEMBER:
2252 /*
2253 * If we are leaving the group and switching
2254 * version, we need to release the final
2255 * reference held for issuing the INCLUDE {}.
2256 * During mld_final_leave(), we bumped up both the
2257 * request and reference counts. Since we cannot
2258 * call in6_multi_detach() here, defer this task to
2259 * the timer routine.
2260 */
2261 VERIFY(inm->in6m_nrelecnt != 0);
2262 MLI_LOCK(mli);
2263 mld_append_relq(mli, inm);
2264 MLI_UNLOCK(mli);
2265 OS_FALLTHROUGH;
2266 case MLD_G_QUERY_PENDING_MEMBER:
2267 case MLD_SG_QUERY_PENDING_MEMBER:
2268 in6m_clear_recorded(inm);
2269 OS_FALLTHROUGH;
2270 case MLD_REPORTING_MEMBER:
2271 inm->in6m_state = MLD_REPORTING_MEMBER;
2272 break;
2273 }
2274 /*
2275 * Always clear state-change and group report timers.
2276 * Free any pending MLDv2 state-change records.
2277 */
2278 inm->in6m_sctimer = 0;
2279 inm->in6m_timer = 0;
2280 IF_DRAIN(&inm->in6m_scq);
2281 next:
2282 IN6M_UNLOCK(inm);
2283 IN6_NEXT_MULTI(step, inm);
2284 }
2285 in6_multihead_lock_done();
2286
2287 MLI_LOCK(mli);
2288 }
2289
2290 /*
2291 * Update the Older Version Querier Present timers for a link.
2292 * See Section 9.12 of RFC 3810.
2293 */
2294 static void
2295 mld_v1_process_querier_timers(struct mld_ifinfo *mli)
2296 {
2297 MLI_LOCK_ASSERT_HELD(mli);
2298
2299 if (mld_v2enable && mli->mli_version != MLD_VERSION_2 &&
2300 --mli->mli_v1_timer == 0) {
2301 /*
2302 * MLDv1 Querier Present timer expired; revert to MLDv2.
2303 */
2304 os_log(OS_LOG_DEFAULT, "%s: transition from v%d -> v%d on %s\n",
2305 __func__, mli->mli_version, MLD_VERSION_2,
2306 if_name(mli->mli_ifp));
2307 mli->mli_version = MLD_VERSION_2;
2308 }
2309 }
2310
2311 /*
2312 * Transmit an MLDv1 report immediately (the packet is queued on mli_v1q and dispatched from the mld_timeout() thread; see below).
2313 */
2314 static int
2315 mld_v1_transmit_report(struct in6_multi *in6m, const uint8_t type)
2316 {
2317 struct ifnet *ifp;
2318 struct in6_ifaddr *ia;
2319 struct ip6_hdr *ip6;
2320 struct mbuf *mh, *md;
2321 struct mld_hdr *mld;
2322 int error = 0;
2323
2324 IN6M_LOCK_ASSERT_HELD(in6m);
2325 MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);
2326
2327 ifp = in6m->in6m_ifp;
2328 /* ia may be NULL if link-local address is tentative. */
2329 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
2330
2331 MGETHDR(mh, M_DONTWAIT, MT_HEADER);
2332 if (mh == NULL) {
2333 if (ia != NULL) {
2334 IFA_REMREF(&ia->ia_ifa);
2335 }
2336 return ENOMEM;
2337 }
2338 MGET(md, M_DONTWAIT, MT_DATA);
2339 if (md == NULL) {
2340 m_free(mh);
2341 if (ia != NULL) {
2342 IFA_REMREF(&ia->ia_ifa);
2343 }
2344 return ENOMEM;
2345 }
2346 mh->m_next = md;
2347
2348 /*
2349 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
2350 * that ether_output() does not need to allocate another mbuf
2351 * for the header in the most common case.
2352 */
2353 MH_ALIGN(mh, sizeof(struct ip6_hdr));
2354 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
2355 mh->m_len = sizeof(struct ip6_hdr);
2356
2357 ip6 = mtod(mh, struct ip6_hdr *);
2358 ip6->ip6_flow = 0;
2359 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2360 ip6->ip6_vfc |= IPV6_VERSION;
2361 ip6->ip6_nxt = IPPROTO_ICMPV6;
2362 if (ia != NULL) {
2363 IFA_LOCK(&ia->ia_ifa);
2364 }
2365 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
2366 ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia);
2367 if (ia != NULL) {
2368 IFA_UNLOCK(&ia->ia_ifa);
2369 IFA_REMREF(&ia->ia_ifa);
2370 ia = NULL;
2371 }
2372 ip6->ip6_dst = in6m->in6m_addr;
2373 ip6_output_setdstifscope(mh, in6m->ifscope, NULL);
2374
2375 md->m_len = sizeof(struct mld_hdr);
2376 mld = mtod(md, struct mld_hdr *);
2377 mld->mld_type = type;
2378 mld->mld_code = 0;
2379 mld->mld_cksum = 0;
2380 mld->mld_maxdelay = 0;
2381 mld->mld_reserved = 0;
2382 mld->mld_addr = in6m->in6m_addr;
2383 in6_clearscope(&mld->mld_addr);
2384 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
2385 sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
2386
2387 mld_save_context(mh, ifp);
2388 mh->m_flags |= M_MLDV1;
2389
2390 /*
2391 * Due to the fact that at this point we are possibly holding
2392 * in6_multihead_lock in shared or exclusive mode, we can't call
2393 * mld_dispatch_packet() here since that will eventually call
2394 * ip6_output(), which will try to lock in6_multihead_lock and cause
2395 * a deadlock.
2396 * Instead we defer the work to the mld_timeout() thread, thus
2397 * avoiding unlocking in6_multihead_lock here.
2398 */
2399 if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
2400 os_log_error(OS_LOG_DEFAULT, "%s: v1 outbound queue full\n", __func__);
2401 error = ENOMEM;
2402 m_freem(mh);
2403 } else {
2404 IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
2405 VERIFY(error == 0);
2406 }
2407
2408 return error;
2409 }
2410
2411 /*
2412 * Process a state change from the upper layer for the given IPv6 group.
2413 *
2414 * Each socket holds a reference on the in6_multi in its own ip6_moptions.
2415 * The socket layer will have made the necessary updates to the group
2416 * state, it is now up to MLD to issue a state change report if there
2417 * has been any change between T0 (when the last state-change was issued)
2418 * and T1 (now).
2419 *
2420 * We use the MLDv2 state machine at group level. The MLD module,
2421 * however, makes the decision as to which MLD protocol version to speak.
2422 * A state change *from* INCLUDE {} always means an initial join.
2423 * A state change *to* INCLUDE {} always means a final leave.
2424 *
2425 * If delay is non-zero, and the state change is an initial multicast
2426 * join, the state change report will be delayed by 'delay' ticks
2427 * (in units of seconds) if MLDv1 is active on the link; otherwise
2428 * the initial MLDv2 state change report will be delayed by whichever
2429 * is sooner, a pending state-change timer or delay itself.
2430 */
2431 int
2432 mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp,
2433 const int delay)
2434 {
2435 struct mld_ifinfo *mli;
2436 struct ifnet *ifp;
2437 int error = 0;
2438
2439 VERIFY(mtp != NULL);
2440 bzero(mtp, sizeof(*mtp));
2441
2442 IN6M_LOCK_ASSERT_HELD(inm);
2443 VERIFY(inm->in6m_mli != NULL);
2444 MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);
2445
2446 /*
2447 * Try to detect if the upper layer just asked us to change state
2448 * for an interface which has now gone away.
2449 */
2450 VERIFY(inm->in6m_ifma != NULL);
2451 ifp = inm->in6m_ifma->ifma_ifp;
2452 /*
2453 * Sanity check that netinet6's notion of ifp is the same as net's.
2454 */
2455 VERIFY(inm->in6m_ifp == ifp);
2456
2457 mli = MLD_IFINFO(ifp);
2458 VERIFY(mli != NULL);
2459
2460 /*
2461 * If we detect a state transition to or from MCAST_UNDEFINED
2462 * for this group, then we are starting or finishing an MLD
2463 * life cycle for this group.
2464 */
2465 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
2466 MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__,
2467 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
2468 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
2469 MLD_PRINTF(("%s: initial join\n", __func__));
2470 error = mld_initial_join(inm, mli, mtp, delay);
2471 goto out;
2472 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
2473 MLD_PRINTF(("%s: final leave\n", __func__));
2474 mld_final_leave(inm, mli, mtp);
2475 goto out;
2476 }
2477 } else {
2478 MLD_PRINTF(("%s: filter set change\n", __func__));
2479 }
2480
2481 error = mld_handle_state_change(inm, mli, mtp);
2482 out:
2483 return error;
2484 }
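/*
 * Sketch of the expected calling convention (simplified; assumes the
 * join/leave paths in in6_mcast.c and the mld_set_timeout() helper):
 *
 *	struct mld_tparams mtp;
 *	IN6M_LOCK(inm);
 *	error = mld_change_state(inm, &mtp, 0);
 *	IN6M_UNLOCK(inm);
 *	mld_set_timeout(&mtp);	// arm whichever timers mtp requested
 *
 * mtp only records which timers need (re)arming; the caller kicks them
 * once the in6_multi lock has been dropped.
 */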
2485
2486 /*
2487 * Perform the initial join for an MLD group.
2488 *
2489 * When joining a group:
2490 * If the group should have its MLD traffic suppressed, do nothing.
2491 * MLDv1 starts sending MLDv1 host membership reports.
2492 * MLDv2 will schedule an MLDv2 state-change report containing the
2493 * initial state of the membership.
2494 *
2495 * If the delay argument is non-zero, then we must delay sending the
2496 * initial state change for delay ticks (in units of seconds).
2497 */
2498 static int
2499 mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
2500 struct mld_tparams *mtp, const int delay)
2501 {
2502 struct ifnet *ifp;
2503 struct ifqueue *ifq;
2504 int error, retval, syncstates;
2505 int odelay;
2506
2507 IN6M_LOCK_ASSERT_HELD(inm);
2508 MLI_LOCK_ASSERT_NOTHELD(mli);
2509 VERIFY(mtp != NULL);
2510
2511 MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n",
2512 __func__, ip6_sprintf(&inm->in6m_addr),
2513 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2514 if_name(inm->in6m_ifp)));
2515
2516 error = 0;
2517 syncstates = 1;
2518
2519 ifp = inm->in6m_ifp;
2520
2521 MLI_LOCK(mli);
2522 VERIFY(mli->mli_ifp == ifp);
2523
2524 /*
2525 * Avoid MLD if the group is:
2526 * 1. Joined on loopback, OR
2527 * 2. On a link that is marked MLIF_SILENT, OR
2528 * 3. rdar://problem/19227650 Link-local scoped and
2529 * on a cellular interface, OR
2530 * 4. Of a type that should not be reported (node-local
2531 * or all-nodes link-local multicast).
2532 * All other groups enter the appropriate state machine
2533 * for the version in use on this link.
2534 */
2535 if ((ifp->if_flags & IFF_LOOPBACK) ||
2536 (mli->mli_flags & MLIF_SILENT) ||
2537 (IFNET_IS_CELLULAR(ifp) &&
2538 (IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr) || IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(&inm->in6m_addr))) ||
2539 !mld_is_addr_reported(&inm->in6m_addr)) {
2540 MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2541 __func__));
2542 inm->in6m_state = MLD_SILENT_MEMBER;
2543 inm->in6m_timer = 0;
2544 } else {
2545 /*
2546 * Deal with overlapping in6_multi lifecycle.
2547 * If this group was LEAVING, then make sure
2548 * we drop the reference we picked up to keep the
2549 * group around for the final INCLUDE {} enqueue.
2550 * Since we cannot call in6_multi_detach() here,
2551 * defer this task to the timer routine.
2552 */
2553 if (mli->mli_version == MLD_VERSION_2 &&
2554 inm->in6m_state == MLD_LEAVING_MEMBER) {
2555 VERIFY(inm->in6m_nrelecnt != 0);
2556 mld_append_relq(mli, inm);
2557 }
2558
2559 inm->in6m_state = MLD_REPORTING_MEMBER;
2560
2561 switch (mli->mli_version) {
2562 case MLD_VERSION_1:
2563 /*
2564 * If a delay was provided, only use it if
2565 * it is greater than the delay normally
2566 * used for an MLDv1 state change report,
2567 * and delay sending the initial MLDv1 report
2568 * by not transitioning to the IDLE state.
2569 */
2570 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI);
2571 if (delay) {
2572 inm->in6m_timer = max(delay, odelay);
2573 mtp->cst = 1;
2574 } else {
2575 inm->in6m_state = MLD_IDLE_MEMBER;
2576 error = mld_v1_transmit_report(inm,
2577 MLD_LISTENER_REPORT);
2578
2579 IN6M_LOCK_ASSERT_HELD(inm);
2580 MLI_LOCK_ASSERT_HELD(mli);
2581
2582 if (error == 0) {
2583 inm->in6m_timer = odelay;
2584 mtp->cst = 1;
2585 }
2586 }
2587 break;
2588
2589 case MLD_VERSION_2:
2590 /*
2591 * Defer update of T0 to T1, until the first copy
2592 * of the state change has been transmitted.
2593 */
2594 syncstates = 0;
2595
2596 /*
2597 * Immediately enqueue a State-Change Report for
2598 * this interface, freeing any previous reports.
2599 * Don't kick the timers if there is nothing to do,
2600 * or if an error occurred.
2601 */
2602 ifq = &inm->in6m_scq;
2603 IF_DRAIN(ifq);
2604 retval = mld_v2_enqueue_group_record(ifq, inm, 1,
2605 0, 0, (mli->mli_flags & MLIF_USEALLOW));
2606 mtp->cst = (ifq->ifq_len > 0);
2607 MLD_PRINTF(("%s: enqueue record = %d\n",
2608 __func__, retval));
2609 if (retval <= 0) {
2610 error = retval * -1;
2611 break;
2612 }
2613
2614 /*
2615 * Schedule transmission of pending state-change
2616 * report up to RV times for this link. The timer
2617 * will fire at the next mld_timeout (1 second),
2618 * giving us an opportunity to merge the reports.
2619 *
2620 * If a delay was provided to this function, only
2621 * use this delay if sooner than the existing one.
2622 */
2623 VERIFY(mli->mli_rv > 1);
2624 inm->in6m_scrv = (uint16_t)mli->mli_rv;
2625 if (delay) {
2626 if (inm->in6m_sctimer > 1) {
2627 inm->in6m_sctimer =
2628 MIN(inm->in6m_sctimer, (uint16_t)delay);
2629 } else {
2630 inm->in6m_sctimer = (uint16_t)delay;
2631 }
2632 } else {
2633 inm->in6m_sctimer = 1;
2634 }
2635 mtp->sct = 1;
2636 error = 0;
2637 break;
2638 }
2639 }
2640 MLI_UNLOCK(mli);
2641
2642 /*
2643 * Only update the T0 state if state change is atomic,
2644 * i.e. we don't need to wait for a timer to fire before we
2645 * can consider the state change to have been communicated.
2646 */
2647 if (syncstates) {
2648 in6m_commit(inm);
2649 MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2650 ip6_sprintf(&inm->in6m_addr),
2651 if_name(inm->in6m_ifp)));
2652 }
2653
2654 return error;
2655 }
2656
2657 /*
2658 * Issue an intermediate state change during the life-cycle.
2659 */
2660 static int
2661 mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli,
2662 struct mld_tparams *mtp)
2663 {
2664 struct ifnet *ifp;
2665 int retval = 0;
2666
2667 IN6M_LOCK_ASSERT_HELD(inm);
2668 MLI_LOCK_ASSERT_NOTHELD(mli);
2669 VERIFY(mtp != NULL);
2670
2671 MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n",
2672 __func__, ip6_sprintf(&inm->in6m_addr),
2673 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2674 if_name(inm->in6m_ifp)));
2675
2676 ifp = inm->in6m_ifp;
2677
2678 MLI_LOCK(mli);
2679 VERIFY(mli->mli_ifp == ifp);
2680
2681 if ((ifp->if_flags & IFF_LOOPBACK) ||
2682 (mli->mli_flags & MLIF_SILENT) ||
2683 !mld_is_addr_reported(&inm->in6m_addr) ||
2684 (mli->mli_version != MLD_VERSION_2)) {
2685 MLI_UNLOCK(mli);
2686 if (!mld_is_addr_reported(&inm->in6m_addr)) {
2687 MLD_PRINTF(("%s: not kicking state machine for silent "
2688 "group\n", __func__));
2689 }
2690 MLD_PRINTF(("%s: nothing to do\n", __func__));
2691 in6m_commit(inm);
2692 MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2693 ip6_sprintf(&inm->in6m_addr),
2694 if_name(inm->in6m_ifp)));
2695 goto done;
2696 }
2697
2698 IF_DRAIN(&inm->in6m_scq);
2699
2700 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2701 (mli->mli_flags & MLIF_USEALLOW));
2702 mtp->cst = (inm->in6m_scq.ifq_len > 0);
2703 MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
2704 if (retval <= 0) {
2705 MLI_UNLOCK(mli);
2706 retval *= -1;
2707 goto done;
2708 } else {
2709 retval = 0;
2710 }
2711
2712 /*
2713 * If record(s) were enqueued, start the state-change
2714 * report timer for this group.
2715 */
2716 inm->in6m_scrv = (uint16_t)mli->mli_rv;
2717 inm->in6m_sctimer = 1;
2718 mtp->sct = 1;
2719 MLI_UNLOCK(mli);
2720
2721 done:
2722 return retval;
2723 }
2724
2725 /*
2726 * Perform the final leave for a multicast address.
2727 *
2728 * When leaving a group:
2729 * MLDv1 sends a DONE message, if and only if we are the reporter.
2730 * MLDv2 enqueues a state-change report containing a transition
2731 * to INCLUDE {} for immediate transmission.
2732 */
2733 static void
2734 mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli,
2735 struct mld_tparams *mtp)
2736 {
2737 int syncstates = 1;
2738
2739 IN6M_LOCK_ASSERT_HELD(inm);
2740 MLI_LOCK_ASSERT_NOTHELD(mli);
2741 VERIFY(mtp != NULL);
2742
2743 MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n",
2744 __func__, ip6_sprintf(&inm->in6m_addr),
2745 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
2746 if_name(inm->in6m_ifp)));
2747
2748 switch (inm->in6m_state) {
2749 case MLD_NOT_MEMBER:
2750 case MLD_SILENT_MEMBER:
2751 case MLD_LEAVING_MEMBER:
2752 /* Already leaving or left; do nothing. */
2753 MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2754 __func__));
2755 break;
2756 case MLD_REPORTING_MEMBER:
2757 case MLD_IDLE_MEMBER:
2758 case MLD_G_QUERY_PENDING_MEMBER:
2759 case MLD_SG_QUERY_PENDING_MEMBER:
2760 MLI_LOCK(mli);
2761 if (mli->mli_version == MLD_VERSION_1) {
2762 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2763 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
2764 panic("%s: MLDv2 state reached, not MLDv2 "
2765 "mode\n", __func__);
2766 /* NOTREACHED */
2767 }
2768 /* schedule timer if enqueue is successful */
2769 mtp->cst = (mld_v1_transmit_report(inm,
2770 MLD_LISTENER_DONE) == 0);
2771
2772 IN6M_LOCK_ASSERT_HELD(inm);
2773 MLI_LOCK_ASSERT_HELD(mli);
2774
2775 inm->in6m_state = MLD_NOT_MEMBER;
2776 } else if (mli->mli_version == MLD_VERSION_2) {
2777 /*
2778 * Stop group timer and all pending reports.
2779 * Immediately enqueue a state-change report
2780 * TO_IN {} to be sent on the next timeout,
2781 * giving us an opportunity to merge reports.
2782 */
2783 IF_DRAIN(&inm->in6m_scq);
2784 inm->in6m_timer = 0;
2785 inm->in6m_scrv = (uint16_t)mli->mli_rv;
2786 MLD_PRINTF(("%s: Leaving %s/%s with %d "
2787 "pending retransmissions.\n", __func__,
2788 ip6_sprintf(&inm->in6m_addr),
2789 if_name(inm->in6m_ifp),
2790 inm->in6m_scrv));
2791 if (inm->in6m_scrv == 0) {
2792 inm->in6m_state = MLD_NOT_MEMBER;
2793 inm->in6m_sctimer = 0;
2794 } else {
2795 int retval;
2796 /*
2797 * Stick around in the in6_multihead list;
2798 * the final detach will be issued by
2799 * mld_v2_process_group_timers() when
2800 * the retransmit timer expires.
2801 */
2802 IN6M_ADDREF_LOCKED(inm);
2803 VERIFY(inm->in6m_debug & IFD_ATTACHED);
2804 inm->in6m_reqcnt++;
2805 VERIFY(inm->in6m_reqcnt >= 1);
2806 inm->in6m_nrelecnt++;
2807 VERIFY(inm->in6m_nrelecnt != 0);
2808
2809 retval = mld_v2_enqueue_group_record(
2810 &inm->in6m_scq, inm, 1, 0, 0,
2811 (mli->mli_flags & MLIF_USEALLOW));
2812 mtp->cst = (inm->in6m_scq.ifq_len > 0);
2813 KASSERT(retval != 0,
2814 ("%s: enqueue record = %d\n", __func__,
2815 retval));
2816
2817 inm->in6m_state = MLD_LEAVING_MEMBER;
2818 inm->in6m_sctimer = 1;
2819 mtp->sct = 1;
2820 syncstates = 0;
2821 }
2822 }
2823 MLI_UNLOCK(mli);
2824 break;
2825 case MLD_LAZY_MEMBER:
2826 case MLD_SLEEPING_MEMBER:
2827 case MLD_AWAKENING_MEMBER:
2828 /* Our reports are suppressed; do nothing. */
2829 break;
2830 }
2831
2832 if (syncstates) {
2833 in6m_commit(inm);
2834 MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
2835 ip6_sprintf(&inm->in6m_addr),
2836 if_name(inm->in6m_ifp)));
2837 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2838 MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n",
2839 __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr),
2840 if_name(inm->in6m_ifp)));
2841 }
2842 }
2843
2844 /*
2845 * Enqueue an MLDv2 group record to the given output queue.
2846 *
2847 * If is_state_change is zero, a current-state record is appended.
2848 * If is_state_change is non-zero, a state-change report is appended.
2849 *
2850 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2851 * If is_group_query is zero, and there is a packet with free space
2852 * at the tail of the queue, the record will be appended to it,
2853 * provided there is enough free space.
2854 * Otherwise a new mbuf packet chain is allocated.
2855 *
2856 * If is_source_query is non-zero, each source is checked to see if
2857 * it was recorded for a Group-Source query, and will be omitted if
2858 * it is not both in-mode and recorded.
2859 *
2860 * If use_block_allow is non-zero, state change reports for initial join
2861 * and final leave, on an inclusive mode group with a source list, will be
2862 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2863 *
2864 * The function will attempt to allocate leading space in the packet
2865 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2866 *
2867 * If successful the size of all data appended to the queue is returned,
2868 * otherwise an error code less than zero is returned, or zero if
2869 * no record(s) were appended.
2870 */
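/*
 * Flag combinations used by the call sites in this file, for orientation:
 *	initial join / leave / filter change:	is_state_change = 1
 *	G or G-and-S query response:		is_group_query = 1,
 *		is_source_query = 1 only for a group-and-source query
 *	general query response:			all of the above zero
 */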
2871 static int
2872 mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
2873 const int is_state_change, const int is_group_query,
2874 const int is_source_query, const int use_block_allow)
2875 {
2876 struct mldv2_record mr;
2877 struct mldv2_record *pmr;
2878 struct ifnet *ifp;
2879 struct ip6_msource *ims, *nims;
2880 struct mbuf *m0, *m, *md;
2881 int error, is_filter_list_change;
2882 int minrec0len, m0srcs, msrcs, nbytes, off;
2883 int record_has_sources;
2884 int now;
2885 uint8_t type;
2886 uint8_t mode;
2887
2888 IN6M_LOCK_ASSERT_HELD(inm);
2889 MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
2890
2891 error = 0;
2892 ifp = inm->in6m_ifp;
2893 is_filter_list_change = 0;
2894 m = NULL;
2895 m0 = NULL;
2896 m0srcs = 0;
2897 msrcs = 0;
2898 nbytes = 0;
2899 nims = NULL;
2900 record_has_sources = 1;
2901 pmr = NULL;
2902 type = MLD_DO_NOTHING;
2903 mode = (uint8_t)inm->in6m_st[1].iss_fmode;
2904
2905 /*
2906 * If we did not transition out of ASM mode during t0->t1,
2907 * and there are no source nodes to process, we can skip
2908 * the generation of source records.
2909 */
2910 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2911 inm->in6m_nsrc == 0) {
2912 record_has_sources = 0;
2913 }
2914
2915 if (is_state_change) {
2916 /*
2917 * Queue a state change record.
2918 * If the mode did not change, and there are non-ASM
2919 * listeners or source filters present,
2920 * we potentially need to issue two records for the group.
2921 * If there are ASM listeners, and there was no filter
2922 * mode transition of any kind, do nothing.
2923 *
2924 * If we are transitioning to MCAST_UNDEFINED, we need
2925 * not send any sources. A transition to/from this state is
2926 * considered inclusive with some special treatment.
2927 *
2928 * If we are rewriting initial joins/leaves to use
2929 * ALLOW/BLOCK, and the group's membership is inclusive,
2930 * we need to send sources in all cases.
2931 */
2932 if (mode != inm->in6m_st[0].iss_fmode) {
2933 if (mode == MCAST_EXCLUDE) {
2934 MLD_PRINTF(("%s: change to EXCLUDE\n",
2935 __func__));
2936 type = MLD_CHANGE_TO_EXCLUDE_MODE;
2937 } else {
2938 MLD_PRINTF(("%s: change to INCLUDE\n",
2939 __func__));
2940 if (use_block_allow) {
2941 /*
2942 * XXX
2943 * Here we're interested in state
2944 * edges either direction between
2945 * MCAST_UNDEFINED and MCAST_INCLUDE.
2946 * Perhaps we should just check
2947 * the group state, rather than
2948 * the filter mode.
2949 */
2950 if (mode == MCAST_UNDEFINED) {
2951 type = MLD_BLOCK_OLD_SOURCES;
2952 } else {
2953 type = MLD_ALLOW_NEW_SOURCES;
2954 }
2955 } else {
2956 type = MLD_CHANGE_TO_INCLUDE_MODE;
2957 if (mode == MCAST_UNDEFINED) {
2958 record_has_sources = 0;
2959 }
2960 }
2961 }
2962 } else {
2963 if (record_has_sources) {
2964 is_filter_list_change = 1;
2965 } else {
2966 type = MLD_DO_NOTHING;
2967 }
2968 }
2969 } else {
2970 /*
2971 * Queue a current state record.
2972 */
2973 if (mode == MCAST_EXCLUDE) {
2974 type = MLD_MODE_IS_EXCLUDE;
2975 } else if (mode == MCAST_INCLUDE) {
2976 type = MLD_MODE_IS_INCLUDE;
2977 VERIFY(inm->in6m_st[1].iss_asm == 0);
2978 }
2979 }
2980
2981 /*
2982 * Generate the filter list changes using a separate function.
2983 */
2984 if (is_filter_list_change) {
2985 return mld_v2_enqueue_filter_change(ifq, inm);
2986 }
2987
2988 if (type == MLD_DO_NOTHING) {
2989 MLD_PRINTF(("%s: nothing to do for %s/%s\n",
2990 __func__, ip6_sprintf(&inm->in6m_addr),
2991 if_name(inm->in6m_ifp)));
2992 return 0;
2993 }
2994
2995 /*
2996 * If any sources are present, we must be able to fit at least
2997 * one in the trailing space of the tail packet's mbuf,
2998 * ideally more.
2999 */
3000 minrec0len = sizeof(struct mldv2_record);
3001 if (record_has_sources) {
3002 minrec0len += sizeof(struct in6_addr);
3003 }
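/*
 * (Packing arithmetic, illustrative: a group record header is
 * sizeof(struct mldv2_record) == 20 bytes and each source adds a 16-byte
 * in6_addr, so assuming a 1500-byte MTU a fresh packet can hold roughly
 * (1500 - MLD_MTUSPACE - 20) / 16 sources, i.e. on the order of 90
 * addresses.)
 */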
3004 MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__,
3005 mld_rec_type_to_str(type),
3006 ip6_sprintf(&inm->in6m_addr),
3007 if_name(inm->in6m_ifp)));
3008
3009 /*
3010 * Check if we have a packet in the tail of the queue for this
3011 * group into which the first group record for this group will fit.
3012 * Otherwise allocate a new packet.
3013 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
3014 * Note: Group records for G/GSR query responses MUST be sent
3015 * in their own packet.
3016 */
3017 m0 = ifq->ifq_tail;
3018 if (!is_group_query &&
3019 m0 != NULL &&
3020 (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
3021 (m0->m_pkthdr.len + minrec0len) <
3022 (ifp->if_mtu - MLD_MTUSPACE)) {
3023 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3024 sizeof(struct mldv2_record)) /
3025 sizeof(struct in6_addr);
3026 m = m0;
3027 MLD_PRINTF(("%s: use existing packet\n", __func__));
3028 } else {
3029 if (IF_QFULL(ifq)) {
3030 os_log_error(OS_LOG_DEFAULT,
3031 "%s: outbound queue full\n", __func__);
3032 return -ENOMEM;
3033 }
3034 m = NULL;
3035 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3036 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
3037 if (!is_state_change && !is_group_query) {
3038 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3039 }
3040 if (m == NULL) {
3041 m = m_gethdr(M_DONTWAIT, MT_DATA);
3042 }
3043 if (m == NULL) {
3044 return -ENOMEM;
3045 }
3046
3047 mld_save_context(m, ifp);
3048
3049 MLD_PRINTF(("%s: allocated first packet\n", __func__));
3050 }
3051
3052 /*
3053 * Append group record.
3054 * If we have sources, we don't know how many yet.
3055 */
3056 mr.mr_type = type;
3057 mr.mr_datalen = 0;
3058 mr.mr_numsrc = 0;
3059 mr.mr_addr = inm->in6m_addr;
3060 in6_clearscope(&mr.mr_addr);
3061 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
3062 if (m != m0) {
3063 m_freem(m);
3064 }
3065 os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed.\n", __func__);
3066 return -ENOMEM;
3067 }
3068 nbytes += sizeof(struct mldv2_record);
3069
3070 /*
3071 * Append as many sources as will fit in the first packet.
3072 * If we are appending to a new packet, the chain allocation
3073 * may potentially use clusters; use m_getptr() in this case.
3074 * If we are appending to an existing packet, we need to obtain
3075 * a pointer to the group record after m_append(), in case a new
3076 * mbuf was allocated.
3077 *
3078 * Only append sources which are in-mode at t1. If we are
3079 * transitioning to MCAST_UNDEFINED state on the group, and
3080 * use_block_allow is zero, do not include source entries.
3081 * Otherwise, we need to include this source in the report.
3082 *
3083 * Only report recorded sources in our filter set when responding
3084 * to a group-source query.
3085 */
3086 if (record_has_sources) {
3087 if (m == m0) {
3088 md = m_last(m);
3089 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
3090 md->m_len - nbytes);
3091 } else {
3092 md = m_getptr(m, 0, &off);
3093 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
3094 off);
3095 }
3096 msrcs = 0;
3097 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
3098 nims) {
3099 MLD_PRINTF(("%s: visit node %s\n", __func__,
3100 ip6_sprintf(&ims->im6s_addr)));
3101 now = im6s_get_mode(inm, ims, 1);
3102 MLD_PRINTF(("%s: node is %d\n", __func__, now));
3103 if ((now != mode) ||
3104 (now == mode &&
3105 (!use_block_allow && mode == MCAST_UNDEFINED))) {
3106 MLD_PRINTF(("%s: skip node\n", __func__));
3107 continue;
3108 }
3109 if (is_source_query && ims->im6s_stp == 0) {
3110 MLD_PRINTF(("%s: skip unrecorded node\n",
3111 __func__));
3112 continue;
3113 }
3114 MLD_PRINTF(("%s: append node\n", __func__));
3115 if (!m_append(m, sizeof(struct in6_addr),
3116 (void *)&ims->im6s_addr)) {
3117 if (m != m0) {
3118 m_freem(m);
3119 }
3120 os_log_error(OS_LOG_DEFAULT,
3121 "%s: m_append() failed\n",
3122 __func__);
3123 return -ENOMEM;
3124 }
3125 nbytes += sizeof(struct in6_addr);
3126 ++msrcs;
3127 if (msrcs == m0srcs) {
3128 break;
3129 }
3130 }
3131 MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
3132 msrcs));
3133 pmr->mr_numsrc = htons((uint16_t)msrcs);
3134 nbytes += (msrcs * sizeof(struct in6_addr));
3135 }
3136
3137 if (is_source_query && msrcs == 0) {
3138 MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
3139 if (m != m0) {
3140 m_freem(m);
3141 }
3142 return 0;
3143 }
3144
3145 /*
3146 * We are good to go with first packet.
3147 */
3148 if (m != m0) {
3149 MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
3150 m->m_pkthdr.vt_nrecs = 1;
3151 IF_ENQUEUE(ifq, m);
3152 } else {
3153 m->m_pkthdr.vt_nrecs++;
3154 }
3155 /*
3156 * No further work needed if no source list in packet(s).
3157 */
3158 if (!record_has_sources) {
3159 return nbytes;
3160 }
3161
3162 /*
3163 * Whilst sources remain to be announced, we need to allocate
3164 * a new packet and fill out as many sources as will fit.
3165 * Always try for a cluster first.
3166 */
3167 while (nims != NULL) {
3168 if (IF_QFULL(ifq)) {
3169 os_log_error(OS_LOG_DEFAULT, "%s: outbound queue full\n", __func__);
3170 return -ENOMEM;
3171 }
3172 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3173 if (m == NULL) {
3174 m = m_gethdr(M_DONTWAIT, MT_DATA);
3175 }
3176 if (m == NULL) {
3177 return -ENOMEM;
3178 }
3179 mld_save_context(m, ifp);
3180 md = m_getptr(m, 0, &off);
3181 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
3182 MLD_PRINTF(("%s: allocated next packet\n", __func__));
3183
3184 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
3185 if (m != m0) {
3186 m_freem(m);
3187 }
3188 os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed.\n", __func__);
3189 return -ENOMEM;
3190 }
3191 m->m_pkthdr.vt_nrecs = 1;
3192 nbytes += sizeof(struct mldv2_record);
3193
3194 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3195 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
3196
3197 msrcs = 0;
3198 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
3199 MLD_PRINTF(("%s: visit node %s\n",
3200 __func__, ip6_sprintf(&ims->im6s_addr)));
3201 now = im6s_get_mode(inm, ims, 1);
3202 if ((now != mode) ||
3203 (now == mode &&
3204 (!use_block_allow && mode == MCAST_UNDEFINED))) {
3205 MLD_PRINTF(("%s: skip node\n", __func__));
3206 continue;
3207 }
3208 if (is_source_query && ims->im6s_stp == 0) {
3209 MLD_PRINTF(("%s: skip unrecorded node\n",
3210 __func__));
3211 continue;
3212 }
3213 MLD_PRINTF(("%s: append node\n", __func__));
3214 if (!m_append(m, sizeof(struct in6_addr),
3215 (void *)&ims->im6s_addr)) {
3216 if (m != m0) {
3217 m_freem(m);
3218 }
3219 os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3220 __func__);
3221 return -ENOMEM;
3222 }
3223 ++msrcs;
3224 if (msrcs == m0srcs) {
3225 break;
3226 }
3227 }
3228 pmr->mr_numsrc = htons((uint16_t)msrcs);
3229 nbytes += (msrcs * sizeof(struct in6_addr));
3230
3231 MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
3232 IF_ENQUEUE(ifq, m);
3233 }
3234
3235 return nbytes;
3236 }
3237
3238 /*
3239 * Type used to mark record pass completion.
3240 * We exploit the fact that we can cast to this easily from the
3241 * current filter modes on each ip6_msource node.
3242 */
3243 typedef enum {
3244 REC_NONE = 0x00, /* MCAST_UNDEFINED */
3245 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
3246 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
3247 REC_FULL = REC_ALLOW | REC_BLOCK
3248 } rectype_t;
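/*
 * (How the walk in mld_v2_enqueue_filter_change() uses these: the first
 * in-mode delta encountered fixes the current record type crt; when a full
 * pass over the tree completes, drt |= crt marks that type done and crt
 * flips to the complementary type via (~crt & REC_FULL), so ALLOW_NEW and
 * BLOCK_OLD records are generated in two passes.)
 */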
3249
3250 /*
3251 * Enqueue an MLDv2 filter list change to the given output queue.
3252 *
3253 * Source list filter state is held in an RB-tree. When the filter list
3254 * for a group is changed without changing its mode, we need to compute
3255 * the deltas between T0 and T1 for each source in the filter set,
3256 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3257 *
3258 * As we may potentially queue two record types, and the entire R-B tree
3259 * needs to be walked at once, we break this out into its own function
3260 * so we can generate a tightly packed queue of packets.
3261 *
3262 * XXX This could be written to only use one tree walk, although that makes
3263 * serializing into the mbuf chains a bit harder. For now we do two walks
3264 * which makes things easier on us, and it may or may not be harder on
3265 * the L2 cache.
3266 *
3267 * If successful the size of all data appended to the queue is returned,
3268 * otherwise an error code less than zero is returned, or zero if
3269 * no record(s) were appended.
3270 */
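/*
 * Worked example (illustrative): a group in EXCLUDE mode whose source
 * filter changes from {B, C} at t0 to {C, D} at t1 produces ALLOW_NEW {B}
 * (B is no longer blocked) and BLOCK_OLD {D} (D is newly blocked);
 * C is unchanged and is skipped.
 */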
3271 static int
3272 mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
3273 {
3274 static const int MINRECLEN =
3275 sizeof(struct mldv2_record) + sizeof(struct in6_addr);
3276 struct ifnet *ifp;
3277 struct mldv2_record mr;
3278 struct mldv2_record *pmr;
3279 struct ip6_msource *ims, *nims;
3280 struct mbuf *m, *m0, *md;
3281 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3282 int nallow, nblock;
3283 uint8_t mode, now, then;
3284 rectype_t crt, drt, nrt;
3285
3286 IN6M_LOCK_ASSERT_HELD(inm);
3287
3288 if (inm->in6m_nsrc == 0 ||
3289 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) {
3290 return 0;
3291 }
3292
3293 ifp = inm->in6m_ifp; /* interface */
3294 mode = (uint8_t)inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
3295 crt = REC_NONE; /* current group record type */
3296 drt = REC_NONE; /* mask of completed group record types */
3297 nrt = REC_NONE; /* record type for current node */
3298 m0srcs = 0; /* # source which will fit in current mbuf chain */
3299 npbytes = 0; /* # of bytes appended this packet */
3300 nbytes = 0; /* # of bytes appended to group's state-change queue */
3301 rsrcs = 0; /* # sources encoded in current record */
3302 schanged = 0; /* # nodes encoded in overall filter change */
3303 nallow = 0; /* # of source entries in ALLOW_NEW */
3304 nblock = 0; /* # of source entries in BLOCK_OLD */
3305 nims = NULL; /* next tree node pointer */
3306
3307 /*
3308 * Walk the tree once for each possible filter record mode.
3309 * The first kind of source we encounter tells us which
3310 * kind of record we start appending.
3311 * If a node transitioned to UNDEFINED at t1, its mode is treated
3312 * as the inverse of the group's filter mode.
3313 */
3314 while (drt != REC_FULL) {
3315 do {
3316 m0 = ifq->ifq_tail;
3317 if (m0 != NULL &&
3318 (m0->m_pkthdr.vt_nrecs + 1 <=
3319 MLD_V2_REPORT_MAXRECS) &&
3320 (m0->m_pkthdr.len + MINRECLEN) <
3321 (ifp->if_mtu - MLD_MTUSPACE)) {
3322 m = m0;
3323 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3324 sizeof(struct mldv2_record)) /
3325 sizeof(struct in6_addr);
3326 MLD_PRINTF(("%s: use previous packet\n",
3327 __func__));
3328 } else {
3329 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3330 if (m == NULL) {
3331 m = m_gethdr(M_DONTWAIT, MT_DATA);
3332 }
3333 if (m == NULL) {
3334 os_log_error(OS_LOG_DEFAULT, "%s: m_get*() failed\n",
3335 __func__);
3336 return -ENOMEM;
3337 }
3338 m->m_pkthdr.vt_nrecs = 0;
3339 mld_save_context(m, ifp);
3340 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
3341 sizeof(struct mldv2_record)) /
3342 sizeof(struct in6_addr);
3343 npbytes = 0;
3344 MLD_PRINTF(("%s: allocated new packet\n",
3345 __func__));
3346 }
3347 /*
3348 * Append the MLD group record header to the
3349 * current packet's data area.
3350 * Recalculate pointer to free space for next
3351 * group record, in case m_append() allocated
3352 * a new mbuf or cluster.
3353 */
3354 memset(&mr, 0, sizeof(mr));
3355 mr.mr_addr = inm->in6m_addr;
3356 in6_clearscope(&mr.mr_addr);
3357 if (!m_append(m, sizeof(mr), (void *)&mr)) {
3358 if (m != m0) {
3359 m_freem(m);
3360 }
3361 os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3362 __func__);
3363 return -ENOMEM;
3364 }
3365 npbytes += sizeof(struct mldv2_record);
3366 if (m != m0) {
3367 /* new packet; offset in chain */
3368 md = m_getptr(m, npbytes -
3369 sizeof(struct mldv2_record), &off);
3370 pmr = (struct mldv2_record *)(mtod(md,
3371 uint8_t *) + off);
3372 } else {
3373 /* current packet; offset from last append */
3374 md = m_last(m);
3375 pmr = (struct mldv2_record *)(mtod(md,
3376 uint8_t *) + md->m_len -
3377 sizeof(struct mldv2_record));
3378 }
3379 /*
3380 * Begin walking the tree for this record type
3381 * pass, or continue from where we left off
3382 * previously if we had to allocate a new packet.
3383 * Only report deltas in-mode at t1.
3384 * We need not report included sources as allowed
3385 * if we are in inclusive mode on the group;
3386 * however, the converse is not true.
3387 */
3388 rsrcs = 0;
3389 if (nims == NULL) {
3390 nims = RB_MIN(ip6_msource_tree,
3391 &inm->in6m_srcs);
3392 }
3393 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
3394 MLD_PRINTF(("%s: visit node %s\n", __func__,
3395 ip6_sprintf(&ims->im6s_addr)));
3396 now = im6s_get_mode(inm, ims, 1);
3397 then = im6s_get_mode(inm, ims, 0);
3398 MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
3399 __func__, then, now));
3400 if (now == then) {
3401 MLD_PRINTF(("%s: skip unchanged\n",
3402 __func__));
3403 continue;
3404 }
3405 if (mode == MCAST_EXCLUDE &&
3406 now == MCAST_INCLUDE) {
3407 MLD_PRINTF(("%s: skip IN src on EX "
3408 "group\n", __func__));
3409 continue;
3410 }
3411 nrt = (rectype_t)now;
3412 if (nrt == REC_NONE) {
3413 nrt = (rectype_t)(~mode & REC_FULL);
3414 }
3415 if (schanged++ == 0) {
3416 crt = nrt;
3417 } else if (crt != nrt) {
3418 continue;
3419 }
3420 if (!m_append(m, sizeof(struct in6_addr),
3421 (void *)&ims->im6s_addr)) {
3422 if (m != m0) {
3423 m_freem(m);
3424 }
3425 os_log_error(OS_LOG_DEFAULT, "%s: m_append() failed\n",
3426 __func__);
3427 return -ENOMEM;
3428 }
3429 nallow += !!(crt == REC_ALLOW);
3430 nblock += !!(crt == REC_BLOCK);
3431 if (++rsrcs == m0srcs) {
3432 break;
3433 }
3434 }
3435 /*
3436 * If we did not append any tree nodes on this
3437 * pass, back out of allocations.
3438 */
3439 if (rsrcs == 0) {
3440 npbytes -= sizeof(struct mldv2_record);
3441 if (m != m0) {
3442 MLD_PRINTF(("%s: m_free(m)\n",
3443 __func__));
3444 m_freem(m);
3445 } else {
3446 MLD_PRINTF(("%s: m_adj(m, -mr)\n",
3447 __func__));
3448 m_adj(m, -((int)sizeof(
3449 struct mldv2_record)));
3450 }
3451 continue;
3452 }
3453 npbytes += (rsrcs * sizeof(struct in6_addr));
3454 if (crt == REC_ALLOW) {
3455 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
3456 } else if (crt == REC_BLOCK) {
3457 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
3458 }
3459 pmr->mr_numsrc = htons((uint16_t)rsrcs);
3460 /*
3461 * Count the new group record, and enqueue this
3462 * packet if it wasn't already queued.
3463 */
3464 m->m_pkthdr.vt_nrecs++;
3465 if (m != m0) {
3466 IF_ENQUEUE(ifq, m);
3467 }
3468 nbytes += npbytes;
3469 } while (nims != NULL);
3470 drt |= crt;
3471 crt = (~crt & REC_FULL);
3472 }
3473
3474 MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
3475 nallow, nblock));
3476
3477 return nbytes;
3478 }
3479
3480 static int
3481 mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
3482 {
3483 struct ifqueue *gq;
3484 struct mbuf *m; /* pending state-change */
3485 struct mbuf *m0; /* copy of pending state-change */
3486 struct mbuf *mt; /* last state-change in packet */
3487 struct mbuf *n;
3488 int docopy, domerge;
3489 u_int recslen;
3490
3491 IN6M_LOCK_ASSERT_HELD(inm);
3492
3493 docopy = 0;
3494 domerge = 0;
3495 recslen = 0;
3496
3497 /*
3498 * If there are further pending retransmissions, make a writable
3499 * copy of each queued state-change message before merging.
3500 */
3501 if (inm->in6m_scrv > 0) {
3502 docopy = 1;
3503 }
3504
3505 gq = &inm->in6m_scq;
3506 #ifdef MLD_DEBUG
3507 if (gq->ifq_head == NULL) {
3508 MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
3509 __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
3510 }
3511 #endif
3512
3513 /*
3514 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
3515 * packet might not always be at the head of the ifqueue.
3516 */
3517 m = gq->ifq_head;
3518 while (m != NULL) {
3519 /*
3520 * Only merge the report into the current packet if
3521 * there is sufficient space to do so; an MLDv2 report
3522 * packet may only contain 65,535 group records.
3523 * Always use a simple mbuf chain concatenation to do this,
3524 * as large state changes for single groups may have
3525 * allocated clusters.
3526 */
3527 domerge = 0;
3528 mt = ifscq->ifq_tail;
3529 if (mt != NULL) {
3530 recslen = m_length(m);
3531
3532 if ((mt->m_pkthdr.vt_nrecs +
3533 m->m_pkthdr.vt_nrecs <=
3534 MLD_V2_REPORT_MAXRECS) &&
3535 (mt->m_pkthdr.len + recslen <=
3536 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) {
3537 domerge = 1;
3538 }
3539 }
3540
3541 if (!domerge && IF_QFULL(gq)) {
3542 os_log_info(OS_LOG_DEFAULT, "%s: outbound queue full",
3543 __func__);
3544 n = m->m_nextpkt;
3545 if (!docopy) {
3546 IF_REMQUEUE(gq, m);
3547 m_freem(m);
3548 }
3549 m = n;
3550 continue;
3551 }
3552
3553 if (!docopy) {
3554 MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
3555 (uint64_t)VM_KERNEL_ADDRPERM(m)));
3556 n = m->m_nextpkt;
3557 IF_REMQUEUE(gq, m);
3558 m0 = m;
3559 m = n;
3560 } else {
3561 MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
3562 (uint64_t)VM_KERNEL_ADDRPERM(m)));
3563 m0 = m_dup(m, M_NOWAIT);
3564 if (m0 == NULL) {
3565 return ENOMEM;
3566 }
3567 m0->m_nextpkt = NULL;
3568 m = m->m_nextpkt;
3569 }
3570
3571 if (!domerge) {
3572 MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx)\n",
3573 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
3574 (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
3575 IF_ENQUEUE(ifscq, m0);
3576 } else {
3577 struct mbuf *mtl; /* last mbuf of packet mt */
3578
3579 MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
3580 "0x%llx)\n", __func__,
3581 (uint64_t)VM_KERNEL_ADDRPERM(m0),
3582 (uint64_t)VM_KERNEL_ADDRPERM(mt)));
3583
3584 mtl = m_last(mt);
3585 m0->m_flags &= ~M_PKTHDR;
3586 mt->m_pkthdr.len += recslen;
3587 mt->m_pkthdr.vt_nrecs +=
3588 m0->m_pkthdr.vt_nrecs;
3589
3590 mtl->m_next = m0;
3591 }
3592 }
3593
3594 return 0;
3595 }
3596
3597 /*
3598 * Respond to a pending MLDv2 General Query.
3599 */
3600 static uint32_t
3601 mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
3602 {
3603 struct ifnet *ifp;
3604 struct in6_multi *inm;
3605 struct in6_multistep step;
3606 int retval;
3607
3608 MLI_LOCK_ASSERT_HELD(mli);
3609
3610 VERIFY(mli->mli_version == MLD_VERSION_2);
3611
3612 ifp = mli->mli_ifp;
3613 MLI_UNLOCK(mli);
3614
3615 in6_multihead_lock_shared();
3616 IN6_FIRST_MULTI(step, inm);
3617 while (inm != NULL) {
3618 IN6M_LOCK(inm);
3619 if (inm->in6m_ifp != ifp) {
3620 goto next;
3621 }
3622
3623 switch (inm->in6m_state) {
3624 case MLD_NOT_MEMBER:
3625 case MLD_SILENT_MEMBER:
3626 break;
3627 case MLD_REPORTING_MEMBER:
3628 case MLD_IDLE_MEMBER:
3629 case MLD_LAZY_MEMBER:
3630 case MLD_SLEEPING_MEMBER:
3631 case MLD_AWAKENING_MEMBER:
3632 inm->in6m_state = MLD_REPORTING_MEMBER;
3633 MLI_LOCK(mli);
3634 retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3635 inm, 0, 0, 0, 0);
3636 MLI_UNLOCK(mli);
3637 MLD_PRINTF(("%s: enqueue record = %d\n",
3638 __func__, retval));
3639 break;
3640 case MLD_G_QUERY_PENDING_MEMBER:
3641 case MLD_SG_QUERY_PENDING_MEMBER:
3642 case MLD_LEAVING_MEMBER:
3643 break;
3644 }
3645 next:
3646 IN6M_UNLOCK(inm);
3647 IN6_NEXT_MULTI(step, inm);
3648 }
3649 in6_multihead_lock_done();
3650
3651 MLI_LOCK(mli);
3652 mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3653 MLI_LOCK_ASSERT_HELD(mli);
3654
3655 /*
3656 * Slew transmission of bursts over 1 second intervals.
3657 */
3658 if (mli->mli_gq.ifq_head != NULL) {
3659 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3660 MLD_RESPONSE_BURST_INTERVAL);
3661 }
3662
3663 return mli->mli_v2_timer;
3664 }
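/*
 * (Behavioral note on the function above: at most MLD_MAX_RESPONSE_BURST
 * packets are dispatched per pass; if the general-query queue is not yet
 * empty, mli_v2_timer is re-armed with a small random delay so the rest of
 * the response is spread across subsequent 1-second ticks.)
 */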
3665
3666 /*
3667 * Transmit the next pending message in the output queue.
3668 *
3669 * Must not be called with in6m_lock or mli_lock held.
3670 */
3671 static void
3672 mld_dispatch_packet(struct mbuf *m)
3673 {
3674 struct ip6_moptions *im6o;
3675 struct ifnet *ifp;
3676 struct ifnet *oifp = NULL;
3677 struct mbuf *m0;
3678 struct mbuf *md;
3679 struct ip6_hdr *ip6;
3680 struct mld_hdr *mld;
3681 int error;
3682 int off;
3683 int type;
3684
3685 MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
3686 (uint64_t)VM_KERNEL_ADDRPERM(m)));
3687
3688 /*
3689 * Check if the ifnet is still attached.
3690 */
3691 ifp = mld_restore_context(m);
3692 if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
3693 os_log_error(OS_LOG_DEFAULT, "%s: dropped 0x%llx as interface went away\n",
3694 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
3695 m_freem(m);
3696 ip6stat.ip6s_noroute++;
3697 return;
3698 }
3699
3700 im6o = ip6_allocmoptions(Z_WAITOK);
3701 if (im6o == NULL) {
3702 m_freem(m);
3703 return;
3704 }
3705
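	/*
	 * MLD traffic is always link-local: hop limit 1, no loopback,
	 * pinned to the interface recorded in the mbuf context.
	 */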
	im6o->im6o_multicast_hlim = 1;
	im6o->im6o_multicast_loop = 0;
	im6o->im6o_multicast_ifp = ifp;

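	/*
	 * MLDv1 messages are transmitted as-is; MLDv2 report records
	 * still need an IPv6 header and report header prepended.
	 */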
	if (m->m_flags & M_MLDV1) {
		m0 = m;
	} else {
		m0 = mld_v2_encap_report(ifp, m);
		if (m0 == NULL) {
			os_log_error(OS_LOG_DEFAULT, "%s: dropped 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m));
			/*
			 * mld_v2_encap_report() has already freed our mbuf.
			 */
			IM6O_REMREF(im6o);
			ip6stat.ip6s_odropped++;
			return;
		}
	}

	mld_scrub_context(m0);
	m->m_flags &= ~(M_PROTOFLAGS);
	m0->m_pkthdr.rcvif = lo_ifp;

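	/*
	 * Stamp the link-scope destination with the outgoing
	 * interface's scope zone before handing off to ip6_output().
	 */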
	ip6 = mtod(m0, struct ip6_hdr *);
	(void)in6_setscope(&ip6->ip6_dst, ifp, NULL);
	ip6_output_setdstifscope(m0, ifp->if_index, NULL);
	/*
	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
	 * so we can bump the stats.
	 */
	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
	mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
	type = mld->mld_type;

	if (ifp->if_eflags & IFEF_TXSTART) {
		/*
		 * Use the control service class if the outgoing
		 * interface supports the transmit-start model.
		 */
		(void) m_set_service_class(m0, MBUF_SC_CTL);
	}

	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
	    &oifp, NULL);

	IM6O_REMREF(im6o);

	if (error) {
		os_log_error(OS_LOG_DEFAULT, "%s: ip6_output(0x%llx) = %d\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(m0), error);
		if (oifp != NULL) {
			ifnet_release(oifp);
		}
		return;
	}

	icmp6stat.icp6s_outhist[type]++;
	if (oifp != NULL) {
		icmp6_ifstat_inc(oifp, ifs6_out_msg);
		switch (type) {
		case MLD_LISTENER_REPORT:
		case MLDV2_LISTENER_REPORT:
			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
			break;
		case MLD_LISTENER_DONE:
			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
			break;
		}
		ifnet_release(oifp);
	}
}

/*
 * Encapsulate an MLDv2 report.
 *
 * KAME IPv6 requires that hop-by-hop options be passed separately,
 * and that the IPv6 header be prepended in a separate mbuf.
 *
 * Returns a pointer to the new mbuf chain head, or NULL if the
 * allocation failed.
 */
static struct mbuf *
mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf *mh;
	struct mldv2_report *mld;
	struct ip6_hdr *ip6;
	struct in6_ifaddr *ia;
	int mldreclen;

	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * RFC 3590: OK to send as :: or tentative during DAD.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
	if (ia == NULL) {
		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
	}

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL) {
			IFA_REMREF(&ia->ia_ifa);
		}
		m_freem(m);
		return NULL;
	}
	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));

	mldreclen = m_length(m);
	MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));

	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
	    sizeof(struct mldv2_report) + mldreclen;

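	/*
	 * Build the IPv6 header: the source is the interface's
	 * link-local address (or :: while DAD is in progress, per
	 * RFC 3590); the destination is all-MLDv2-routers (ff02::16).
	 */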
	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	if (ia != NULL) {
		IFA_LOCK(&ia->ia_ifa);
	}
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	ip6_output_setsrcifscope(mh, IFSCOPE_NONE, ia);

	if (ia != NULL) {
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}
	ip6->ip6_dst = in6addr_linklocal_allv2routers;
	ip6_output_setdstifscope(mh, ifp->if_index, NULL);
	/* scope ID will be set in netisr */

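	/*
	 * Fill in the MLDv2 report header; the checksum is computed
	 * over the whole chain once the records are linked on.
	 */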
	mld = (struct mldv2_report *)(ip6 + 1);
	mld->mld_type = MLDV2_LISTENER_REPORT;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_v2_reserved = 0;
	mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
	m->m_pkthdr.vt_nrecs = 0;
	m->m_flags &= ~M_PKTHDR;

	mh->m_next = m;
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
	return mh;
}

#ifdef MLD_DEBUG
static const char *
mld_rec_type_to_str(const int type)
{
	switch (type) {
	case MLD_CHANGE_TO_EXCLUDE_MODE:
		return "TO_EX";
	case MLD_CHANGE_TO_INCLUDE_MODE:
		return "TO_IN";
	case MLD_MODE_IS_EXCLUDE:
		return "MODE_EX";
	case MLD_MODE_IS_INCLUDE:
		return "MODE_IN";
	case MLD_ALLOW_NEW_SOURCES:
		return "ALLOW_NEW";
	case MLD_BLOCK_OLD_SOURCES:
		return "BLOCK_OLD";
	default:
		break;
	}
	return "unknown";
}
#endif /* MLD_DEBUG */

void
mld_init(void)
{
	os_log(OS_LOG_DEFAULT, "%s: initializing\n", __func__);

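	/*
	 * Pre-build the packet options shared by all MLD output:
	 * hop limit 1, the IPv6 Router Alert hop-by-hop option,
	 * no preference for temporary source addresses, and
	 * don't-fragment.
	 */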
	ip6_initpktopts(&mld_po);
	mld_po.ip6po_hlim = 1;
	mld_po.ip6po_hbh = &mld_ra.hbh;
	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
	mld_po.ip6po_flags = IP6PO_DONTFRAG;
	LIST_INIT(&mli_head);
}