1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #define _IP_VHL
62
63 /*
64 * RFC1827/2406 Encapsulated Security Payload.
65 */
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/mcache.h>
72 #include <sys/domain.h>
73 #include <sys/protosw.h>
74 #include <sys/socket.h>
75 #include <sys/errno.h>
76 #include <sys/time.h>
77 #include <sys/kernel.h>
78 #include <sys/syslog.h>
79
80 #include <net/if.h>
81 #include <net/if_ipsec.h>
82 #include <net/multi_layer_pkt_log.h>
83 #include <net/route.h>
84 #include <net/if_ports_used.h>
85 #include <kern/cpu_number.h>
86 #include <kern/locks.h>
87
88 #include <netinet/in.h>
89 #include <netinet/in_systm.h>
90 #include <netinet/ip.h>
91 #include <netinet/ip_var.h>
92 #include <netinet/in_var.h>
93 #include <netinet/ip_ecn.h>
94 #include <netinet/in_pcb.h>
95 #include <netinet/udp.h>
96 #include <netinet/tcp.h>
97 #include <netinet/in_tclass.h>
98 #include <netinet6/ip6_ecn.h>
99
100 #include <netinet/ip6.h>
101 #include <netinet6/in6_pcb.h>
102 #include <netinet6/ip6_var.h>
103 #include <netinet/icmp6.h>
104 #include <netinet6/ip6protosw.h>
105
106 #include <netinet6/ipsec.h>
107 #include <netinet6/ipsec6.h>
108 #include <netinet6/ah.h>
109 #include <netinet6/ah6.h>
110 #include <netinet6/esp.h>
111 #include <netinet6/esp6.h>
112 #include <netkey/key.h>
113 #include <netkey/keydb.h>
114 #include <netkey/key_debug.h>
115
116 #include <net/kpi_protocol.h>
117 #include <netinet/kpi_ipfilter_var.h>
118
119 #include <net/net_osdep.h>
120 #include <mach/sdt.h>
121 #include <corecrypto/cc.h>
122
123 #include <sys/kdebug.h>
124 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
125 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
126 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
127 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
128 #define IPLEN_FLIPPED
129
130 #define ESPMAXLEN \
131 (sizeof(struct esp) < sizeof(struct newesp) \
132 ? sizeof(struct newesp) : sizeof(struct esp))
133
134 static struct ip *
esp4_input_strip_udp_encap(struct mbuf * m,int iphlen)135 esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
136 {
137 // strip the udp header that's encapsulating ESP
138 struct ip *ip;
139 u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr);
140
141 ip = mtod(m, __typeof__(ip));
142 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
143 m->m_data += stripsiz;
144 m->m_len -= stripsiz;
145 m->m_pkthdr.len -= stripsiz;
146 ip = mtod(m, __typeof__(ip));
147 ip->ip_len = ip->ip_len - stripsiz;
148 ip->ip_p = IPPROTO_ESP;
149 return ip;
150 }
151
152 static struct ip6_hdr *
esp6_input_strip_udp_encap(struct mbuf * m,int ip6hlen)153 esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
154 {
155 // strip the udp header that's encapsulating ESP
156 struct ip6_hdr *ip6;
157 u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr);
158
159 ip6 = mtod(m, __typeof__(ip6));
160 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
161 m->m_data += stripsiz;
162 m->m_len -= stripsiz;
163 m->m_pkthdr.len -= stripsiz;
164 ip6 = mtod(m, __typeof__(ip6));
165 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
166 ip6->ip6_nxt = IPPROTO_ESP;
167 return ip6;
168 }
169
170 static void
esp_input_log(struct mbuf * m,struct secasvar * sav,u_int32_t spi,u_int32_t seq)171 esp_input_log(struct mbuf *m, struct secasvar *sav, u_int32_t spi, u_int32_t seq)
172 {
173 if (net_mpklog_enabled &&
174 (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
175 struct tcphdr th = {};
176 u_int32_t proto_len = 0;
177 u_int8_t iphlen = 0;
178 u_int8_t proto = 0;
179
180 struct ip *inner_ip = mtod(m, struct ip *);
181 if (IP_VHL_V(inner_ip->ip_vhl) == 4) {
182 iphlen = (u_int8_t)(IP_VHL_HL(inner_ip->ip_vhl) << 2);
183 proto = inner_ip->ip_p;
184 } else if (IP_VHL_V(inner_ip->ip_vhl) == 6) {
185 struct ip6_hdr *inner_ip6 = mtod(m, struct ip6_hdr *);
186 iphlen = sizeof(struct ip6_hdr);
187 proto = inner_ip6->ip6_nxt;
188 }
189
190 if (proto == IPPROTO_TCP) {
191 if ((int)(iphlen + sizeof(th)) <= m->m_pkthdr.len) {
192 m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
193 }
194
195 proto_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
196 MPKL_ESP_INPUT_TCP(esp_mpkl_log_object,
197 ntohl(spi), seq,
198 ntohs(th.th_sport), ntohs(th.th_dport),
199 ntohl(th.th_seq), proto_len);
200 }
201 }
202 }
203
204 void
esp4_input(struct mbuf * m,int off)205 esp4_input(struct mbuf *m, int off)
206 {
207 (void)esp4_input_extended(m, off, NULL);
208 }
209
/*
 * esp4_input_extended - IPv4 ESP input processing.
 *
 * Locates the SA for the packet's SPI, verifies the ICV / replay window,
 * decrypts the payload, strips ESP framing (and any UDP encapsulation),
 * and delivers the inner packet: either re-injected through an ipsec
 * interface, via proto_input() for tunnel mode, or up the local protocol
 * switch for transport mode.
 *
 * m         - received mbuf chain (consumed on all paths except when
 *             returned; freed on error).
 * off       - offset from start of the mbuf data to the ESP header
 *             (includes outer IP and, for UDP-encap, the UDP header).
 * interface - optional ipsec interface filter; when it matches the SA's
 *             interface the decrypted mbuf is returned to the caller
 *             instead of being injected.
 *
 * Returns the decrypted mbuf when `interface` matched, otherwise NULL.
 */
struct mbuf *
esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
{
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	u_int32_t seq;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t esplen;
	u_int8_t hlen;
	sa_family_t ifamily;
	struct mbuf *out_m = NULL;
	mbuf_traffic_class_t traffic_class = 0;

	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
		    "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* Make the fixed ESP header contiguous for direct access below. */
	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	/* Hardware checksum state refers to the outer packet; invalidate it. */
	m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	// expect udp-encap and esp packets only
	if (ip->ip_p != IPPROTO_ESP &&
	    !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: invalid protocol type\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = (u_int8_t)(IP_VHL_HL(ip->ip_vhl) << 2);
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa_extended(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, IFSCOPE_NONE,
	    IPPROTO_ESP, spi, interface)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	seq = ntohl(((struct newesp *)esp)->esp_seq);

	/*
	 * With per-traffic-class sequence numbers, the replay window is
	 * selected by the DSCP value of the outer IP header.
	 */
	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
		u_int8_t dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
		traffic_class = rfc4594_dscp_to_tc(dscp);
	}

	/*
	 * Replay checking requires a new-format SA with a replay window and
	 * either a keyed auth algorithm or combined-mode (AEAD) decryption.
	 */
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL &&
	    ((sav->alg_auth && sav->key_auth) || algo->finalizedecrypt))) {
		goto noreplaycheck;
	}

	if ((sav->alg_auth == SADB_X_AALG_NULL || sav->alg_auth == SADB_AALG_NONE) &&
	    !algo->finalizedecrypt) {
		goto noreplaycheck;
	}

	/*
	 * check for sequence number.
	 */
	_CASSERT(MBUF_TC_MAX <= UINT8_MAX);
	if (ipsec_chkreplay(seq, sav, (u_int8_t)traffic_class)) {
		; /*okey*/
	} else {
		IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: seq(%u) tc(%u) %s %s\n",
		    seq, (u_int8_t)traffic_class, ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* Save ICV from packet for verification later */
	size_t siz = 0;
	unsigned char saved_icv[AH_MAXSUMSIZE] __attribute__((aligned(4)));
	if (algo->finalizedecrypt) {
		/* AEAD: ICV is checked by finalizedecrypt() after decryption. */
		siz = algo->icvlen;
		VERIFY(siz <= USHRT_MAX);
		m_copydata(m, m->m_pkthdr.len - (u_short)siz, (u_short)siz, (caddr_t) saved_icv);
	} else {
		/* check ICV immediately */
		u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		const struct ah_algorithm *sumalgo;

		sumalgo = ah_algorithm_lookup(sav->alg_auth);
		if (!sumalgo) {
			goto noreplaycheck;
		}
		/* Round the authenticator size up to a 4-byte multiple. */
		siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		if (AH_MAXSUMSIZE < siz) {
			ipseclog((LOG_DEBUG,
			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
			    (u_int32_t)siz));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}

		m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]);

		if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
			ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

		/* Constant-time compare to avoid leaking the ICV via timing. */
		if (cc_cmp_safe(siz, sum0, sum)) {
			ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

		m->m_flags |= M_AUTHIPDGM;
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);

		/*
		 * update replay window.
		 */
		if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
			if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
				goto bad;
			}
		}
	}


	/* strip off the authentication data */
	m_adj(m, (int)-siz);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - (u_short)siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV) {
			esplen = sizeof(struct esp);
		} else {
			esplen = sizeof(struct newesp);
		}
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, (int)(off + esplen + ivlen));
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		ip = mtod(m, struct ip *);
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt) {
		panic("internal error: no decrypt function");
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
		goto bad;
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
	IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

	m->m_flags |= M_DECRYPTED;

	/* AEAD: verify the ICV saved earlier, then update the replay window. */
	if (algo->finalizedecrypt) {
		if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
			ipseclog((LOG_ERR, "esp4 packet decryption ICV failure: %s\n",
			    ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
			goto bad;
		} else {
			m->m_flags |= M_AUTHIPDGM;
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);

			/*
			 * update replay window.
			 */
			if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
				if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
					goto bad;
				}
			}
		}
	}

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	    || m->m_pkthdr.len - taillen < hlen) { /*?*/
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, (int)-taillen);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - (u_short)taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
	if (ip->ip_p == IPPROTO_UDP) {
		// offset includes the outer ip and udp header lengths.
		if (m->m_len < off) {
			m = m_pullup(m, off);
			if (!m) {
				ipseclog((LOG_DEBUG,
				    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}
			ip = mtod(m, struct ip *);
		}

		// check the UDP encap header to detect changes in the source port, and then strip the header
		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
		// if peer is behind nat and this is the latest esp packet
		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
		    seq && sav->replay[traffic_class] &&
		    seq >= sav->replay[traffic_class]->lastseq) {
			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
			/* NAT rebinding: track the peer's current IKE/UDP source port. */
			if (encap_uh->uh_sport &&
			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
			}
		}
		ip = esp4_input_strip_udp_encap(m, off);
		esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
	}

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) {
		ifaddr_t ifa;
		struct sockaddr_storage addr;

		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos, otos;
		u_int8_t inner_ip_proto = 0;
		int sum;

		tos = ip->ip_tos;
		m_adj(m, (int)(off + esplen + ivlen));
		if (ifamily == AF_INET) {
			struct sockaddr_in *ipaddr;

			if (m->m_len < sizeof(*ip)) {
				m = m_pullup(m, sizeof(*ip));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}
			ip = mtod(m, struct ip *);
			/* ECN consideration. */

			otos = ip->ip_tos;
			if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			/* Incrementally patch the header checksum if TOS changed. */
			if (otos != ip->ip_tos) {
				sum = ~ntohs(ip->ip_sum) & 0xffff;
				sum += (~otos & 0xffff) + ip->ip_tos;
				sum = (sum >> 16) + (sum & 0xffff);
				sum += (sum >> 16);  /* add carry */
				ip->ip_sum = htons(~sum & 0xffff);
			}

			if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			inner_ip_proto = ip->ip_p;

			bzero(&addr, sizeof(addr));
			ipaddr = (__typeof__(ipaddr))&addr;
			ipaddr->sin_family = AF_INET;
			ipaddr->sin_len = sizeof(*ipaddr);
			ipaddr->sin_addr = ip->ip_dst;
		} else if (ifamily == AF_INET6) {
			struct sockaddr_in6 *ip6addr;

			/*
			 * m_pullup is prohibited in KAME IPv6 input processing
			 * but there's no other way!
			 */
			if (m->m_len < sizeof(*ip6)) {
				m = m_pullup(m, sizeof(*ip6));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}

			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

			ip6 = mtod(m, struct ip6_hdr *);

			/* ECN consideration. */
			if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (!key_checktunnelsanity(sav, AF_INET6,
			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			inner_ip_proto = ip6->ip6_nxt;

			bzero(&addr, sizeof(addr));
			ip6addr = (__typeof__(ip6addr))&addr;
			ip6addr->sin6_family = AF_INET6;
			ip6addr->sin6_len = sizeof(*ip6addr);
			ip6addr->sin6_addr = ip6->ip6_dst;
		} else {
			ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
			    "in ESP input\n"));
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		// update the receiving interface address based on the inner address
		ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
		if (ifa) {
			m->m_pkthdr.rcvif = ifa->ifa_ifp;
			IFA_REMREF(ifa);
		}

		/* Clear the csum flags, they can't be valid for the inner headers */
		m->m_pkthdr.csum_flags = 0;

		// Input via IPsec interface
		lck_mtx_lock(sadb_mutex);
		ifnet_t ipsec_if = sav->sah->ipsec_if;
		if (ipsec_if != NULL) {
			// If an interface is found, add a reference count before dropping the lock
			ifnet_reference(ipsec_if);
		}
		lck_mtx_unlock(sadb_mutex);

		if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
			if (m->m_pkthdr.rcvif != NULL) {
				if_ports_used_match_mbuf(m->m_pkthdr.rcvif, ifamily, m);
			} else {
				ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
			}
		}

		if (ipsec_if != NULL) {
			esp_input_log(m, sav, spi, seq);
			ipsec_save_wake_packet(m, ntohl(spi), seq);

			// Return mbuf
			if (interface != NULL &&
			    interface == ipsec_if) {
				out_m = m;
				ifnet_release(ipsec_if);
				goto done;
			}

			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
			ifnet_release(ipsec_if);

			if (inject_error == 0) {
				m = NULL;
				goto done;
			} else {
				goto bad;
			}
		}

		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
			goto bad;
		}

		nxt = IPPROTO_DONE;
		KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0);
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

		ip = mtod(m, struct ip *);
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - (u_short)stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		ip->ip_p = (u_int8_t)nxt;

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		/*
		 * Set the csum valid flag, if we authenticated the
		 * packet, the payload shouldn't be corrupt unless
		 * it was corrupted before being signed on the other
		 * side.
		 */
		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
			_CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
		}

		if (nxt != IPPROTO_DONE) {
			if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
				goto bad;
			}
			KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0);

			/* translate encapsulated UDP port ? */
			if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
				struct udphdr *udp;

				if (nxt != IPPROTO_UDP) {       /* not UDP packet - drop it */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}

				if (m->m_len < off + sizeof(struct udphdr)) {
					m = m_pullup(m, off + sizeof(struct udphdr));
					if (!m) {
						ipseclog((LOG_DEBUG,
						    "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
						IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
						goto bad;
					}
					ip = mtod(m, struct ip *);
				}
				udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

				lck_mtx_lock(sadb_mutex);
				if (sav->natt_encapsulated_src_port == 0) {
					sav->natt_encapsulated_src_port = udp->uh_sport;
				} else if (sav->natt_encapsulated_src_port != udp->uh_sport) {   /* something wrong */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					lck_mtx_unlock(sadb_mutex);
					goto bad;
				}
				lck_mtx_unlock(sadb_mutex);
				udp->uh_sport = htons(sav->remote_ike_port);
				udp->uh_sum = 0;
			}

			DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
			    struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
			    struct ip *, ip, struct ip6_hdr *, NULL);

			// Input via IPsec interface legacy path
			lck_mtx_lock(sadb_mutex);
			ifnet_t ipsec_if = sav->sah->ipsec_if;
			if (ipsec_if != NULL) {
				// If an interface is found, add a reference count before dropping the lock
				ifnet_reference(ipsec_if);
			}
			lck_mtx_unlock(sadb_mutex);
			if (ipsec_if != NULL) {
				int mlen;
				if ((mlen = m_length2(m, NULL)) < hlen) {
					ipseclog((LOG_DEBUG,
					    "IPv4 ESP input: decrypted packet too short %d < %u\n",
					    mlen, hlen));
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					ifnet_release(ipsec_if);
					goto bad;
				}
				/*
				 * NOTE(review): this restores the header fields that the
				 * IPv4 input path converted/adjusted (ip_len back to
				 * network order including the header length, ip_off back
				 * to network order) and recomputes the header checksum
				 * before re-injecting the packet — presumably so it looks
				 * like a freshly received datagram; confirm against the
				 * ip_input flipping behavior.
				 */
				ip->ip_len = htons(ip->ip_len + hlen);
				ip->ip_off = htons(ip->ip_off);
				ip->ip_sum = 0;
				ip->ip_sum = ip_cksum_hdr_in(m, hlen);

				esp_input_log(m, sav, spi, seq);
				ipsec_save_wake_packet(m, ntohl(spi), seq);

				if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
					if_ports_used_match_mbuf(ipsec_if, PF_INET, m);
				}

				// Return mbuf
				if (interface != NULL &&
				    interface == ipsec_if) {
					out_m = m;
					ifnet_release(ipsec_if);
					goto done;
				}

				errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
				ifnet_release(ipsec_if);

				if (inject_error == 0) {
					m = NULL;
					goto done;
				} else {
					goto bad;
				}
			}

			if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
				if_ports_used_match_mbuf(m->m_pkthdr.rcvif, PF_INET, m);
				if (m->m_pkthdr.rcvif == NULL) {
					ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
				}
			}

			/* Deliver the inner payload up the local protocol switch. */
			ip_proto_dispatch_in(m, off, (u_int8_t)nxt, 0);
		} else {
			m_freem(m);
		}
		m = NULL;
	}

done:
	/* Success path: release the SA reference and return (out_m or NULL). */
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	IPSEC_STAT_INCREMENT(ipsecstat.in_success);
	return out_m;
bad:
	/* Error path: release the SA reference and free the mbuf if still owned. */
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (m) {
		m_freem(m);
	}
	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0);
	return out_m;
}
901
/*
 * esp6_input - IPv6 ESP protocol-switch entry point.
 *
 * Thin wrapper: runs the extended input path with no receiving-interface
 * filter and propagates its IPPROTO_* result to the ip6 input loop.
 */
int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
	return esp6_input_extended(mp, offp, proto, NULL);
}
907
908 int
esp6_input_extended(struct mbuf ** mp,int * offp,int proto,ifnet_t interface)909 esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
910 {
911 #pragma unused(proto)
912 struct mbuf *m = *mp;
913 int off = *offp;
914 struct ip *ip;
915 struct ip6_hdr *ip6;
916 struct esp *esp;
917 struct esptail esptail;
918 u_int32_t spi;
919 u_int32_t seq;
920 struct secasvar *sav = NULL;
921 u_int16_t nxt;
922 char *nproto;
923 const struct esp_algorithm *algo;
924 int ivlen;
925 size_t esplen;
926 u_int16_t taillen;
927 sa_family_t ifamily;
928 mbuf_traffic_class_t traffic_class = 0;
929
930 /* sanity check for alignment. */
931 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
932 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
933 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
934 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
935 goto bad;
936 }
937
938 #ifndef PULLDOWN_TEST
939 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
940 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
941 #else
942 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
943 if (esp == NULL) {
944 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
945 return IPPROTO_DONE;
946 }
947 #endif
948 m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;
949
950 /* Expect 32-bit data aligned pointer on strict-align platforms */
951 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
952
953 ip6 = mtod(m, struct ip6_hdr *);
954
955 if (ntohs(ip6->ip6_plen) == 0) {
956 ipseclog((LOG_ERR, "IPv6 ESP input: "
957 "ESP with IPv6 jumbogram is not supported.\n"));
958 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
959 goto bad;
960 }
961
962 nproto = ip6_get_prevhdr(m, off);
963 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
964 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
965 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
966 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
967 goto bad;
968 }
969
970 /* find the sassoc. */
971 spi = esp->esp_spi;
972
973 if ((sav = key_allocsa_extended(AF_INET6,
974 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, interface != NULL ? interface->if_index : IFSCOPE_UNKNOWN,
975 IPPROTO_ESP, spi, interface)) == 0) {
976 ipseclog((LOG_WARNING,
977 "IPv6 ESP input: no key association found for spi %u (0x%08x) seq %u"
978 " src %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
979 " dst %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x if %s\n",
980 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi), ntohl(((struct newesp *)esp)->esp_seq),
981 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[1]),
982 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[3]),
983 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[5]),
984 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[7]),
985 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[1]),
986 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[3]),
987 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[5]),
988 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[7]),
989 ((interface != NULL) ? if_name(interface) : "NONE")));
990 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
991 goto bad;
992 }
993 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
994 printf("DP esp6_input called to allocate SA:0x%llx\n",
995 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
996 if (sav->state != SADB_SASTATE_MATURE
997 && sav->state != SADB_SASTATE_DYING) {
998 ipseclog((LOG_DEBUG,
999 "IPv6 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
1000 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
1001 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
1002 goto bad;
1003 }
1004 algo = esp_algorithm_lookup(sav->alg_enc);
1005 if (!algo) {
1006 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
1007 "unsupported encryption algorithm for spi %u (0x%08x)\n",
1008 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
1009 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
1010 goto bad;
1011 }
1012
1013 /* check if we have proper ivlen information */
1014 ivlen = sav->ivlen;
1015 if (ivlen < 0) {
1016 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
1017 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1018 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
1019 goto bad;
1020 }
1021
1022 seq = ntohl(((struct newesp *)esp)->esp_seq);
1023
1024 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
1025 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
1026 u_int8_t dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
1027 traffic_class = rfc4594_dscp_to_tc(dscp);
1028 }
1029
1030 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL &&
1031 ((sav->alg_auth && sav->key_auth) || algo->finalizedecrypt))) {
1032 goto noreplaycheck;
1033 }
1034
1035 if ((sav->alg_auth == SADB_X_AALG_NULL || sav->alg_auth == SADB_AALG_NONE) &&
1036 !algo->finalizedecrypt) {
1037 goto noreplaycheck;
1038 }
1039
1040 /*
1041 * check for sequence number.
1042 */
1043 if (ipsec_chkreplay(seq, sav, (u_int8_t)traffic_class)) {
1044 ; /*okey*/
1045 } else {
1046 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1047 ipseclog((LOG_WARNING,
1048 "replay packet in IPv6 ESP input: seq(%u) tc(%u) %s %s\n",
1049 seq, (u_int8_t)traffic_class, ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1050 goto bad;
1051 }
1052
1053 /* Save ICV from packet for verification later */
1054 size_t siz = 0;
1055 unsigned char saved_icv[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1056 if (algo->finalizedecrypt) {
1057 siz = algo->icvlen;
1058 VERIFY(siz <= UINT16_MAX);
1059 m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) saved_icv);
1060 } else {
1061 /* check ICV immediately */
1062 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1063 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1064 const struct ah_algorithm *sumalgo;
1065
1066 sumalgo = ah_algorithm_lookup(sav->alg_auth);
1067 if (!sumalgo) {
1068 goto noreplaycheck;
1069 }
1070 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
1071 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
1072 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1073 goto bad;
1074 }
1075 if (AH_MAXSUMSIZE < siz) {
1076 ipseclog((LOG_DEBUG,
1077 "internal error: AH_MAXSUMSIZE must be larger than %u\n",
1078 (u_int32_t)siz));
1079 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1080 goto bad;
1081 }
1082
1083 m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]);
1084
1085 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
1086 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1087 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1088 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1089 goto bad;
1090 }
1091
1092 if (cc_cmp_safe(siz, sum0, sum)) {
1093 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1094 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1095 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1096 goto bad;
1097 }
1098
1099 m->m_flags |= M_AUTHIPDGM;
1100 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
1101
1102 /*
1103 * update replay window.
1104 */
1105 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
1106 if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) {
1107 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1108 goto bad;
1109 }
1110 }
1111 }
1112
1113 /* strip off the authentication data */
1114 m_adj(m, (int)-siz);
1115 ip6 = mtod(m, struct ip6_hdr *);
1116 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - (u_int16_t)siz);
1117
1118 noreplaycheck:
1119
1120 /* process main esp header. */
1121 if (sav->flags & SADB_X_EXT_OLD) {
1122 /* RFC 1827 */
1123 esplen = sizeof(struct esp);
1124 } else {
1125 /* RFC 2406 */
1126 if (sav->flags & SADB_X_EXT_DERIV) {
1127 esplen = sizeof(struct esp);
1128 } else {
1129 esplen = sizeof(struct newesp);
1130 }
1131 }
1132
1133 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
1134 ipseclog((LOG_WARNING,
1135 "IPv6 ESP input: packet too short\n"));
1136 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1137 goto bad;
1138 }
1139
1140 #ifndef PULLDOWN_TEST
1141 IP6_EXTHDR_CHECK(m, off, (int)(esplen + ivlen), return IPPROTO_DONE); /*XXX*/
1142 #else
1143 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
1144 if (esp == NULL) {
1145 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1146 m = NULL;
1147 goto bad;
1148 }
1149 #endif
1150 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
1151
1152 /*
1153 * pre-compute and cache intermediate key
1154 */
1155 if (esp_schedule(algo, sav) != 0) {
1156 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1157 goto bad;
1158 }
1159
1160 /*
1161 * decrypt the packet.
1162 */
1163 if (!algo->decrypt) {
1164 panic("internal error: no decrypt function");
1165 }
1166 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1167 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1168 /* m is already freed */
1169 m = NULL;
1170 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1171 ipsec_logsastr(sav)));
1172 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1173 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
1174 goto bad;
1175 }
1176 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
1177 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1178
1179 m->m_flags |= M_DECRYPTED;
1180
1181 if (algo->finalizedecrypt) {
1182 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
1183 ipseclog((LOG_ERR, "esp6 packet decryption ICV failure: %s\n",
1184 ipsec_logsastr(sav)));
1185 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1186 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
1187 goto bad;
1188 } else {
1189 m->m_flags |= M_AUTHIPDGM;
1190 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
1191
1192 /*
1193 * update replay window.
1194 */
1195 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
1196 if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) {
1197 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1198 goto bad;
1199 }
1200 }
1201 }
1202 }
1203
1204 /*
1205 * find the trailer of the ESP.
1206 */
1207 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1208 (caddr_t)&esptail);
1209 nxt = esptail.esp_nxt;
1210 taillen = esptail.esp_padlen + sizeof(esptail);
1211
1212 if (m->m_pkthdr.len < taillen
1213 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1214 ipseclog((LOG_WARNING,
1215 "bad pad length in IPv6 ESP input: %s %s\n",
1216 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1217 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1218 goto bad;
1219 }
1220
1221 /* strip off the trailing pad area. */
1222 m_adj(m, -taillen);
1223 ip6 = mtod(m, struct ip6_hdr *);
1224 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1225
1226 if (*nproto == IPPROTO_UDP) {
1227 // offset includes the outer ip and udp header lengths.
1228 if (m->m_len < off) {
1229 m = m_pullup(m, off);
1230 if (!m) {
1231 ipseclog((LOG_DEBUG,
1232 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1233 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1234 goto bad;
1235 }
1236 ip6 = mtod(m, struct ip6_hdr *);
1237 }
1238
1239 // check the UDP encap header to detect changes in the source port, and then strip the header
1240 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1241 // if peer is behind nat and this is the latest esp packet
1242 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1243 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1244 seq && sav->replay[traffic_class] &&
1245 seq >= sav->replay[traffic_class]->lastseq) {
1246 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1247 if (encap_uh->uh_sport &&
1248 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1249 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1250 }
1251 }
1252 ip6 = esp6_input_strip_udp_encap(m, off);
1253 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1254 }
1255
1256
1257 /* was it transmitted over the IPsec tunnel SA? */
1258 if (ipsec6_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) {
1259 ifaddr_t ifa;
1260 struct sockaddr_storage addr;
1261 u_int8_t inner_ip_proto = 0;
1262
1263 /*
1264 * strip off all the headers that precedes ESP header.
1265 * IP6 xx ESP IP6' payload -> IP6' payload
1266 *
1267 * XXX more sanity checks
1268 * XXX relationship with gif?
1269 */
1270 u_int32_t flowinfo; /*net endian*/
1271 flowinfo = ip6->ip6_flow;
1272 m_adj(m, (int)(off + esplen + ivlen));
1273 if (ifamily == AF_INET6) {
1274 struct sockaddr_in6 *ip6addr;
1275
1276 if (m->m_len < sizeof(*ip6)) {
1277 #ifndef PULLDOWN_TEST
1278 /*
1279 * m_pullup is prohibited in KAME IPv6 input processing
1280 * but there's no other way!
1281 */
1282 #else
1283 /* okay to pullup in m_pulldown style */
1284 #endif
1285 m = m_pullup(m, sizeof(*ip6));
1286 if (!m) {
1287 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1288 goto bad;
1289 }
1290 }
1291 ip6 = mtod(m, struct ip6_hdr *);
1292 /* ECN consideration. */
1293 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1294 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1295 goto bad;
1296 }
1297 if (!key_checktunnelsanity(sav, AF_INET6,
1298 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1299 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1300 "in IPv6 ESP input: %s %s\n",
1301 ipsec6_logpacketstr(ip6, spi),
1302 ipsec_logsastr(sav)));
1303 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1304 goto bad;
1305 }
1306
1307 inner_ip_proto = ip6->ip6_nxt;
1308
1309 bzero(&addr, sizeof(addr));
1310 ip6addr = (__typeof__(ip6addr)) & addr;
1311 ip6addr->sin6_family = AF_INET6;
1312 ip6addr->sin6_len = sizeof(*ip6addr);
1313 ip6addr->sin6_addr = ip6->ip6_dst;
1314 } else if (ifamily == AF_INET) {
1315 struct sockaddr_in *ipaddr;
1316
1317 if (m->m_len < sizeof(*ip)) {
1318 m = m_pullup(m, sizeof(*ip));
1319 if (!m) {
1320 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1321 goto bad;
1322 }
1323 }
1324
1325 u_int8_t otos;
1326 int sum;
1327
1328 ip = mtod(m, struct ip *);
1329 otos = ip->ip_tos;
1330 /* ECN consideration. */
1331 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1332 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1333 goto bad;
1334 }
1335
1336 if (otos != ip->ip_tos) {
1337 sum = ~ntohs(ip->ip_sum) & 0xffff;
1338 sum += (~otos & 0xffff) + ip->ip_tos;
1339 sum = (sum >> 16) + (sum & 0xffff);
1340 sum += (sum >> 16); /* add carry */
1341 ip->ip_sum = htons(~sum & 0xffff);
1342 }
1343
1344 if (!key_checktunnelsanity(sav, AF_INET,
1345 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1346 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1347 "in ESP input: %s %s\n",
1348 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1349 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1350 goto bad;
1351 }
1352
1353 inner_ip_proto = ip->ip_p;
1354
1355 bzero(&addr, sizeof(addr));
1356 ipaddr = (__typeof__(ipaddr)) & addr;
1357 ipaddr->sin_family = AF_INET;
1358 ipaddr->sin_len = sizeof(*ipaddr);
1359 ipaddr->sin_addr = ip->ip_dst;
1360 }
1361
1362 key_sa_recordxfer(sav, m);
1363 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1364 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1365 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1366 goto bad;
1367 }
1368
1369 // update the receiving interface address based on the inner address
1370 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1371 if (ifa) {
1372 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1373 IFA_REMREF(ifa);
1374 }
1375
1376 // Input via IPsec interface
1377 lck_mtx_lock(sadb_mutex);
1378 ifnet_t ipsec_if = sav->sah->ipsec_if;
1379 if (ipsec_if != NULL) {
1380 // If an interface is found, add a reference count before dropping the lock
1381 ifnet_reference(ipsec_if);
1382 }
1383 lck_mtx_unlock(sadb_mutex);
1384
1385 if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
1386 if_ports_used_match_mbuf(m->m_pkthdr.rcvif, ifamily, m);
1387 if (m->m_pkthdr.rcvif == NULL) {
1388 ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
1389 }
1390 }
1391
1392 if (ipsec_if != NULL) {
1393 esp_input_log(m, sav, spi, seq);
1394 ipsec_save_wake_packet(m, ntohl(spi), seq);
1395
1396 // Return mbuf
1397 if (interface != NULL &&
1398 interface == ipsec_if) {
1399 ifnet_release(ipsec_if);
1400 goto done;
1401 }
1402
1403 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1404 ifnet_release(ipsec_if);
1405
1406 if (inject_error == 0) {
1407 m = NULL;
1408 nxt = IPPROTO_DONE;
1409 goto done;
1410 } else {
1411 goto bad;
1412 }
1413 }
1414
1415 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
1416 goto bad;
1417 }
1418 nxt = IPPROTO_DONE;
1419 } else {
1420 /*
1421 * strip off ESP header and IV.
1422 * even in m_pulldown case, we need to strip off ESP so that
1423 * we can always compute checksum for AH correctly.
1424 */
1425 u_int16_t stripsiz;
1426 char *prvnxtp;
1427
1428 /*
1429 * Set the next header field of the previous header correctly.
1430 */
1431 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1432 *prvnxtp = (u_int8_t)nxt;
1433
1434 VERIFY(esplen + ivlen <= UINT16_MAX);
1435 stripsiz = (u_int16_t)(esplen + ivlen);
1436
1437 ip6 = mtod(m, struct ip6_hdr *);
1438 if (m->m_len >= stripsiz + off) {
1439 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1440 m->m_data += stripsiz;
1441 m->m_len -= stripsiz;
1442 m->m_pkthdr.len -= stripsiz;
1443 } else {
1444 /*
1445 * this comes with no copy if the boundary is on
1446 * cluster
1447 */
1448 struct mbuf *n;
1449
1450 n = m_split(m, off, M_DONTWAIT);
1451 if (n == NULL) {
1452 /* m is retained by m_split */
1453 goto bad;
1454 }
1455 m_adj(n, stripsiz);
1456 /* m_cat does not update m_pkthdr.len */
1457 m->m_pkthdr.len += n->m_pkthdr.len;
1458 m_cat(m, n);
1459 }
1460
1461 #ifndef PULLDOWN_TEST
1462 /*
1463 * KAME requires that the packet to be contiguous on the
1464 * mbuf. We need to make that sure.
1465 * this kind of code should be avoided.
1466 * XXX other conditions to avoid running this part?
1467 */
1468 if (m->m_len != m->m_pkthdr.len) {
1469 struct mbuf *n = NULL;
1470 int maxlen;
1471
1472 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1473 maxlen = MHLEN;
1474 if (n) {
1475 M_COPY_PKTHDR(n, m);
1476 }
1477 if (n && m->m_pkthdr.len > maxlen) {
1478 MCLGET(n, M_DONTWAIT);
1479 maxlen = MCLBYTES;
1480 if ((n->m_flags & M_EXT) == 0) {
1481 m_free(n);
1482 n = NULL;
1483 }
1484 }
1485 if (!n) {
1486 printf("esp6_input: mbuf allocation failed\n");
1487 goto bad;
1488 }
1489
1490 if (m->m_pkthdr.len <= maxlen) {
1491 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1492 n->m_len = m->m_pkthdr.len;
1493 n->m_pkthdr.len = m->m_pkthdr.len;
1494 n->m_next = NULL;
1495 m_freem(m);
1496 } else {
1497 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1498 n->m_len = maxlen;
1499 n->m_pkthdr.len = m->m_pkthdr.len;
1500 n->m_next = m;
1501 m_adj(m, maxlen);
1502 m->m_flags &= ~M_PKTHDR;
1503 }
1504 m = n;
1505 }
1506 #endif
1507 ip6 = mtod(m, struct ip6_hdr *);
1508 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1509
1510 key_sa_recordxfer(sav, m);
1511 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1512 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1513 goto bad;
1514 }
1515
1516 /*
1517 * Set the csum valid flag, if we authenticated the
1518 * packet, the payload shouldn't be corrupt unless
1519 * it was corrupted before being signed on the other
1520 * side.
1521 */
1522 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1523 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1524 m->m_pkthdr.csum_data = 0xFFFF;
1525 _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
1526 }
1527
1528 // Input via IPsec interface
1529 lck_mtx_lock(sadb_mutex);
1530 ifnet_t ipsec_if = sav->sah->ipsec_if;
1531 if (ipsec_if != NULL) {
1532 // If an interface is found, add a reference count before dropping the lock
1533 ifnet_reference(ipsec_if);
1534 }
1535 lck_mtx_unlock(sadb_mutex);
1536 if (ipsec_if != NULL) {
1537 esp_input_log(m, sav, spi, seq);
1538 ipsec_save_wake_packet(m, ntohl(spi), seq);
1539
1540 if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
1541 if_ports_used_match_mbuf(ipsec_if, PF_INET6, m);
1542 }
1543
1544 // Return mbuf
1545 if (interface != NULL &&
1546 interface == ipsec_if) {
1547 ifnet_release(ipsec_if);
1548 goto done;
1549 }
1550
1551 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1552 ifnet_release(ipsec_if);
1553
1554 if (inject_error == 0) {
1555 m = NULL;
1556 nxt = IPPROTO_DONE;
1557 goto done;
1558 } else {
1559 goto bad;
1560 }
1561 } else {
1562 if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
1563 if_ports_used_match_mbuf(m->m_pkthdr.rcvif, PF_INET, m);
1564 if (m->m_pkthdr.rcvif == NULL) {
1565 ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
1566 }
1567 }
1568 }
1569 }
1570
1571 done:
1572 *offp = off;
1573 *mp = m;
1574 if (sav) {
1575 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1576 printf("DP esp6_input call free SA:0x%llx\n",
1577 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1578 key_freesav(sav, KEY_SADB_UNLOCKED);
1579 }
1580 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1581 return nxt;
1582
1583 bad:
1584 if (sav) {
1585 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1586 printf("DP esp6_input call free SA:0x%llx\n",
1587 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1588 key_freesav(sav, KEY_SADB_UNLOCKED);
1589 }
1590 if (m) {
1591 m_freem(m);
1592 }
1593 if (interface != NULL) {
1594 *mp = NULL;
1595 }
1596 return IPPROTO_DONE;
1597 }
1598
1599 void
esp6_ctlinput(int cmd,struct sockaddr * sa,void * d,__unused struct ifnet * ifp)1600 esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
1601 {
1602 const struct newesp *espp;
1603 struct newesp esp;
1604 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1605 struct secasvar *sav;
1606 struct ip6_hdr *ip6;
1607 struct mbuf *m;
1608 int off = 0;
1609 struct sockaddr_in6 *sa6_src, *sa6_dst;
1610
1611 if (sa->sa_family != AF_INET6 ||
1612 sa->sa_len != sizeof(struct sockaddr_in6)) {
1613 return;
1614 }
1615 if ((unsigned)cmd >= PRC_NCMDS) {
1616 return;
1617 }
1618
1619 /* if the parameter is from icmp6, decode it. */
1620 if (d != NULL) {
1621 ip6cp = (struct ip6ctlparam *)d;
1622 m = ip6cp->ip6c_m;
1623 ip6 = ip6cp->ip6c_ip6;
1624 off = ip6cp->ip6c_off;
1625 } else {
1626 m = NULL;
1627 ip6 = NULL;
1628 }
1629
1630 if (ip6) {
1631 /*
1632 * Notify the error to all possible sockets via pfctlinput2.
1633 * Since the upper layer information (such as protocol type,
1634 * source and destination ports) is embedded in the encrypted
1635 * data and might have been cut, we can't directly call
1636 * an upper layer ctlinput function. However, the pcbnotify
1637 * function will consider source and destination addresses
1638 * as well as the flow info value, and may be able to find
1639 * some PCB that should be notified.
1640 * Although pfctlinput2 will call esp6_ctlinput(), there is
1641 * no possibility of an infinite loop of function calls,
1642 * because we don't pass the inner IPv6 header.
1643 */
1644 bzero(&ip6cp1, sizeof(ip6cp1));
1645 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1646 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1647
1648 /*
1649 * Then go to special cases that need ESP header information.
1650 * XXX: We assume that when ip6 is non NULL,
1651 * M and OFF are valid.
1652 */
1653
1654 /* check if we can safely examine src and dst ports */
1655 if (m->m_pkthdr.len < off + sizeof(esp)) {
1656 return;
1657 }
1658
1659 if (m->m_len < off + sizeof(esp)) {
1660 /*
1661 * this should be rare case,
1662 * so we compromise on this copy...
1663 */
1664 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1665 espp = &esp;
1666 } else {
1667 espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1668 }
1669
1670 if (cmd == PRC_MSGSIZE) {
1671 int valid = 0;
1672
1673 /*
1674 * Check to see if we have a valid SA corresponding to
1675 * the address in the ICMP message payload.
1676 */
1677 sa6_src = ip6cp->ip6c_src;
1678 sa6_dst = (struct sockaddr_in6 *)(void *)sa;
1679 sav = key_allocsa(AF_INET6,
1680 (caddr_t)&sa6_src->sin6_addr,
1681 (caddr_t)&sa6_dst->sin6_addr,
1682 sa6_dst->sin6_scope_id,
1683 IPPROTO_ESP, espp->esp_spi);
1684 if (sav) {
1685 if (sav->state == SADB_SASTATE_MATURE ||
1686 sav->state == SADB_SASTATE_DYING) {
1687 valid++;
1688 }
1689 key_freesav(sav, KEY_SADB_UNLOCKED);
1690 }
1691
1692 /* XXX Further validation? */
1693
1694 /*
1695 * Depending on the value of "valid" and routing table
1696 * size (mtudisc_{hi,lo}wat), we will:
1697 * - recalcurate the new MTU and create the
1698 * corresponding routing entry, or
1699 * - ignore the MTU change notification.
1700 */
1701 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1702 }
1703 } else {
1704 /* we normally notify any pcb here */
1705 }
1706 }
1707