1 /*
2 * Copyright (c) 2008-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30 /* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * IPsec controller part.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/errno.h>
75 #include <sys/time.h>
76 #include <sys/kernel.h>
77 #include <sys/syslog.h>
78 #include <sys/sysctl.h>
79 #include <sys/priv.h>
80 #include <kern/locks.h>
81 #include <sys/kauth.h>
82 #include <sys/bitstring.h>
83
84 #include <libkern/OSAtomic.h>
85 #include <libkern/sysctl.h>
86
87 #include <net/if.h>
88 #include <net/route.h>
89 #include <net/if_ipsec.h>
90 #include <net/if_ports_used.h>
91
92 #include <netinet/in.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/in_var.h>
97 #include <netinet/udp.h>
98 #include <netinet/udp_var.h>
99 #include <netinet/ip_ecn.h>
100 #include <netinet6/ip6_ecn.h>
101 #include <netinet/tcp.h>
102 #include <netinet/udp.h>
103
104 #include <netinet/ip6.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet/in_pcb.h>
107 #include <netinet/icmp6.h>
108
109 #include <netinet6/ipsec.h>
110 #include <netinet6/ipsec6.h>
111 #include <netinet6/ah.h>
112 #include <netinet6/ah6.h>
113 #if IPSEC_ESP
114 #include <netinet6/esp.h>
115 #include <netinet6/esp6.h>
116 #endif
117 #include <netkey/key.h>
118 #include <netkey/keydb.h>
119 #include <netkey/key_debug.h>
120
121 #include <net/net_osdep.h>
122
123 #include <IOKit/pwr_mgt/IOPM.h>
124
125 #include <os/log_private.h>
126
127 #if IPSEC_DEBUG
128 int ipsec_debug = 1;
129 #else
130 int ipsec_debug = 0;
131 #endif
132
133 #include <sys/kdebug.h>
134 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
135 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
136 #define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
137 #define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
138 #define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
139
140 struct ipsecstat ipsecstat;
141 int ip4_ah_cleartos = 1;
142 int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
143 int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
144 int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
145 int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
146 int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
147 int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
148 struct secpolicy ip4_def_policy;
149 int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
150 int ip4_esp_randpad = -1;
151 int esp_udp_encap_port = 0;
152 static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
153 extern int natt_keepalive_interval;
154 extern u_int64_t natt_now;
155
156 struct ipsec_tag;
157
158 void *sleep_wake_handle = NULL;
159 bool ipsec_save_wake_pkt = false;
160
161 SYSCTL_DECL(_net_inet_ipsec);
162 SYSCTL_DECL(_net_inet6_ipsec6);
163 /* net.inet.ipsec */
164 SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
165 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
166 SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
167 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
168 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
169 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
170 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
171 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
172 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
173 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
174 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
175 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
176 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
177 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
178 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
179 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
180 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
181 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
182 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
183 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
184 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
185 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
186 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
187 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
188
189 /* for performance, we bypass ipsec until a security policy is set */
190 int ipsec_bypass = 1;
191 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass, 0, "");
192
193 /*
194 * NAT Traversal requires a UDP port for encapsulation,
195 * esp_udp_encap_port controls which port is used. Racoon
196 * must set this port to the port racoon is using locally
197 * for nat traversal.
198 */
199 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
200 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
201
202 struct ipsecstat ipsec6stat;
203 int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
204 int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
205 int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
206 int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
207 struct secpolicy ip6_def_policy;
208 int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
209 int ip6_esp_randpad = -1;
210
211 /* net.inet6.ipsec6 */
212 SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
213 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
214 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
215 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
216 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
217 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
218 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
219 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
220 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
221 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
222 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
223 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
224 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
225 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
226 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
227 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
228 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
229 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
230
231 SYSCTL_DECL(_net_link_generic_system);
232
233 struct ipsec_wake_pkt_info ipsec_wake_pkt;
234
235 static int ipsec_setspidx_interface(struct secpolicyindex *, u_int8_t, struct mbuf *,
236 int, int, int);
237 static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int8_t, u_int,
238 struct mbuf *, int);
239 static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
240 static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
241 static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
242 static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
243 static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
244 static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
245 static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
246 static struct inpcbpolicy *ipsec_newpcbpolicy(void);
247 static void ipsec_delpcbpolicy(struct inpcbpolicy *);
248 static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
249 static int ipsec_set_policy(struct secpolicy **pcb_sp,
250 int optname, caddr_t request, size_t len, int priv);
251 static void vshiftl(unsigned char *, int, size_t);
252 static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
253 static int ipsec64_encapsulate(struct mbuf *, struct secasvar *, uint32_t);
254 static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
255 static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
256 static struct ipsec_tag *ipsec_addaux(struct mbuf *);
257 static struct ipsec_tag *ipsec_findaux(struct mbuf *);
258 static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
259 int ipsec_send_natt_keepalive(struct secasvar *sav);
260 bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
261
262 extern bool IOPMCopySleepWakeUUIDKey(char *, size_t);
263
264 typedef IOReturn (*IOServiceInterestHandler)( void * target, void * refCon,
265 UInt32 messageType, void * provider,
266 void * messageArgument, vm_size_t argSize );
267 extern void *registerSleepWakeInterest(IOServiceInterestHandler, void *, void *);
268
269 static int
270 sysctl_def_policy SYSCTL_HANDLER_ARGS
271 {
272 int new_policy = ip4_def_policy.policy;
273 int error = sysctl_handle_int(oidp, &new_policy, 0, req);
274
275 #pragma unused(arg1, arg2)
276 if (error == 0) {
277 if (new_policy != IPSEC_POLICY_NONE &&
278 new_policy != IPSEC_POLICY_DISCARD) {
279 return EINVAL;
280 }
281 ip4_def_policy.policy = new_policy;
282
283 /* Turn off the bypass if the default security policy changes */
284 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
285 ipsec_bypass = 0;
286 }
287 }
288
289 return error;
290 }
291
292 /*
293 * For OUTBOUND packet having a socket. Searching SPD for packet,
294 * and return a pointer to SP.
 * OUT: NULL: no appropriate SP found, the following value is set to error.
296 * 0 : bypass
297 * EACCES : discard packet.
298 * ENOENT : ipsec_acquire() in progress, maybe.
299 * others : error occurred.
300 * others: a pointer to SP
301 *
 * NOTE: IPv6 mapped address concern is implemented here.
303 */
struct secpolicy *
ipsec4_getpolicybysock(struct mbuf *m,
    u_int8_t dir,
    struct socket *so,
    int *error)
{
	struct inpcbpolicy *pcbsp = NULL;
	struct secpolicy *currsp = NULL;        /* policy on socket */
	struct secpolicy *kernsp = NULL;        /* policy on kernel */

	/* sadb_mutex must not already be held: it is taken below for
	 * policy reference counting. */
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
	/* sanity check */
	if (m == NULL || so == NULL || error == NULL) {
		panic("ipsec4_getpolicybysock: NULL pointer was passed.");
	}

	/* No PCB attached: fall back to an address-based SPD lookup. */
	if (so->so_pcb == NULL) {
		printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
		return ipsec4_getpolicybyaddr(m, dir, 0, error);
	}

	/* An IPv4 packet may belong to a PF_INET6 socket via an
	 * IPv4-mapped address, so both domains are accepted here. */
	switch (SOCK_DOM(so)) {
	case PF_INET:
		pcbsp = sotoinpcb(so)->inp_sp;
		break;
	case PF_INET6:
		pcbsp = sotoin6pcb(so)->in6p_sp;
		break;
	}

	if (!pcbsp) {
		/* Socket has not specified an IPSEC policy */
		return ipsec4_getpolicybyaddr(m, dir, 0, error);
	}

	KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* Parse the packet and cache the resulting policy index in the
	 * PCB; failure aborts the lookup with *error set. */
	switch (SOCK_DOM(so)) {
	case PF_INET:
		/* set spidx in pcb */
		*error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
		break;
	case PF_INET6:
		/* set spidx in pcb */
		*error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
		break;
	default:
		panic("ipsec4_getpolicybysock: unsupported address family");
	}
	if (*error) {
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1, *error, 0, 0, 0);
		return NULL;
	}

	/* sanity check */
	if (pcbsp == NULL) {
		panic("ipsec4_getpolicybysock: pcbsp is NULL.");
	}

	/* Pick the per-socket policy matching the packet direction. */
	switch (dir) {
	case IPSEC_DIR_INBOUND:
		currsp = pcbsp->sp_in;
		break;
	case IPSEC_DIR_OUTBOUND:
		currsp = pcbsp->sp_out;
		break;
	default:
		panic("ipsec4_getpolicybysock: illegal direction.");
	}

	/* sanity check */
	if (currsp == NULL) {
		panic("ipsec4_getpolicybysock: currsp is NULL.");
	}

	/* when privileged socket: BYPASS and IPSEC are honored directly;
	 * ENTRUST defers to the kernel SPD. */
	if (pcbsp->priv) {
		switch (currsp->policy) {
		case IPSEC_POLICY_BYPASS:
			/* return the socket policy itself, with a reference */
			lck_mtx_lock(sadb_mutex);
			currsp->refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2, *error, 0, 0, 0);
			return currsp;

		case IPSEC_POLICY_ENTRUST:
			/* look for a policy in SPD */
			kernsp = key_allocsp(&currsp->spidx, dir);

			/* SP found */
			if (kernsp != NULL) {
				KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
				    printf("DP ipsec4_getpolicybysock called "
				    "to allocate SP:0x%llx\n",
				    (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
				*error = 0;
				KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3, *error, 0, 0, 0);
				return kernsp;
			}

			/* no SP found: fall back to the system default policy.
			 * The default may only be NONE or DISCARD; anything
			 * else is repaired to NONE before referencing it. */
			lck_mtx_lock(sadb_mutex);
			if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
			    && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
				ipseclog((LOG_INFO,
				    "fixed system default policy: %d->%d\n",
				    ip4_def_policy.policy, IPSEC_POLICY_NONE));
				ip4_def_policy.policy = IPSEC_POLICY_NONE;
			}
			ip4_def_policy.refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4, *error, 0, 0, 0);
			return &ip4_def_policy;

		case IPSEC_POLICY_IPSEC:
			/* socket demands IPsec: hand back its own policy */
			lck_mtx_lock(sadb_mutex);
			currsp->refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5, *error, 0, 0, 0);
			return currsp;

		default:
			ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
			    "Invalid policy for PCB %d\n", currsp->policy));
			*error = EINVAL;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6, *error, 0, 0, 0);
			return NULL;
		}
		/* NOTREACHED */
	}

	/* when non-privileged socket: the kernel SPD is consulted first,
	 * and BYPASS is not permitted. */
	/* look for a policy in SPD */
	kernsp = key_allocsp(&currsp->spidx, dir);

	/* SP found */
	if (kernsp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP ipsec4_getpolicybysock called "
		    "to allocate SP:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7, *error, 0, 0, 0);
		return kernsp;
	}

	/* no SP found */
	switch (currsp->policy) {
	case IPSEC_POLICY_BYPASS:
		/* only privileged sockets may bypass IPsec */
		ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
		    "Illegal policy for non-priviliged defined %d\n",
		    currsp->policy));
		*error = EINVAL;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8, *error, 0, 0, 0);
		return NULL;

	case IPSEC_POLICY_ENTRUST:
		/* fall back to the (repaired) system default policy */
		lck_mtx_lock(sadb_mutex);
		if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
		    && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
			ipseclog((LOG_INFO,
			    "fixed system default policy: %d->%d\n",
			    ip4_def_policy.policy, IPSEC_POLICY_NONE));
			ip4_def_policy.policy = IPSEC_POLICY_NONE;
		}
		ip4_def_policy.refcnt++;
		lck_mtx_unlock(sadb_mutex);
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9, *error, 0, 0, 0);
		return &ip4_def_policy;

	case IPSEC_POLICY_IPSEC:
		lck_mtx_lock(sadb_mutex);
		currsp->refcnt++;
		lck_mtx_unlock(sadb_mutex);
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10, *error, 0, 0, 0);
		return currsp;

	default:
		ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
		    "Invalid policy for PCB %d\n", currsp->policy));
		*error = EINVAL;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11, *error, 0, 0, 0);
		return NULL;
	}
	/* NOTREACHED */
}
495
496 /*
 * For FORWARDING packet or OUTBOUND without a socket. Searching SPD for packet,
498 * and return a pointer to SP.
499 * OUT: positive: a pointer to the entry for security policy leaf matched.
 * NULL: no appropriate SP found, the following value is set to error.
501 * 0 : bypass
502 * EACCES : discard packet.
503 * ENOENT : ipsec_acquire() in progress, maybe.
504 * others : error occurred.
505 */
struct secpolicy *
ipsec4_getpolicybyaddr(struct mbuf *m,
    u_int8_t dir,
    int flag,
    int *error)
{
	struct secpolicy *sp = NULL;

	/* Fast path: bypass is on until a security policy is installed
	 * (see the ipsec_bypass declaration above); *error is untouched. */
	if (ipsec_bypass != 0) {
		return 0;
	}

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* sanity check */
	if (m == NULL || error == NULL) {
		panic("ipsec4_getpolicybyaddr: NULL pointer was passed.");
	}
	{
		struct secpolicyindex spidx;

		KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
		bzero(&spidx, sizeof(spidx));

		/* make an index to look for a policy; ports are only
		 * needed when the packet is locally generated */
		*error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
		    (flag & IP_FORWARDING) ? 0 : 1);

		if (*error != 0) {
			KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, *error, 0, 0, 0);
			return NULL;
		}

		sp = key_allocsp(&spidx, dir);
	}

	/* SP found */
	if (sp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP ipsec4_getpolicybyaddr called "
		    "to allocate SP:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sp)));
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
		return sp;
	}

	/* no SP found: return the system default policy with a reference.
	 * The default may only be NONE or DISCARD; anything else is
	 * repaired to NONE first. */
	lck_mtx_lock(sadb_mutex);
	if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
	    && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
		ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
		    ip4_def_policy.policy,
		    IPSEC_POLICY_NONE));
		ip4_def_policy.policy = IPSEC_POLICY_NONE;
	}
	ip4_def_policy.refcnt++;
	lck_mtx_unlock(sadb_mutex);
	*error = 0;
	KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3, *error, 0, 0, 0);
	return &ip4_def_policy;
}
568
569 /* Match with bound interface rather than src addr.
570 * Unlike getpolicybyaddr, do not set the default policy.
571 * Return 0 if should continue processing, or -1 if packet
572 * should be dropped.
573 */
int
ipsec4_getpolicybyinterface(struct mbuf *m,
    u_int8_t dir,
    int *flags,
    struct ip_out_args *ipoa,
    struct secpolicy **sp)
{
	struct secpolicyindex spidx;
	int error = 0;

	/* Fast path: no security policy has ever been installed. */
	if (ipsec_bypass != 0) {
		return 0;
	}

	/* Sanity check */
	if (m == NULL || ipoa == NULL || sp == NULL) {
		panic("ipsec4_getpolicybyinterface: NULL pointer was passed.");
	}

	/* Interface matching only applies to traffic bound to a scope. */
	if (ipoa->ipoa_boundif == IFSCOPE_NONE) {
		return 0;
	}

	KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
	bzero(&spidx, sizeof(spidx));

	/* make an index to look for a policy, keyed on the bound interface */
	error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
	    ipoa->ipoa_boundif, 4);

	/* A failed index setup is non-fatal: continue processing unscoped. */
	if (error != 0) {
		KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
		return 0;
	}

	*sp = key_allocsp(&spidx, dir);

	/* Return SP, whether NULL or not */
	if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
		if ((*sp)->ipsec_if == NULL) {
			/* Invalid to capture on an interface without redirect */
			key_freesp(*sp, KEY_SADB_UNLOCKED);
			*sp = NULL;
			return -1;
		} else if ((*sp)->disabled) {
			/* Disabled policies go in the clear */
			key_freesp(*sp, KEY_SADB_UNLOCKED);
			*sp = NULL;
			*flags |= IP_NOIPSEC; /* Avoid later IPsec check */
		} else {
			/* If policy is enabled, redirect to ipsec interface */
			ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
		}
	}

	KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);

	return 0;
}
633
634
635 /*
636 * For OUTBOUND packet having a socket. Searching SPD for packet,
637 * and return a pointer to SP.
 * OUT: NULL: no appropriate SP found, the following value is set to error.
639 * 0 : bypass
640 * EACCES : discard packet.
641 * ENOENT : ipsec_acquire() in progress, maybe.
642 * others : error occurred.
643 * others: a pointer to SP
644 */
645 struct secpolicy *
ipsec6_getpolicybysock(struct mbuf * m,u_int8_t dir,struct socket * so,int * error)646 ipsec6_getpolicybysock(struct mbuf *m,
647 u_int8_t dir,
648 struct socket *so,
649 int *error)
650 {
651 struct inpcbpolicy *pcbsp = NULL;
652 struct secpolicy *currsp = NULL; /* policy on socket */
653 struct secpolicy *kernsp = NULL; /* policy on kernel */
654
655 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
656
657 /* sanity check */
658 if (m == NULL || so == NULL || error == NULL) {
659 panic("ipsec6_getpolicybysock: NULL pointer was passed.");
660 }
661
662 #if DIAGNOSTIC
663 if (SOCK_DOM(so) != PF_INET6) {
664 panic("ipsec6_getpolicybysock: socket domain != inet6");
665 }
666 #endif
667
668 pcbsp = sotoin6pcb(so)->in6p_sp;
669
670 if (!pcbsp) {
671 return ipsec6_getpolicybyaddr(m, dir, 0, error);
672 }
673
674 /* set spidx in pcb */
675 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
676
677 /* sanity check */
678 if (pcbsp == NULL) {
679 panic("ipsec6_getpolicybysock: pcbsp is NULL.");
680 }
681
682 switch (dir) {
683 case IPSEC_DIR_INBOUND:
684 currsp = pcbsp->sp_in;
685 break;
686 case IPSEC_DIR_OUTBOUND:
687 currsp = pcbsp->sp_out;
688 break;
689 default:
690 panic("ipsec6_getpolicybysock: illegal direction.");
691 }
692
693 /* sanity check */
694 if (currsp == NULL) {
695 panic("ipsec6_getpolicybysock: currsp is NULL.");
696 }
697
698 /* when privilieged socket */
699 if (pcbsp->priv) {
700 switch (currsp->policy) {
701 case IPSEC_POLICY_BYPASS:
702 lck_mtx_lock(sadb_mutex);
703 currsp->refcnt++;
704 lck_mtx_unlock(sadb_mutex);
705 *error = 0;
706 return currsp;
707
708 case IPSEC_POLICY_ENTRUST:
709 /* look for a policy in SPD */
710 kernsp = key_allocsp(&currsp->spidx, dir);
711
712 /* SP found */
713 if (kernsp != NULL) {
714 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
715 printf("DP ipsec6_getpolicybysock called "
716 "to allocate SP:0x%llx\n",
717 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
718 *error = 0;
719 return kernsp;
720 }
721
722 /* no SP found */
723 lck_mtx_lock(sadb_mutex);
724 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
725 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
726 ipseclog((LOG_INFO,
727 "fixed system default policy: %d->%d\n",
728 ip6_def_policy.policy, IPSEC_POLICY_NONE));
729 ip6_def_policy.policy = IPSEC_POLICY_NONE;
730 }
731 ip6_def_policy.refcnt++;
732 lck_mtx_unlock(sadb_mutex);
733 *error = 0;
734 return &ip6_def_policy;
735
736 case IPSEC_POLICY_IPSEC:
737 lck_mtx_lock(sadb_mutex);
738 currsp->refcnt++;
739 lck_mtx_unlock(sadb_mutex);
740 *error = 0;
741 return currsp;
742
743 default:
744 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
745 "Invalid policy for PCB %d\n", currsp->policy));
746 *error = EINVAL;
747 return NULL;
748 }
749 /* NOTREACHED */
750 }
751
752 /* when non-privilieged socket */
753 /* look for a policy in SPD */
754 kernsp = key_allocsp(&currsp->spidx, dir);
755
756 /* SP found */
757 if (kernsp != NULL) {
758 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
759 printf("DP ipsec6_getpolicybysock called "
760 "to allocate SP:0x%llx\n",
761 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
762 *error = 0;
763 return kernsp;
764 }
765
766 /* no SP found */
767 switch (currsp->policy) {
768 case IPSEC_POLICY_BYPASS:
769 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
770 "Illegal policy for non-priviliged defined %d\n",
771 currsp->policy));
772 *error = EINVAL;
773 return NULL;
774
775 case IPSEC_POLICY_ENTRUST:
776 lck_mtx_lock(sadb_mutex);
777 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
778 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
779 ipseclog((LOG_INFO,
780 "fixed system default policy: %d->%d\n",
781 ip6_def_policy.policy, IPSEC_POLICY_NONE));
782 ip6_def_policy.policy = IPSEC_POLICY_NONE;
783 }
784 ip6_def_policy.refcnt++;
785 lck_mtx_unlock(sadb_mutex);
786 *error = 0;
787 return &ip6_def_policy;
788
789 case IPSEC_POLICY_IPSEC:
790 lck_mtx_lock(sadb_mutex);
791 currsp->refcnt++;
792 lck_mtx_unlock(sadb_mutex);
793 *error = 0;
794 return currsp;
795
796 default:
797 ipseclog((LOG_ERR,
798 "ipsec6_policybysock: Invalid policy for PCB %d\n",
799 currsp->policy));
800 *error = EINVAL;
801 return NULL;
802 }
803 /* NOTREACHED */
804 }
805
806 /*
 * For FORWARDING packet or OUTBOUND without a socket. Searching SPD for packet,
 * and return a pointer to SP.
 * `flag' indicates whether or not the packet is to be forwarded.
 * flag = 1: forward
 * OUT: positive: a pointer to the entry for security policy leaf matched.
 * NULL: no appropriate SP found, the following value is set to error.
813 * 0 : bypass
814 * EACCES : discard packet.
815 * ENOENT : ipsec_acquire() in progress, maybe.
816 * others : error occurred.
817 */
818 #ifndef IP_FORWARDING
819 #define IP_FORWARDING 1
820 #endif
821
struct secpolicy *
ipsec6_getpolicybyaddr(struct mbuf *m,
    u_int8_t dir,
    int flag,
    int *error)
{
	struct secpolicy *sp = NULL;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* sanity check */
	if (m == NULL || error == NULL) {
		panic("ipsec6_getpolicybyaddr: NULL pointer was passed.");
	}

	{
		struct secpolicyindex spidx;

		bzero(&spidx, sizeof(spidx));

		/* make an index to look for a policy; ports are only
		 * needed when the packet is locally generated */
		*error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
		    (flag & IP_FORWARDING) ? 0 : 1);

		if (*error != 0) {
			return NULL;
		}

		sp = key_allocsp(&spidx, dir);
	}

	/* SP found */
	if (sp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP ipsec6_getpolicybyaddr called "
		    "to allocate SP:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sp)));
		*error = 0;
		return sp;
	}

	/* no SP found: return the system default policy with a reference.
	 * The default may only be NONE or DISCARD; anything else is
	 * repaired to NONE first. */
	lck_mtx_lock(sadb_mutex);
	if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
	    && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
		ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
		    ip6_def_policy.policy, IPSEC_POLICY_NONE));
		ip6_def_policy.policy = IPSEC_POLICY_NONE;
	}
	ip6_def_policy.refcnt++;
	lck_mtx_unlock(sadb_mutex);
	*error = 0;
	return &ip6_def_policy;
}
876
877 /* Match with bound interface rather than src addr.
878 * Unlike getpolicybyaddr, do not set the default policy.
879 * Return 0 if should continue processing, or -1 if packet
880 * should be dropped.
881 */
882 int
ipsec6_getpolicybyinterface(struct mbuf * m,u_int8_t dir,int flag,struct ip6_out_args * ip6oap,int * noipsec,struct secpolicy ** sp)883 ipsec6_getpolicybyinterface(struct mbuf *m,
884 u_int8_t dir,
885 int flag,
886 struct ip6_out_args *ip6oap,
887 int *noipsec,
888 struct secpolicy **sp)
889 {
890 struct secpolicyindex spidx;
891 int error = 0;
892
893 if (ipsec_bypass != 0) {
894 return 0;
895 }
896
897 /* Sanity check */
898 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) {
899 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.");
900 }
901
902 *noipsec = 0;
903
904 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) {
905 return 0;
906 }
907
908 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
909 bzero(&spidx, sizeof(spidx));
910
911 /* make a index to look for a policy */
912 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
913 ip6oap->ip6oa_boundif, 6);
914
915 if (error != 0) {
916 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
917 return 0;
918 }
919
920 *sp = key_allocsp(&spidx, dir);
921
922 /* Return SP, whether NULL or not */
923 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
924 if ((*sp)->ipsec_if == NULL) {
925 /* Invalid to capture on an interface without redirect */
926 key_freesp(*sp, KEY_SADB_UNLOCKED);
927 *sp = NULL;
928 return -1;
929 } else if ((*sp)->disabled) {
930 /* Disabled policies go in the clear */
931 key_freesp(*sp, KEY_SADB_UNLOCKED);
932 *sp = NULL;
933 *noipsec = 1; /* Avoid later IPsec check */
934 } else {
935 /* If policy is enabled, redirect to ipsec interface */
936 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
937 }
938 }
939
940 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
941
942 return 0;
943 }
944
945 /*
946 * set IP address into spidx from mbuf.
947 * When Forwarding packet and ICMP echo reply, this function is used.
948 *
 * IN: get the following from mbuf.
950 * protocol family, src, dst, next protocol
951 * OUT:
952 * 0: success.
953 * other: failure, and set errno.
954 */
955 static int
ipsec_setspidx_mbuf(struct secpolicyindex * spidx,u_int8_t dir,__unused u_int family,struct mbuf * m,int needport)956 ipsec_setspidx_mbuf(
957 struct secpolicyindex *spidx,
958 u_int8_t dir,
959 __unused u_int family,
960 struct mbuf *m,
961 int needport)
962 {
963 int error;
964
965 /* sanity check */
966 if (spidx == NULL || m == NULL) {
967 panic("ipsec_setspidx_mbuf: NULL pointer was passed.");
968 }
969
970 bzero(spidx, sizeof(*spidx));
971
972 error = ipsec_setspidx(m, spidx, needport, 0);
973 if (error) {
974 goto bad;
975 }
976 spidx->dir = dir;
977
978 return 0;
979
980 bad:
981 /* XXX initialize */
982 bzero(spidx, sizeof(*spidx));
983 return EINVAL;
984 }
985
986 static int
ipsec_setspidx_interface(struct secpolicyindex * spidx,u_int8_t dir,struct mbuf * m,int needport,int ifindex,int ip_version)987 ipsec_setspidx_interface(
988 struct secpolicyindex *spidx,
989 u_int8_t dir,
990 struct mbuf *m,
991 int needport,
992 int ifindex,
993 int ip_version)
994 {
995 int error;
996
997 /* sanity check */
998 if (spidx == NULL || m == NULL) {
999 panic("ipsec_setspidx_interface: NULL pointer was passed.");
1000 }
1001
1002 bzero(spidx, sizeof(*spidx));
1003
1004 error = ipsec_setspidx(m, spidx, needport, ip_version);
1005 if (error) {
1006 goto bad;
1007 }
1008 spidx->dir = dir;
1009
1010 if (ifindex != 0) {
1011 ifnet_head_lock_shared();
1012 spidx->internal_if = ifindex2ifnet[ifindex];
1013 ifnet_head_done();
1014 } else {
1015 spidx->internal_if = NULL;
1016 }
1017
1018 return 0;
1019
1020 bad:
1021 return EINVAL;
1022 }
1023
1024 static int
ipsec4_setspidx_inpcb(struct mbuf * m,struct inpcb * pcb)1025 ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1026 {
1027 struct secpolicyindex *spidx;
1028 int error;
1029
1030 if (ipsec_bypass != 0) {
1031 return 0;
1032 }
1033
1034 /* sanity check */
1035 if (pcb == NULL) {
1036 panic("ipsec4_setspidx_inpcb: no PCB found.");
1037 }
1038 if (pcb->inp_sp == NULL) {
1039 panic("ipsec4_setspidx_inpcb: no inp_sp found.");
1040 }
1041 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) {
1042 panic("ipsec4_setspidx_inpcb: no sp_in/out found.");
1043 }
1044
1045 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1046 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1047
1048 spidx = &pcb->inp_sp->sp_in->spidx;
1049 error = ipsec_setspidx(m, spidx, 1, 0);
1050 if (error) {
1051 goto bad;
1052 }
1053 spidx->dir = IPSEC_DIR_INBOUND;
1054
1055 spidx = &pcb->inp_sp->sp_out->spidx;
1056 error = ipsec_setspidx(m, spidx, 1, 0);
1057 if (error) {
1058 goto bad;
1059 }
1060 spidx->dir = IPSEC_DIR_OUTBOUND;
1061
1062 return 0;
1063
1064 bad:
1065 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1066 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1067 return error;
1068 }
1069
1070 static int
ipsec6_setspidx_in6pcb(struct mbuf * m,struct in6pcb * pcb)1071 ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1072 {
1073 struct secpolicyindex *spidx;
1074 int error;
1075
1076 /* sanity check */
1077 if (pcb == NULL) {
1078 panic("ipsec6_setspidx_in6pcb: no PCB found.");
1079 }
1080 if (pcb->in6p_sp == NULL) {
1081 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.");
1082 }
1083 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) {
1084 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.");
1085 }
1086
1087 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1088 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1089
1090 spidx = &pcb->in6p_sp->sp_in->spidx;
1091 error = ipsec_setspidx(m, spidx, 1, 0);
1092 if (error) {
1093 goto bad;
1094 }
1095 spidx->dir = IPSEC_DIR_INBOUND;
1096
1097 spidx = &pcb->in6p_sp->sp_out->spidx;
1098 error = ipsec_setspidx(m, spidx, 1, 0);
1099 if (error) {
1100 goto bad;
1101 }
1102 spidx->dir = IPSEC_DIR_OUTBOUND;
1103
1104 return 0;
1105
1106 bad:
1107 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1108 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1109 return error;
1110 }
1111
1112 /*
1113 * configure security policy index (src/dst/proto/sport/dport)
1114 * by looking at the content of mbuf.
1115 * the caller is responsible for error recovery (like clearing up spidx).
1116 */
1117 static int
ipsec_setspidx(struct mbuf * m,struct secpolicyindex * spidx,int needport,int force_ip_version)1118 ipsec_setspidx(struct mbuf *m,
1119 struct secpolicyindex *spidx,
1120 int needport,
1121 int force_ip_version)
1122 {
1123 struct ip *ip = NULL;
1124 struct ip ipbuf;
1125 u_int v;
1126 struct mbuf *n;
1127 int len;
1128 int error;
1129
1130 if (m == NULL) {
1131 panic("ipsec_setspidx: m == 0 passed.");
1132 }
1133
1134 /*
1135 * validate m->m_pkthdr.len. we see incorrect length if we
1136 * mistakenly call this function with inconsistent mbuf chain
1137 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1138 */
1139 len = 0;
1140 for (n = m; n; n = n->m_next) {
1141 len += n->m_len;
1142 }
1143 if (m->m_pkthdr.len != len) {
1144 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1145 printf("ipsec_setspidx: "
1146 "total of m_len(%d) != pkthdr.len(%d), "
1147 "ignored.\n",
1148 len, m->m_pkthdr.len));
1149 return EINVAL;
1150 }
1151
1152 if (m->m_pkthdr.len < sizeof(struct ip)) {
1153 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1154 printf("ipsec_setspidx: "
1155 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1156 m->m_pkthdr.len));
1157 return EINVAL;
1158 }
1159
1160 if (m->m_len >= sizeof(*ip)) {
1161 ip = mtod(m, struct ip *);
1162 } else {
1163 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1164 ip = &ipbuf;
1165 }
1166
1167 if (force_ip_version) {
1168 v = force_ip_version;
1169 } else {
1170 #ifdef _IP_VHL
1171 v = _IP_VHL_V(ip->ip_vhl);
1172 #else
1173 v = ip->ip_v;
1174 #endif
1175 }
1176 switch (v) {
1177 case 4:
1178 error = ipsec4_setspidx_ipaddr(m, spidx);
1179 if (error) {
1180 return error;
1181 }
1182 ipsec4_get_ulp(m, spidx, needport);
1183 return 0;
1184 case 6:
1185 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1186 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1187 printf("ipsec_setspidx: "
1188 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1189 "ignored.\n", m->m_pkthdr.len));
1190 return EINVAL;
1191 }
1192 error = ipsec6_setspidx_ipaddr(m, spidx);
1193 if (error) {
1194 return error;
1195 }
1196 ipsec6_get_ulp(m, spidx, needport);
1197 return 0;
1198 default:
1199 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1200 printf("ipsec_setspidx: "
1201 "unknown IP version %u, ignored.\n", v));
1202 return EINVAL;
1203 }
1204 }
1205
1206 static void
ipsec4_get_ulp(struct mbuf * m,struct secpolicyindex * spidx,int needport)1207 ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1208 {
1209 struct ip ip;
1210 struct ip6_ext ip6e;
1211 u_int8_t nxt;
1212 int off;
1213 struct tcphdr th;
1214 struct udphdr uh;
1215
1216 /* sanity check */
1217 if (m == NULL) {
1218 panic("ipsec4_get_ulp: NULL pointer was passed.");
1219 }
1220 if (m->m_pkthdr.len < sizeof(ip)) {
1221 panic("ipsec4_get_ulp: too short");
1222 }
1223
1224 /* set default */
1225 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1226 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1227 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1228
1229 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1230 /* ip_input() flips it into host endian XXX need more checking */
1231 if (ip.ip_off & (IP_MF | IP_OFFMASK)) {
1232 return;
1233 }
1234
1235 nxt = ip.ip_p;
1236 #ifdef _IP_VHL
1237 off = _IP_VHL_HL(ip->ip_vhl) << 2;
1238 #else
1239 off = ip.ip_hl << 2;
1240 #endif
1241 while (off < m->m_pkthdr.len) {
1242 switch (nxt) {
1243 case IPPROTO_TCP:
1244 spidx->ul_proto = nxt;
1245 if (!needport) {
1246 return;
1247 }
1248 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1249 return;
1250 }
1251 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1252 ((struct sockaddr_in *)&spidx->src)->sin_port =
1253 th.th_sport;
1254 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1255 th.th_dport;
1256 return;
1257 case IPPROTO_UDP:
1258 spidx->ul_proto = nxt;
1259 if (!needport) {
1260 return;
1261 }
1262 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1263 return;
1264 }
1265 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1266 ((struct sockaddr_in *)&spidx->src)->sin_port =
1267 uh.uh_sport;
1268 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1269 uh.uh_dport;
1270 return;
1271 case IPPROTO_AH:
1272 if (off + sizeof(ip6e) > m->m_pkthdr.len) {
1273 return;
1274 }
1275 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1276 off += (ip6e.ip6e_len + 2) << 2;
1277 nxt = ip6e.ip6e_nxt;
1278 break;
1279 case IPPROTO_ICMP:
1280 default:
1281 /* XXX intermediate headers??? */
1282 spidx->ul_proto = nxt;
1283 return;
1284 }
1285 }
1286 }
1287
1288 /* assumes that m is sane */
1289 static int
ipsec4_setspidx_ipaddr(struct mbuf * m,struct secpolicyindex * spidx)1290 ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1291 {
1292 struct ip *ip = NULL;
1293 struct ip ipbuf;
1294 struct sockaddr_in *sin;
1295
1296 if (m->m_len >= sizeof(*ip)) {
1297 ip = mtod(m, struct ip *);
1298 } else {
1299 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1300 ip = &ipbuf;
1301 }
1302
1303 sin = (struct sockaddr_in *)&spidx->src;
1304 bzero(sin, sizeof(*sin));
1305 sin->sin_family = AF_INET;
1306 sin->sin_len = sizeof(struct sockaddr_in);
1307 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1308 spidx->prefs = sizeof(struct in_addr) << 3;
1309
1310 sin = (struct sockaddr_in *)&spidx->dst;
1311 bzero(sin, sizeof(*sin));
1312 sin->sin_family = AF_INET;
1313 sin->sin_len = sizeof(struct sockaddr_in);
1314 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1315 spidx->prefd = sizeof(struct in_addr) << 3;
1316
1317 return 0;
1318 }
1319
1320 static void
ipsec6_get_ulp(struct mbuf * m,struct secpolicyindex * spidx,int needport)1321 ipsec6_get_ulp(struct mbuf *m,
1322 struct secpolicyindex *spidx,
1323 int needport)
1324 {
1325 int off, nxt;
1326 struct tcphdr th;
1327 struct udphdr uh;
1328
1329 /* sanity check */
1330 if (m == NULL) {
1331 panic("ipsec6_get_ulp: NULL pointer was passed.");
1332 }
1333
1334 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1335 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1336
1337 /* set default */
1338 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1339 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1340 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1341
1342 nxt = -1;
1343 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1344 if (off < 0 || m->m_pkthdr.len < off) {
1345 return;
1346 }
1347
1348 VERIFY(nxt <= UINT8_MAX);
1349 switch (nxt) {
1350 case IPPROTO_TCP:
1351 spidx->ul_proto = (u_int8_t)nxt;
1352 if (!needport) {
1353 break;
1354 }
1355 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1356 break;
1357 }
1358 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1359 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1360 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1361 break;
1362 case IPPROTO_UDP:
1363 spidx->ul_proto = (u_int8_t)nxt;
1364 if (!needport) {
1365 break;
1366 }
1367 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1368 break;
1369 }
1370 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1371 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1372 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1373 break;
1374 case IPPROTO_ICMPV6:
1375 default:
1376 /* XXX intermediate headers??? */
1377 spidx->ul_proto = (u_int8_t)nxt;
1378 break;
1379 }
1380 }
1381
/* assumes that m is sane */
static int
ipsec6_setspidx_ipaddr(struct mbuf *m,
    struct secpolicyindex *spidx)
{
	struct ip6_hdr *ip6 = NULL;
	struct ip6_hdr ip6buf;
	struct sockaddr_in6 *sin6;

	/* Get at the IPv6 header; copy it out if the first mbuf is too short. */
	if (m->m_len >= sizeof(*ip6)) {
		ip6 = mtod(m, struct ip6_hdr *);
	} else {
		m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
		ip6 = &ip6buf;
	}

	/* Source address, recorded as a full /128 prefix. */
	sin6 = (struct sockaddr_in6 *)&spidx->src;
	bzero(sin6, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(struct sockaddr_in6);
	bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
	if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		/* Recover the interface scope id for a link-local source. */
		if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
			ip6_getsrcifaddr_info(m, &sin6->sin6_scope_id, NULL);
		} else if (m->m_pkthdr.pkt_ext_flags & PKTF_EXT_OUTPUT_SCOPE) {
			sin6->sin6_scope_id = ip6_output_getsrcifscope(m);
		}
		/* Verify before clearing the embedded word below — order matters. */
		in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
		if (in6_embedded_scope) {
			/*
			 * Scope is embedded in s6_addr16[1]: move it out into
			 * sin6_scope_id and zero the embedded word.
			 */
			sin6->sin6_addr.s6_addr16[1] = 0;
			sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
		}
	}
	spidx->prefs = sizeof(struct in6_addr) << 3;

	/* Destination address, recorded as a full /128 prefix. */
	sin6 = (struct sockaddr_in6 *)&spidx->dst;
	bzero(sin6, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(struct sockaddr_in6);
	bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
	if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
		/* Same scope recovery for a link-local destination. */
		if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
			ip6_getdstifaddr_info(m, &sin6->sin6_scope_id, NULL);
		} else if (m->m_pkthdr.pkt_ext_flags & PKTF_EXT_OUTPUT_SCOPE) {
			sin6->sin6_scope_id = ip6_output_getdstifscope(m);
		}
		in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
		if (in6_embedded_scope) {
			sin6->sin6_addr.s6_addr16[1] = 0;
			sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
		}
	}
	spidx->prefd = sizeof(struct in6_addr) << 3;

	return 0;
}
1438
1439 static struct inpcbpolicy *
ipsec_newpcbpolicy(void)1440 ipsec_newpcbpolicy(void)
1441 {
1442 struct inpcbpolicy *p;
1443
1444 p = kalloc_type(struct inpcbpolicy, Z_WAITOK | Z_ZERO);
1445 return p;
1446 }
1447
1448 static void
ipsec_delpcbpolicy(struct inpcbpolicy * p)1449 ipsec_delpcbpolicy(struct inpcbpolicy *p)
1450 {
1451 kfree_type(struct inpcbpolicy, p);
1452 }
1453
1454 /* initialize policy in PCB */
1455 int
ipsec_init_policy(struct socket * so,struct inpcbpolicy ** pcb_sp)1456 ipsec_init_policy(struct socket *so,
1457 struct inpcbpolicy **pcb_sp)
1458 {
1459 struct inpcbpolicy *new;
1460
1461 /* sanity check. */
1462 if (so == NULL || pcb_sp == NULL) {
1463 panic("ipsec_init_policy: NULL pointer was passed.");
1464 }
1465
1466 new = ipsec_newpcbpolicy();
1467 if (new == NULL) {
1468 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1469 return ENOBUFS;
1470 }
1471
1472 #ifdef __APPLE__
1473 if (kauth_cred_issuser(so->so_cred))
1474 #else
1475 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1476 #endif
1477 { new->priv = 1;} else {
1478 new->priv = 0;
1479 }
1480
1481 if ((new->sp_in = key_newsp()) == NULL) {
1482 ipsec_delpcbpolicy(new);
1483 return ENOBUFS;
1484 }
1485 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1486 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1487
1488 if ((new->sp_out = key_newsp()) == NULL) {
1489 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1490 ipsec_delpcbpolicy(new);
1491 return ENOBUFS;
1492 }
1493 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1494 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1495
1496 *pcb_sp = new;
1497
1498 return 0;
1499 }
1500
1501 /* copy old ipsec policy into new */
1502 int
ipsec_copy_policy(struct inpcbpolicy * old,struct inpcbpolicy * new)1503 ipsec_copy_policy(struct inpcbpolicy *old,
1504 struct inpcbpolicy *new)
1505 {
1506 struct secpolicy *sp;
1507
1508 if (ipsec_bypass != 0) {
1509 return 0;
1510 }
1511
1512 sp = ipsec_deepcopy_policy(old->sp_in);
1513 if (sp) {
1514 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1515 new->sp_in = sp;
1516 } else {
1517 return ENOBUFS;
1518 }
1519
1520 sp = ipsec_deepcopy_policy(old->sp_out);
1521 if (sp) {
1522 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1523 new->sp_out = sp;
1524 } else {
1525 return ENOBUFS;
1526 }
1527
1528 new->priv = old->priv;
1529
1530 return 0;
1531 }
1532
1533 /* deep-copy a policy in PCB */
1534 static struct secpolicy *
ipsec_deepcopy_policy(struct secpolicy * src)1535 ipsec_deepcopy_policy(struct secpolicy *src)
1536 {
1537 struct ipsecrequest *newchain = NULL;
1538 struct ipsecrequest *p;
1539 struct ipsecrequest **q;
1540 struct secpolicy *dst;
1541
1542 if (src == NULL) {
1543 return NULL;
1544 }
1545 dst = key_newsp();
1546 if (dst == NULL) {
1547 return NULL;
1548 }
1549
1550 /*
1551 * deep-copy IPsec request chain. This is required since struct
1552 * ipsecrequest is not reference counted.
1553 */
1554 q = &newchain;
1555 for (p = src->req; p; p = p->next) {
1556 *q = kalloc_type(struct ipsecrequest, Z_WAITOK_ZERO_NOFAIL);
1557
1558 (*q)->saidx.proto = p->saidx.proto;
1559 (*q)->saidx.mode = p->saidx.mode;
1560 (*q)->level = p->level;
1561 (*q)->saidx.reqid = p->saidx.reqid;
1562
1563 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1564 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1565
1566 (*q)->sp = dst;
1567
1568 q = &((*q)->next);
1569 }
1570
1571 dst->req = newchain;
1572 dst->state = src->state;
1573 dst->policy = src->policy;
1574 /* do not touch the refcnt fields */
1575
1576 return dst;
1577 }
1578
1579 /* set policy and ipsec request if present. */
1580 static int
ipsec_set_policy(struct secpolicy ** pcb_sp,__unused int optname,caddr_t request,size_t len,int priv)1581 ipsec_set_policy(struct secpolicy **pcb_sp,
1582 __unused int optname,
1583 caddr_t request,
1584 size_t len,
1585 int priv)
1586 {
1587 struct sadb_x_policy *xpl;
1588 struct secpolicy *newsp = NULL;
1589 int error;
1590
1591 /* sanity check. */
1592 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) {
1593 return EINVAL;
1594 }
1595 if (len < sizeof(*xpl)) {
1596 return EINVAL;
1597 }
1598 xpl = (struct sadb_x_policy *)(void *)request;
1599
1600 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1601 printf("ipsec_set_policy: passed policy\n");
1602 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1603
1604 /* check policy type */
1605 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1606 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1607 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) {
1608 return EINVAL;
1609 }
1610
1611 /* check privileged socket */
1612 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
1613 return EACCES;
1614 }
1615
1616 /* allocation new SP entry */
1617 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) {
1618 return error;
1619 }
1620
1621 newsp->state = IPSEC_SPSTATE_ALIVE;
1622
1623 /* clear old SP and set new SP */
1624 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1625 *pcb_sp = newsp;
1626 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1627 printf("ipsec_set_policy: new policy\n");
1628 kdebug_secpolicy(newsp));
1629
1630 return 0;
1631 }
1632
1633 int
ipsec4_set_policy(struct inpcb * inp,int optname,caddr_t request,size_t len,int priv)1634 ipsec4_set_policy(struct inpcb *inp,
1635 int optname,
1636 caddr_t request,
1637 size_t len,
1638 int priv)
1639 {
1640 struct sadb_x_policy *xpl;
1641 struct secpolicy **pcb_sp;
1642 int error = 0;
1643 struct sadb_x_policy xpl_aligned_buf;
1644 u_int8_t *xpl_unaligned;
1645
1646 /* sanity check. */
1647 if (inp == NULL || request == NULL) {
1648 return EINVAL;
1649 }
1650 if (len < sizeof(*xpl)) {
1651 return EINVAL;
1652 }
1653 xpl = (struct sadb_x_policy *)(void *)request;
1654
1655 /* This is a new mbuf allocated by soopt_getm() */
1656 if (IPSEC_IS_P2ALIGNED(xpl)) {
1657 xpl_unaligned = NULL;
1658 } else {
1659 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1660 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1661 xpl = (__typeof__(xpl)) & xpl_aligned_buf;
1662 }
1663
1664 if (inp->inp_sp == NULL) {
1665 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1666 if (error) {
1667 return error;
1668 }
1669 }
1670
1671 /* select direction */
1672 switch (xpl->sadb_x_policy_dir) {
1673 case IPSEC_DIR_INBOUND:
1674 pcb_sp = &inp->inp_sp->sp_in;
1675 break;
1676 case IPSEC_DIR_OUTBOUND:
1677 pcb_sp = &inp->inp_sp->sp_out;
1678 break;
1679 default:
1680 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1681 xpl->sadb_x_policy_dir));
1682 return EINVAL;
1683 }
1684
1685 /* turn bypass off */
1686 if (ipsec_bypass != 0) {
1687 ipsec_bypass = 0;
1688 }
1689
1690 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1691 }
1692
1693 /* delete policy in PCB */
1694 int
ipsec4_delete_pcbpolicy(struct inpcb * inp)1695 ipsec4_delete_pcbpolicy(struct inpcb *inp)
1696 {
1697 /* sanity check. */
1698 if (inp == NULL) {
1699 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.");
1700 }
1701
1702 if (inp->inp_sp == NULL) {
1703 return 0;
1704 }
1705
1706 if (inp->inp_sp->sp_in != NULL) {
1707 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1708 inp->inp_sp->sp_in = NULL;
1709 }
1710
1711 if (inp->inp_sp->sp_out != NULL) {
1712 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1713 inp->inp_sp->sp_out = NULL;
1714 }
1715
1716 ipsec_delpcbpolicy(inp->inp_sp);
1717 inp->inp_sp = NULL;
1718
1719 return 0;
1720 }
1721
1722 int
ipsec6_set_policy(struct in6pcb * in6p,int optname,caddr_t request,size_t len,int priv)1723 ipsec6_set_policy(struct in6pcb *in6p,
1724 int optname,
1725 caddr_t request,
1726 size_t len,
1727 int priv)
1728 {
1729 struct sadb_x_policy *xpl;
1730 struct secpolicy **pcb_sp;
1731 int error = 0;
1732 struct sadb_x_policy xpl_aligned_buf;
1733 u_int8_t *xpl_unaligned;
1734
1735 /* sanity check. */
1736 if (in6p == NULL || request == NULL) {
1737 return EINVAL;
1738 }
1739 if (len < sizeof(*xpl)) {
1740 return EINVAL;
1741 }
1742 xpl = (struct sadb_x_policy *)(void *)request;
1743
1744 /* This is a new mbuf allocated by soopt_getm() */
1745 if (IPSEC_IS_P2ALIGNED(xpl)) {
1746 xpl_unaligned = NULL;
1747 } else {
1748 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1749 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1750 xpl = (__typeof__(xpl)) & xpl_aligned_buf;
1751 }
1752
1753 if (in6p->in6p_sp == NULL) {
1754 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1755 if (error) {
1756 return error;
1757 }
1758 }
1759
1760 /* select direction */
1761 switch (xpl->sadb_x_policy_dir) {
1762 case IPSEC_DIR_INBOUND:
1763 pcb_sp = &in6p->in6p_sp->sp_in;
1764 break;
1765 case IPSEC_DIR_OUTBOUND:
1766 pcb_sp = &in6p->in6p_sp->sp_out;
1767 break;
1768 default:
1769 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1770 xpl->sadb_x_policy_dir));
1771 return EINVAL;
1772 }
1773
1774 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1775 }
1776
1777 int
ipsec6_delete_pcbpolicy(struct in6pcb * in6p)1778 ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1779 {
1780 /* sanity check. */
1781 if (in6p == NULL) {
1782 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.");
1783 }
1784
1785 if (in6p->in6p_sp == NULL) {
1786 return 0;
1787 }
1788
1789 if (in6p->in6p_sp->sp_in != NULL) {
1790 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1791 in6p->in6p_sp->sp_in = NULL;
1792 }
1793
1794 if (in6p->in6p_sp->sp_out != NULL) {
1795 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1796 in6p->in6p_sp->sp_out = NULL;
1797 }
1798
1799 ipsec_delpcbpolicy(in6p->in6p_sp);
1800 in6p->in6p_sp = NULL;
1801
1802 return 0;
1803 }
1804
1805 /*
1806 * return current level.
1807 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned.
1808 */
u_int
ipsec_get_reqlevel(struct ipsecrequest *isr)
{
	u_int level = 0;
	u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;

	/* sanity check */
	if (isr == NULL || isr->sp == NULL) {
		panic("ipsec_get_reqlevel: NULL pointer is passed.");
	}
	if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
	    != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) {
		panic("ipsec_get_reqlevel: family mismatched.");
	}

	/* XXX note that we have ipseclog() expanded here - code sync issue */
	/*
	 * Clamp a system-default level to USE/REQUIRE/UNIQUE: anything else
	 * is logged (when ipsec_debug) and rewritten in place to REQUIRE.
	 * The macro both mutates its argument and evaluates to the fixed-up
	 * value (ternary with comma operators) — keep token-for-token.
	 */
#define IPSEC_CHECK_DEFAULT(lev) \
	(((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
	    && (lev) != IPSEC_LEVEL_UNIQUE) \
	    ? (ipsec_debug \
	    ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
	    (lev), IPSEC_LEVEL_REQUIRE) \
	    : (void)0), \
	    (lev) = IPSEC_LEVEL_REQUIRE, \
	    (lev) \
	    : (lev))

	/* set default level — per address family and per protocol/mode */
	switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
	case AF_INET:
		esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
		esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
		ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
		ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
		break;
	case AF_INET6:
		esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
		esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
		ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
		ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
		break;
	default:
		panic("key_get_reqlevel: Unknown family. %d",
		    ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
	}

#undef IPSEC_CHECK_DEFAULT

	/* set level: DEFAULT resolves via the tables above; UNIQUE maps to REQUIRE */
	switch (isr->level) {
	case IPSEC_LEVEL_DEFAULT:
		switch (isr->saidx.proto) {
		case IPPROTO_ESP:
			if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
				level = esp_net_deflev;
			} else {
				level = esp_trans_deflev;
			}
			break;
		case IPPROTO_AH:
			if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
				level = ah_net_deflev;
			} else {
				level = ah_trans_deflev;
			}
			break;
		case IPPROTO_IPCOMP:
			ipseclog((LOG_ERR, "ipsec_get_reqlevel: "
			    "still got IPCOMP - exiting\n"));
			break;
		default:
			panic("ipsec_get_reqlevel: "
			    "Illegal protocol defined %u\n",
			    isr->saidx.proto);
		}
		break;

	case IPSEC_LEVEL_USE:
	case IPSEC_LEVEL_REQUIRE:
		level = isr->level;
		break;
	case IPSEC_LEVEL_UNIQUE:
		level = IPSEC_LEVEL_REQUIRE;
		break;

	default:
		panic("ipsec_get_reqlevel: Illegal IPsec level %u",
		    isr->level);
	}

	return level;
}
1901
1902 /*
1903 * Check AH/ESP integrity.
1904 * OUT:
1905 * 0: valid
1906 * 1: invalid
1907 */
1908 static int
ipsec_in_reject(struct secpolicy * sp,struct mbuf * m)1909 ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1910 {
1911 struct ipsecrequest *isr;
1912 u_int level;
1913 int need_auth, need_conf, need_icv;
1914
1915 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1916 printf("ipsec_in_reject: using SP\n");
1917 kdebug_secpolicy(sp));
1918
1919 /* check policy */
1920 switch (sp->policy) {
1921 case IPSEC_POLICY_DISCARD:
1922 case IPSEC_POLICY_GENERATE:
1923 return 1;
1924 case IPSEC_POLICY_BYPASS:
1925 case IPSEC_POLICY_NONE:
1926 return 0;
1927
1928 case IPSEC_POLICY_IPSEC:
1929 break;
1930
1931 case IPSEC_POLICY_ENTRUST:
1932 default:
1933 panic("ipsec_hdrsiz: Invalid policy found. %d", sp->policy);
1934 }
1935
1936 need_auth = 0;
1937 need_conf = 0;
1938 need_icv = 0;
1939
1940 /* XXX should compare policy against ipsec header history */
1941
1942 for (isr = sp->req; isr != NULL; isr = isr->next) {
1943 /* get current level */
1944 level = ipsec_get_reqlevel(isr);
1945
1946 switch (isr->saidx.proto) {
1947 case IPPROTO_ESP:
1948 if (level == IPSEC_LEVEL_REQUIRE) {
1949 need_conf++;
1950
1951 #if 0
1952 /* this won't work with multiple input threads - isr->sav would change
1953 * with every packet and is not necessarily related to the current packet
1954 * being processed. If ESP processing is required - the esp code should
1955 * make sure that the integrity check is present and correct. I don't see
1956 * why it would be necessary to check for the presence of the integrity
1957 * check value here. I think this is just wrong.
1958 * isr->sav has been removed.
1959 * %%%%%% this needs to be re-worked at some point but I think the code below can
1960 * be ignored for now.
1961 */
1962 if (isr->sav != NULL
1963 && isr->sav->flags == SADB_X_EXT_NONE
1964 && isr->sav->alg_auth != SADB_AALG_NONE) {
1965 need_icv++;
1966 }
1967 #endif
1968 }
1969 break;
1970 case IPPROTO_AH:
1971 if (level == IPSEC_LEVEL_REQUIRE) {
1972 need_auth++;
1973 need_icv++;
1974 }
1975 break;
1976 case IPPROTO_IPCOMP:
1977 /*
1978 * we don't really care, as IPcomp document says that
1979 * we shouldn't compress small packets, IPComp policy
1980 * should always be treated as being in "use" level.
1981 */
1982 break;
1983 }
1984 }
1985
1986 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1987 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
1988 need_auth, need_conf, need_icv, m->m_flags));
1989
1990 if ((need_conf && !(m->m_flags & M_DECRYPTED))
1991 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
1992 || (need_auth && !(m->m_flags & M_AUTHIPHDR))) {
1993 return 1;
1994 }
1995
1996 return 0;
1997 }
1998
1999 /*
2000 * Check AH/ESP integrity.
2001 * This function is called from tcp_input(), udp_input(),
2002 * and {ah,esp}4_input for tunnel mode
2003 */
2004 int
ipsec4_in_reject_so(struct mbuf * m,struct socket * so)2005 ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
2006 {
2007 struct secpolicy *sp = NULL;
2008 int error;
2009 int result;
2010
2011 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2012 /* sanity check */
2013 if (m == NULL) {
2014 return 0; /* XXX should be panic ? */
2015 }
2016 /* get SP for this packet.
2017 * When we are called from ip_forward(), we call
2018 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2019 */
2020 if (so == NULL) {
2021 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2022 } else {
2023 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2024 }
2025
2026 if (sp == NULL) {
2027 return 0; /* XXX should be panic ?
2028 * -> No, there may be error. */
2029 }
2030 result = ipsec_in_reject(sp, m);
2031 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2032 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
2033 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2034 key_freesp(sp, KEY_SADB_UNLOCKED);
2035
2036 return result;
2037 }
2038
2039 int
ipsec4_in_reject(struct mbuf * m,struct inpcb * inp)2040 ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
2041 {
2042 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2043 if (inp == NULL) {
2044 return ipsec4_in_reject_so(m, NULL);
2045 }
2046 if (inp->inp_socket) {
2047 return ipsec4_in_reject_so(m, inp->inp_socket);
2048 } else {
2049 panic("ipsec4_in_reject: invalid inpcb/socket");
2050 }
2051
2052 /* NOTREACHED */
2053 return 0;
2054 }
2055
2056 /*
2057 * Check AH/ESP integrity.
2058 * This function is called from tcp6_input(), udp6_input(),
2059 * and {ah,esp}6_input for tunnel mode
2060 */
2061 int
ipsec6_in_reject_so(struct mbuf * m,struct socket * so)2062 ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2063 {
2064 struct secpolicy *sp = NULL;
2065 int error;
2066 int result;
2067
2068 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2069 /* sanity check */
2070 if (m == NULL) {
2071 return 0; /* XXX should be panic ? */
2072 }
2073 /* get SP for this packet.
2074 * When we are called from ip_forward(), we call
2075 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2076 */
2077 if (so == NULL) {
2078 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2079 } else {
2080 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2081 }
2082
2083 if (sp == NULL) {
2084 return 0; /* XXX should be panic ? */
2085 }
2086 result = ipsec_in_reject(sp, m);
2087 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2088 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2089 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2090 key_freesp(sp, KEY_SADB_UNLOCKED);
2091
2092 return result;
2093 }
2094
2095 int
ipsec6_in_reject(struct mbuf * m,struct in6pcb * in6p)2096 ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2097 {
2098 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2099 if (in6p == NULL) {
2100 return ipsec6_in_reject_so(m, NULL);
2101 }
2102 if (in6p->in6p_socket) {
2103 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2104 } else {
2105 panic("ipsec6_in_reject: invalid in6p/socket");
2106 }
2107
2108 /* NOTREACHED */
2109 return 0;
2110 }
2111
2112 /*
2113 * compute the byte size to be occupied by IPsec header.
2114 * in case it is tunneled, it includes the size of outer IP header.
2115 * NOTE: SP passed is free in this function.
2116 */
2117 size_t
ipsec_hdrsiz(struct secpolicy * sp)2118 ipsec_hdrsiz(struct secpolicy *sp)
2119 {
2120 struct ipsecrequest *isr;
2121 size_t siz, clen;
2122
2123 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2124 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2125 printf("ipsec_hdrsiz: using SP\n");
2126 kdebug_secpolicy(sp));
2127
2128 /* check policy */
2129 switch (sp->policy) {
2130 case IPSEC_POLICY_DISCARD:
2131 case IPSEC_POLICY_GENERATE:
2132 case IPSEC_POLICY_BYPASS:
2133 case IPSEC_POLICY_NONE:
2134 return 0;
2135
2136 case IPSEC_POLICY_IPSEC:
2137 break;
2138
2139 case IPSEC_POLICY_ENTRUST:
2140 default:
2141 panic("ipsec_hdrsiz: Invalid policy found. %d", sp->policy);
2142 }
2143
2144 siz = 0;
2145
2146 for (isr = sp->req; isr != NULL; isr = isr->next) {
2147 clen = 0;
2148
2149 switch (isr->saidx.proto) {
2150 case IPPROTO_ESP:
2151 #if IPSEC_ESP
2152 clen = esp_hdrsiz(isr);
2153 #else
2154 clen = 0; /*XXX*/
2155 #endif
2156 break;
2157 case IPPROTO_AH:
2158 clen = ah_hdrsiz(isr);
2159 break;
2160 default:
2161 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2162 "unknown protocol %u\n",
2163 isr->saidx.proto));
2164 break;
2165 }
2166
2167 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2168 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2169 case AF_INET:
2170 clen += sizeof(struct ip);
2171 break;
2172 case AF_INET6:
2173 clen += sizeof(struct ip6_hdr);
2174 break;
2175 default:
2176 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2177 "unknown AF %d in IPsec tunnel SA\n",
2178 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2179 break;
2180 }
2181 }
2182 siz += clen;
2183 }
2184
2185 return siz;
2186 }
2187
2188 /* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2189 size_t
ipsec4_hdrsiz(struct mbuf * m,u_int8_t dir,struct inpcb * inp)2190 ipsec4_hdrsiz(struct mbuf *m, u_int8_t dir, struct inpcb *inp)
2191 {
2192 struct secpolicy *sp = NULL;
2193 int error;
2194 size_t size;
2195
2196 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2197 /* sanity check */
2198 if (m == NULL) {
2199 return 0; /* XXX should be panic ? */
2200 }
2201 if (inp != NULL && inp->inp_socket == NULL) {
2202 panic("ipsec4_hdrsize: why is socket NULL but there is PCB.");
2203 }
2204
2205 /* get SP for this packet.
2206 * When we are called from ip_forward(), we call
2207 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2208 */
2209 if (inp == NULL) {
2210 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2211 } else {
2212 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2213 }
2214
2215 if (sp == NULL) {
2216 return 0; /* XXX should be panic ? */
2217 }
2218 size = ipsec_hdrsiz(sp);
2219 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2220 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2221 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2222 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2223 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2224 key_freesp(sp, KEY_SADB_UNLOCKED);
2225
2226 return size;
2227 }
2228
/* This function is called from ipsec6_hdrsize_tcp(),
 * and maybe from ip6_forward().
 */
2232 size_t
ipsec6_hdrsiz(struct mbuf * m,u_int8_t dir,struct in6pcb * in6p)2233 ipsec6_hdrsiz(struct mbuf *m, u_int8_t dir, struct in6pcb *in6p)
2234 {
2235 struct secpolicy *sp = NULL;
2236 int error;
2237 size_t size;
2238
2239 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2240 /* sanity check */
2241 if (m == NULL) {
2242 return 0; /* XXX shoud be panic ? */
2243 }
2244 if (in6p != NULL && in6p->in6p_socket == NULL) {
2245 panic("ipsec6_hdrsize: why is socket NULL but there is PCB.");
2246 }
2247
2248 /* get SP for this packet */
2249 /* XXX Is it right to call with IP_FORWARDING. */
2250 if (in6p == NULL) {
2251 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2252 } else {
2253 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2254 }
2255
2256 if (sp == NULL) {
2257 return 0;
2258 }
2259 size = ipsec_hdrsiz(sp);
2260 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2261 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2262 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2263 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2264 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2265 key_freesp(sp, KEY_SADB_UNLOCKED);
2266
2267 return size;
2268 }
2269
2270 /*
2271 * encapsulate for ipsec tunnel.
2272 * ip->ip_src must be fixed later on.
2273 */
int
ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
{
	struct ip *oip;         /* inner (original) IPv4 header after the shift */
	struct ip *ip;          /* outer (new) IPv4 header, first mbuf */
	size_t plen;            /* original packet length = inner payload length */
	u_int32_t hlen;         /* inner IPv4 header length, bytes */

	/* can't tunnel between different AFs */
	if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
	    != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
	    || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
		m_freem(m);
		return EINVAL;
	}

	if (m->m_len < sizeof(*ip)) {
		panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
	}

	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* the caller must hand us exactly the IP header in the first mbuf */
	if (m->m_len != hlen) {
		panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
	}

	/* generate header checksum */
	ip->ip_sum = 0;
	/* NOTE: both branches below are identical; the #ifdef is vestigial */
#ifdef _IP_VHL
	ip->ip_sum = in_cksum(m, hlen);
#else
	ip->ip_sum = in_cksum(m, hlen);
#endif

	plen = m->m_pkthdr.len;

	/*
	 * grow the mbuf to accommodate the new IPv4 header.
	 * NOTE: IPv4 options will never be copied.
	 */
	/* Push the inner header into the second mbuf, allocating a fresh
	 * mbuf only when there is no leading space to absorb it. */
	if (M_LEADINGSPACE(m->m_next) < hlen) {
		struct mbuf *n;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		n->m_len = hlen;
		n->m_next = m->m_next;
		m->m_next = n;
		m->m_pkthdr.len += hlen;
		oip = mtod(n, struct ip *);
	} else {
		m->m_next->m_len += hlen;
		m->m_next->m_data -= hlen;
		m->m_pkthdr.len += hlen;
		oip = mtod(m->m_next, struct ip *);
	}
	ip = mtod(m, struct ip *);
	/* copy the inner header (incl. options) down; first mbuf keeps only
	 * a plain 20-byte outer header, so shrink it and the pkthdr total */
	ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
	m->m_len = sizeof(struct ip);
	m->m_pkthdr.len -= (hlen - sizeof(struct ip));

	/* construct new IPv4 header. see RFC 2401 5.1.2.1 */
	/* ECN consideration. */
	ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
#ifdef _IP_VHL
	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
#else
	ip->ip_hl = sizeof(struct ip) >> 2;
#endif
	/* the outer header is never a fragment */
	ip->ip_off &= htons(~IP_OFFMASK);
	ip->ip_off &= htons(~IP_MF);
	switch (ip4_ipsec_dfbit) {
	case 0: /* clear DF bit */
		ip->ip_off &= htons(~IP_DF);
		break;
	case 1: /* set DF bit */
		ip->ip_off |= htons(IP_DF);
		break;
	default: /* copy DF bit */
		break;
	}
	ip->ip_p = IPPROTO_IPIP;
	if (plen + sizeof(struct ip) < IP_MAXPACKET) {
		ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
	} else {
		ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
		    "leave ip_len as is (invalid packet)\n"));
	}
	/* atomic datagrams (RFC 6864) may carry a constant zero IP ID */
	if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) {
		ip->ip_id = 0;
	} else {
		ip->ip_id = ip_randomid((uint64_t)m);
	}
	bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
	    &ip->ip_src, sizeof(ip->ip_src));
	bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
	    &ip->ip_dst, sizeof(ip->ip_dst));
	ip->ip_ttl = IPDEFTTL;

	/* XXX Should ip_src be updated later ? */

	return 0;
}
2384
2385
int
ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
{
	struct ip6_hdr *oip6;   /* inner (original) IPv6 header after the shift */
	struct ip6_hdr *ip6;    /* outer (new) IPv6 header, first mbuf */
	size_t plen;            /* payload length behind the new outer header */

	/* can't tunnel between different AFs */
	if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
	    != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
	    || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
		m_freem(m);
		return EINVAL;
	}

	plen = m->m_pkthdr.len;

	/*
	 * grow the mbuf to accommodate the new IPv6 header.
	 */
	/* the caller must hand us exactly the IPv6 header in the first mbuf */
	if (m->m_len != sizeof(struct ip6_hdr)) {
		panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
	}
	/* Push the inner header into the second mbuf, allocating a fresh
	 * mbuf only when there is no leading space to absorb it. */
	if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
		struct mbuf *n;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		n->m_len = sizeof(struct ip6_hdr);
		n->m_next = m->m_next;
		m->m_next = n;
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip6 = mtod(n, struct ip6_hdr *);
	} else {
		m->m_next->m_len += sizeof(struct ip6_hdr);
		m->m_next->m_data -= sizeof(struct ip6_hdr);
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip6 = mtod(m->m_next, struct ip6_hdr *);
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));

	/* Fake link-local scope-class addresses */
	/* clear embedded scope ids so the inner header carries the
	 * on-the-wire form */
	if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) {
		oip6->ip6_src.s6_addr16[1] = 0;
	}
	if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) {
		oip6->ip6_dst.s6_addr16[1] = 0;
	}

	/* construct new IPv6 header. see RFC 2401 5.1.2.2 */
	/* ECN consideration. */
	ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
	if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
		ip6->ip6_plen = htons((u_int16_t)plen);
	} else {
		/* ip6->ip6_plen will be updated in ip6_output() */
	}
	ip6->ip6_nxt = IPPROTO_IPV6;
	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
	    &ip6->ip6_src, sizeof(ip6->ip6_src));
	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
	    &ip6->ip6_dst, sizeof(ip6->ip6_dst));
	ip6->ip6_hlim = IPV6_DEFHLIM;

	/* re-embed the outgoing interface scope for link-local endpoints */
	if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
		ip6->ip6_src.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
		ip6->ip6_dst.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
	}

	/* XXX Should ip6_src be updated later ? */

	return 0;
}
2462
2463 static int
ipsec64_encapsulate(struct mbuf * m,struct secasvar * sav,u_int32_t dscp_mapping)2464 ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav, u_int32_t dscp_mapping)
2465 {
2466 struct ip6_hdr *ip6, *ip6i;
2467 struct ip *ip;
2468 size_t plen;
2469
2470 /* tunneling over IPv4 */
2471 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2472 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2473 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2474 m_freem(m);
2475 return EINVAL;
2476 }
2477
2478 plen = m->m_pkthdr.len;
2479 ip6 = mtod(m, struct ip6_hdr *);
2480 /*
2481 * grow the mbuf to accomodate the new IPv4 header.
2482 */
2483 if (m->m_len != sizeof(struct ip6_hdr)) {
2484 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2485 }
2486 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2487 struct mbuf *n;
2488 MGET(n, M_DONTWAIT, MT_DATA);
2489 if (!n) {
2490 m_freem(m);
2491 return ENOBUFS;
2492 }
2493 n->m_len = sizeof(struct ip6_hdr);
2494 n->m_next = m->m_next;
2495 m->m_next = n;
2496 m->m_pkthdr.len += sizeof(struct ip);
2497 ip6i = mtod(n, struct ip6_hdr *);
2498 } else {
2499 m->m_next->m_len += sizeof(struct ip6_hdr);
2500 m->m_next->m_data -= sizeof(struct ip6_hdr);
2501 m->m_pkthdr.len += sizeof(struct ip);
2502 ip6i = mtod(m->m_next, struct ip6_hdr *);
2503 }
2504
2505 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2506 ip = mtod(m, struct ip *);
2507 m->m_len = sizeof(struct ip);
2508 /*
2509 * Fill in some of the IPv4 fields - we don't need all of them
2510 * because the rest will be filled in by ip_output
2511 */
2512 ip->ip_v = IPVERSION;
2513 ip->ip_hl = sizeof(struct ip) >> 2;
2514 ip->ip_id = 0;
2515 ip->ip_sum = 0;
2516 ip->ip_tos = 0;
2517 ip->ip_off = 0;
2518 ip->ip_ttl = IPDEFTTL;
2519 ip->ip_p = IPPROTO_IPV6;
2520
2521 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2522 /* ECN consideration. */
2523 if (dscp_mapping == IPSEC_DSCP_MAPPING_COPY) {
2524 // Copy DSCP bits from inner IP to outer IP packet.
2525 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow);
2526 } else if (dscp_mapping == IPSEC_DSCP_MAPPING_LEGACY) {
2527 // Copy DSCP bits in legacy style.
2528 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2529 }
2530
2531 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2532 ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
2533 } else {
2534 ip->ip_len = htons((u_int16_t)plen);
2535 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2536 "leave ip_len as is (invalid packet)\n"));
2537 }
2538 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2539 &ip->ip_src, sizeof(ip->ip_src));
2540 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2541 &ip->ip_dst, sizeof(ip->ip_dst));
2542
2543 return 0;
2544 }
2545
2546 int
ipsec6_update_routecache_and_output(struct ipsec_output_state * state,struct secasvar * sav)2547 ipsec6_update_routecache_and_output(
2548 struct ipsec_output_state *state,
2549 struct secasvar *sav)
2550 {
2551 struct sockaddr_in6* dst6;
2552 struct route_in6 *ro6;
2553 struct ip6_hdr *ip6;
2554 errno_t error = 0;
2555
2556 int plen;
2557 struct ip6_out_args ip6oa;
2558 struct route_in6 ro6_new;
2559 struct flowadv *adv = NULL;
2560
2561 if (!state->m) {
2562 return EINVAL;
2563 }
2564 ip6 = mtod(state->m, struct ip6_hdr *);
2565
2566 // grab sadb_mutex, before updating sah's route cache
2567 lck_mtx_lock(sadb_mutex);
2568 ro6 = &sav->sah->sa_route;
2569 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2570 if (ro6->ro_rt) {
2571 RT_LOCK(ro6->ro_rt);
2572 }
2573 if (ROUTE_UNUSABLE(ro6) ||
2574 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2575 if (ro6->ro_rt != NULL) {
2576 RT_UNLOCK(ro6->ro_rt);
2577 }
2578 ROUTE_RELEASE(ro6);
2579 }
2580 if (ro6->ro_rt == 0) {
2581 bzero(dst6, sizeof(*dst6));
2582 dst6->sin6_family = AF_INET6;
2583 dst6->sin6_len = sizeof(*dst6);
2584 dst6->sin6_addr = ip6->ip6_dst;
2585 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
2586 if (ro6->ro_rt) {
2587 RT_LOCK(ro6->ro_rt);
2588 }
2589 }
2590 if (ro6->ro_rt == 0) {
2591 ip6stat.ip6s_noroute++;
2592 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2593 error = EHOSTUNREACH;
2594 // release sadb_mutex, after updating sah's route cache
2595 lck_mtx_unlock(sadb_mutex);
2596 return error;
2597 }
2598
2599 /*
2600 * adjust state->dst if tunnel endpoint is offlink
2601 *
2602 * XXX: caching rt_gateway value in the state is
2603 * not really good, since it may point elsewhere
2604 * when the gateway gets modified to a larger
2605 * sockaddr via rt_setgate(). This is currently
2606 * addressed by SA_SIZE roundup in that routine.
2607 */
2608 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
2609 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2610 }
2611 RT_UNLOCK(ro6->ro_rt);
2612 ROUTE_RELEASE(&state->ro);
2613 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
2614 state->dst = (struct sockaddr *)dst6;
2615 state->tunneled = 6;
2616 // release sadb_mutex, after updating sah's route cache
2617 lck_mtx_unlock(sadb_mutex);
2618
2619 state->m = ipsec6_splithdr(state->m);
2620 if (!state->m) {
2621 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2622 error = ENOMEM;
2623 return error;
2624 }
2625
2626 ip6 = mtod(state->m, struct ip6_hdr *);
2627 switch (sav->sah->saidx.proto) {
2628 case IPPROTO_ESP:
2629 #if IPSEC_ESP
2630 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2631 #else
2632 m_freem(state->m);
2633 error = EINVAL;
2634 #endif
2635 break;
2636 case IPPROTO_AH:
2637 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2638 break;
2639 default:
2640 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2641 m_freem(state->m);
2642 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2643 error = EINVAL;
2644 break;
2645 }
2646 if (error) {
2647 // If error, packet already freed by above output routines
2648 state->m = NULL;
2649 return error;
2650 }
2651
2652 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2653 if (plen > IPV6_MAXPACKET) {
2654 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2655 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2656 error = EINVAL;/*XXX*/
2657 return error;
2658 }
2659 ip6 = mtod(state->m, struct ip6_hdr *);
2660 ip6->ip6_plen = htons((u_int16_t)plen);
2661
2662 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6,
2663 sav->flowid);
2664 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2665
2666 /* Increment statistics */
2667 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, (u_int32_t)mbuf_pkthdr_len(state->m), 0);
2668
2669 /* Send to ip6_output */
2670 bzero(&ro6_new, sizeof(ro6_new));
2671 bzero(&ip6oa, sizeof(ip6oa));
2672 ip6oa.ip6oa_flowadv.code = 0;
2673 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2674 if (state->outgoing_if) {
2675 ip6oa.ip6oa_boundif = state->outgoing_if;
2676 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2677 ip6_output_setsrcifscope(state->m, state->outgoing_if, NULL);
2678 ip6_output_setdstifscope(state->m, state->outgoing_if, NULL);
2679 }
2680
2681 adv = &ip6oa.ip6oa_flowadv;
2682 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2683 state->m = NULL;
2684
2685 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2686 error = ENOBUFS;
2687 ifnet_disable_output(sav->sah->ipsec_if);
2688 return error;
2689 }
2690
2691 return 0;
2692 }
2693
int
ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
{
	struct mbuf *m;
	struct ip6_hdr *ip6;    /* outer (new) IPv6 header */
	struct ip *oip;         /* inner IPv4 header after the shift */
	struct ip *ip;          /* inner IPv4 header while in the first mbuf */
	size_t plen;            /* original IPv4 packet length */
	u_int32_t hlen;         /* inner IPv4 header length, bytes */

	m = state->m;
	if (!m) {
		return EINVAL;
	}

	/* can't tunnel between different AFs */
	if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
	    != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
	    || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
		m_freem(m);
		return EINVAL;
	}

	if (m->m_len < sizeof(*ip)) {
		panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
		return EINVAL;
	}

	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* the caller must hand us exactly the IPv4 header in the first mbuf */
	if (m->m_len != hlen) {
		panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
		return EINVAL;
	}

	/* generate header checksum */
	ip->ip_sum = 0;
	/* NOTE: both branches below are identical; the #ifdef is vestigial */
#ifdef _IP_VHL
	ip->ip_sum = in_cksum(m, hlen);
#else
	ip->ip_sum = in_cksum(m, hlen);
#endif

	plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len

	/*
	 * First move the IPv4 header to the second mbuf in the chain
	 */
	if (M_LEADINGSPACE(m->m_next) < hlen) {
		struct mbuf *n;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		n->m_len = hlen;
		n->m_next = m->m_next;
		m->m_next = n;
		/* the chain grows by exactly one IPv6 header overall; the
		 * inner IPv4 header is moved, not duplicated */
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip = mtod(n, struct ip *);
	} else {
		m->m_next->m_len += hlen;
		m->m_next->m_data -= hlen;
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip = mtod(m->m_next, struct ip *);
	}
	ip = mtod(m, struct ip *);
	ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);

	/*
	 * Grow the first mbuf to accommodate the new IPv6 header.
	 */
	if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
		struct mbuf *n;
		MGETHDR(n, M_DONTWAIT, MT_HEADER);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		M_COPY_PKTHDR(n, m);
		MH_ALIGN(n, sizeof(struct ip6_hdr));
		n->m_len = sizeof(struct ip6_hdr);
		n->m_next = m->m_next;
		m->m_next = NULL;
		/* the inner header was already copied into the second mbuf,
		 * so the detached old first mbuf can be released */
		m_freem(m);
		state->m = n;
		m = state->m;
	} else {
		m->m_len += (sizeof(struct ip6_hdr) - hlen);
		m->m_data -= (sizeof(struct ip6_hdr) - hlen);
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;

	/* construct new IPv6 header. see RFC 2401 5.1.2.2 */
	/* ECN consideration. */
	if (state->dscp_mapping == IPSEC_DSCP_MAPPING_COPY) {
		// Copy DSCP bits from inner IP to outer IP packet.
		ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos);
	} else if (state->dscp_mapping == IPSEC_DSCP_MAPPING_LEGACY) {
		// Copy DSCP bits in legacy style.
		// NOTE(review): `ip` may be stale here if the first mbuf was
		// replaced (and freed) in the MGETHDR path above -- confirm
		// the legacy mapping is only used when leading space existed.
		ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
	}
	if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
		ip6->ip6_plen = htons((u_int16_t)plen);
	} else {
		/* ip6->ip6_plen will be updated in ip6_output() */
	}

	ip6->ip6_nxt = IPPROTO_IPV4;
	ip6->ip6_hlim = IPV6_DEFHLIM;

	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
	    &ip6->ip6_src, sizeof(ip6->ip6_src));
	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
	    &ip6->ip6_dst, sizeof(ip6->ip6_dst));

	/* embed the outgoing interface scope for link-local endpoints */
	if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
		ip6->ip6_src.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
		ip6->ip6_dst.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
	}

	return 0;
}
2825
2826 /*
2827 * Check the variable replay window.
2828 * ipsec_chkreplay() performs replay check before ICV verification.
2829 * ipsec_updatereplay() updates replay bitmap. This must be called after
2830 * ICV verification (it also performs replay check, which is usually done
2831 * beforehand).
2832 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2833 *
2834 * based on RFC 2401.
2835 */
2836 int
ipsec_chkreplay(u_int32_t seq,struct secasvar * sav,u_int8_t replay_index)2837 ipsec_chkreplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2838 {
2839 const struct secreplay *replay;
2840 u_int32_t diff;
2841 size_t fr;
2842 size_t wsizeb; /* constant: bits of window size */
2843 size_t frlast; /* constant: last frame */
2844
2845
2846 /* sanity check */
2847 if (sav == NULL) {
2848 panic("ipsec_chkreplay: NULL pointer was passed.");
2849 }
2850
2851 lck_mtx_lock(sadb_mutex);
2852 replay = sav->replay[replay_index];
2853
2854 if (replay->wsize == 0) {
2855 lck_mtx_unlock(sadb_mutex);
2856 return 1; /* no need to check replay. */
2857 }
2858
2859 /* constant */
2860 frlast = replay->wsize - 1;
2861 wsizeb = replay->wsize << 3;
2862
2863 /* sequence number of 0 is invalid */
2864 if (seq == 0) {
2865 lck_mtx_unlock(sadb_mutex);
2866 return 0;
2867 }
2868
2869 /* first time is always okay */
2870 if (replay->count == 0) {
2871 lck_mtx_unlock(sadb_mutex);
2872 return 1;
2873 }
2874
2875 if (seq > replay->lastseq) {
2876 /* larger sequences are okay */
2877 lck_mtx_unlock(sadb_mutex);
2878 return 1;
2879 } else {
2880 /* seq is equal or less than lastseq. */
2881 diff = replay->lastseq - seq;
2882
2883 /* over range to check, i.e. too old or wrapped */
2884 if (diff >= wsizeb) {
2885 lck_mtx_unlock(sadb_mutex);
2886 return 0;
2887 }
2888
2889 fr = frlast - diff / 8;
2890
2891 /* this packet already seen ? */
2892 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2893 lck_mtx_unlock(sadb_mutex);
2894 return 0;
2895 }
2896
2897 /* out of order but good */
2898 lck_mtx_unlock(sadb_mutex);
2899 return 1;
2900 }
2901 }
2902
2903 /*
2904 * check replay counter whether to update or not.
2905 * OUT: 0: OK
2906 * 1: NG
2907 */
2908 int
ipsec_updatereplay(u_int32_t seq,struct secasvar * sav,u_int8_t replay_index)2909 ipsec_updatereplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2910 {
2911 struct secreplay *replay;
2912 u_int32_t diff;
2913 size_t fr;
2914 size_t wsizeb; /* constant: bits of window size */
2915 size_t frlast; /* constant: last frame */
2916
2917 /* sanity check */
2918 if (sav == NULL) {
2919 panic("ipsec_chkreplay: NULL pointer was passed.");
2920 }
2921
2922 lck_mtx_lock(sadb_mutex);
2923 replay = sav->replay[replay_index];
2924
2925 if (replay->wsize == 0) {
2926 goto ok; /* no need to check replay. */
2927 }
2928 /* constant */
2929 frlast = replay->wsize - 1;
2930 wsizeb = replay->wsize << 3;
2931
2932 /* sequence number of 0 is invalid */
2933 if (seq == 0) {
2934 lck_mtx_unlock(sadb_mutex);
2935 return 1;
2936 }
2937
2938 /* first time */
2939 if (replay->count == 0) {
2940 replay->lastseq = seq;
2941 bzero(replay->bitmap, replay->wsize);
2942 (replay->bitmap)[frlast] = 1;
2943 goto ok;
2944 }
2945
2946 if (seq > replay->lastseq) {
2947 /* seq is larger than lastseq. */
2948 diff = seq - replay->lastseq;
2949
2950 /* new larger sequence number */
2951 if (diff < wsizeb) {
2952 /* In window */
2953 /* set bit for this packet */
2954 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
2955 (replay->bitmap)[frlast] |= 1;
2956 } else {
2957 /* this packet has a "way larger" */
2958 bzero(replay->bitmap, replay->wsize);
2959 (replay->bitmap)[frlast] = 1;
2960 }
2961 replay->lastseq = seq;
2962
2963 /* larger is good */
2964 } else {
2965 /* seq is equal or less than lastseq. */
2966 diff = replay->lastseq - seq;
2967
2968 /* over range to check, i.e. too old or wrapped */
2969 if (diff >= wsizeb) {
2970 lck_mtx_unlock(sadb_mutex);
2971 return 1;
2972 }
2973
2974 fr = frlast - diff / 8;
2975
2976 /* this packet already seen ? */
2977 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2978 lck_mtx_unlock(sadb_mutex);
2979 return 1;
2980 }
2981
2982 /* mark as seen */
2983 (replay->bitmap)[fr] |= (1 << (diff % 8));
2984
2985 /* out of order but good */
2986 }
2987
2988 ok:
2989 {
2990 u_int32_t max_count = ~0;
2991 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
2992 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
2993 max_count = (1ULL << 32) / MAX_REPLAY_WINDOWS;
2994 }
2995
2996 if (replay->count == max_count) {
2997 /* set overflow flag */
2998 replay->overflow++;
2999
3000 /* don't increment, no more packets accepted */
3001 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
3002 lck_mtx_unlock(sadb_mutex);
3003 return 1;
3004 }
3005
3006 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
3007 replay->overflow, ipsec_logsastr(sav)));
3008 }
3009 }
3010
3011 replay->count++;
3012
3013 lck_mtx_unlock(sadb_mutex);
3014 return 0;
3015 }
3016
3017 /*
3018 * shift variable length buffer to left.
3019 * IN: bitmap: pointer to the buffer
3020 * nbit: the number of to shift.
3021 * wsize: buffer size (bytes).
3022 */
3023 static void
vshiftl(unsigned char * bitmap,int nbit,size_t wsize)3024 vshiftl(unsigned char *bitmap, int nbit, size_t wsize)
3025 {
3026 size_t i;
3027 int s, j;
3028 unsigned char over;
3029
3030 for (j = 0; j < nbit; j += 8) {
3031 s = (nbit - j < 8) ? (nbit - j): 8;
3032 bitmap[0] <<= s;
3033 for (i = 1; i < wsize; i++) {
3034 over = (bitmap[i] >> (8 - s));
3035 bitmap[i] <<= s;
3036 bitmap[i - 1] |= over;
3037 }
3038 }
3039
3040 return;
3041 }
3042
3043 const char *
ipsec4_logpacketstr(struct ip * ip,u_int32_t spi)3044 ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
3045 {
3046 static char buf[256] __attribute__((aligned(4)));
3047 char *p;
3048 u_int8_t *s, *d;
3049
3050 s = (u_int8_t *)(&ip->ip_src);
3051 d = (u_int8_t *)(&ip->ip_dst);
3052
3053 p = buf;
3054 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3055 while (p && *p) {
3056 p++;
3057 }
3058 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
3059 s[0], s[1], s[2], s[3]);
3060 while (p && *p) {
3061 p++;
3062 }
3063 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
3064 d[0], d[1], d[2], d[3]);
3065 while (p && *p) {
3066 p++;
3067 }
3068 snprintf(p, sizeof(buf) - (p - buf), ")");
3069
3070 return buf;
3071 }
3072
3073 const char *
ipsec6_logpacketstr(struct ip6_hdr * ip6,u_int32_t spi)3074 ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3075 {
3076 static char buf[256] __attribute__((aligned(4)));
3077 char *p;
3078
3079 p = buf;
3080 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3081 while (p && *p) {
3082 p++;
3083 }
3084 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3085 ip6_sprintf(&ip6->ip6_src));
3086 while (p && *p) {
3087 p++;
3088 }
3089 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3090 ip6_sprintf(&ip6->ip6_dst));
3091 while (p && *p) {
3092 p++;
3093 }
3094 snprintf(p, sizeof(buf) - (p - buf), ")");
3095
3096 return buf;
3097 }
3098
3099 const char *
ipsec_logsastr(struct secasvar * sav)3100 ipsec_logsastr(struct secasvar *sav)
3101 {
3102 static char buf[256] __attribute__((aligned(4)));
3103 char *p;
3104 struct secasindex *saidx = &sav->sah->saidx;
3105
3106 /* validity check */
3107 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3108 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) {
3109 panic("ipsec_logsastr: family mismatched.");
3110 }
3111
3112 p = buf;
3113 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3114 while (p && *p) {
3115 p++;
3116 }
3117 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3118 u_int8_t *s, *d;
3119 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3120 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3121 snprintf(p, sizeof(buf) - (p - buf),
3122 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3123 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3124 } else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3125 snprintf(p, sizeof(buf) - (p - buf),
3126 "src=%s",
3127 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3128 while (p && *p) {
3129 p++;
3130 }
3131 snprintf(p, sizeof(buf) - (p - buf),
3132 " dst=%s",
3133 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3134 }
3135 while (p && *p) {
3136 p++;
3137 }
3138 snprintf(p, sizeof(buf) - (p - buf), ")");
3139
3140 return buf;
3141 }
3142
3143 void
ipsec_dumpmbuf(struct mbuf * m)3144 ipsec_dumpmbuf(struct mbuf *m)
3145 {
3146 int totlen;
3147 int i;
3148 u_char *p;
3149
3150 totlen = 0;
3151 printf("---\n");
3152 while (m) {
3153 p = mtod(m, u_char *);
3154 for (i = 0; i < m->m_len; i++) {
3155 printf("%02x ", p[i]);
3156 totlen++;
3157 if (totlen % 16 == 0) {
3158 printf("\n");
3159 }
3160 }
3161 m = m->m_next;
3162 }
3163 if (totlen % 16 != 0) {
3164 printf("\n");
3165 }
3166 printf("---\n");
3167 }
3168
3169 #if INET
3170 /*
3171 * IPsec output logic for IPv4.
3172 */
3173 static int
ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
{
	/*
	 * Apply one IPsec transform (ESP or AH) to the IPv4 packet held in
	 * state->m, using SA `sav`.  For a tunnel-mode SA the packet is
	 * first wrapped in a new outer header (IPv4 or IPv6, depending on
	 * the SA endpoint family) and the SA's cached route is refreshed
	 * under sadb_mutex.  Returns 0 with the transformed packet in
	 * state->m.  On error, state->m is NULL when the mbuf was already
	 * consumed by a transform; on early validation failures it is left
	 * intact for the caller's bad-path cleanup.
	 */
	struct ip *ip = NULL;
	int error = 0;
	struct sockaddr_in *dst4;
	struct route *ro4;

	/* validity check */
	if (sav == NULL || sav->sah == NULL) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * If there is no valid SA, we give up to process any
	 * more. In such a case, the SA's status is changed
	 * from DYING to DEAD after allocating. If a packet
	 * send to the receiver by dead SA, the receiver can
	 * not decode a packet because SA has been dead.
	 */
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
		error = EINVAL;
		goto bad;
	}

	state->outgoing_if = sav->sah->outgoing_if;

	/*
	 * There may be the case that SA status will be changed when
	 * we are refering to one. So calling splsoftnet().
	 */

	if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
		/*
		 * build IPsec tunnel.
		 */
		/* Isolate the IP header into its own mbuf before encapsulating. */
		state->m = ipsec4_splithdr(state->m);
		if (!state->m) {
			error = ENOMEM;
			goto bad;
		}

		if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
			/*
			 * IPv6 outer header (4-in-6): encapsulation and the rest
			 * of the output path are handled by the v6 helpers, which
			 * own the mbuf from here on.
			 */
			error = ipsec46_encapsulate(state, sav);
			if (error) {
				// packet already freed by encapsulation error handling
				state->m = NULL;
				return error;
			}

			error = ipsec6_update_routecache_and_output(state, sav);
			return error;
		} else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
			/* IPv4 outer header (4-in-4). */
			error = ipsec4_encapsulate(state->m, sav);
			if (error) {
				state->m = NULL;
				goto bad;
			}
			ip = mtod(state->m, struct ip *);

			// grab sadb_mutex, before updating sah's route cache
			lck_mtx_lock(sadb_mutex);
			ro4 = (struct route *)&sav->sah->sa_route;
			dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
			if (ro4->ro_rt != NULL) {
				RT_LOCK(ro4->ro_rt);
			}
			/*
			 * Drop the cached route if it went stale or no longer
			 * points at the current outer destination.
			 */
			if (ROUTE_UNUSABLE(ro4) ||
			    dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
				if (ro4->ro_rt != NULL) {
					RT_UNLOCK(ro4->ro_rt);
				}
				ROUTE_RELEASE(ro4);
			}
			if (ro4->ro_rt == 0) {
				dst4->sin_family = AF_INET;
				dst4->sin_len = sizeof(*dst4);
				dst4->sin_addr = ip->ip_dst;
				rtalloc_scoped(ro4, sav->sah->outgoing_if);
				if (ro4->ro_rt == 0) {
					OSAddAtomic(1, &ipstat.ips_noroute);
					error = EHOSTUNREACH;
					// release sadb_mutex, after updating sah's route cache
					lck_mtx_unlock(sadb_mutex);
					goto bad;
				}
				RT_LOCK(ro4->ro_rt);
			}

			/*
			 * adjust state->dst if tunnel endpoint is offlink
			 *
			 * XXX: caching rt_gateway value in the state is
			 * not really good, since it may point elsewhere
			 * when the gateway gets modified to a larger
			 * sockaddr via rt_setgate(). This is currently
			 * addressed by SA_SIZE roundup in that routine.
			 */
			if (ro4->ro_rt->rt_flags & RTF_GATEWAY) {
				dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
			}
			RT_UNLOCK(ro4->ro_rt);
			ROUTE_RELEASE(&state->ro);
			route_copyout((struct route *)&state->ro, ro4, sizeof(struct route));
			state->dst = (struct sockaddr *)dst4;
			state->tunneled = 4;
			// release sadb_mutex, after updating sah's route cache
			lck_mtx_unlock(sadb_mutex);
		} else {
			ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
			    __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
			error = EAFNOSUPPORT;
			goto bad;
		}
	}

	/* Transport mode (or post-encapsulation): split header and transform. */
	state->m = ipsec4_splithdr(state->m);
	if (!state->m) {
		error = ENOMEM;
		goto bad;
	}
	switch (sav->sah->saidx.proto) {
	case IPPROTO_ESP:
#if IPSEC_ESP
		if ((error = esp4_output(state->m, sav)) != 0) {
			state->m = NULL;
			goto bad;
		}
		break;
#else
		m_freem(state->m);
		state->m = NULL;
		error = EINVAL;
		goto bad;
#endif
	case IPPROTO_AH:
		if ((error = ah4_output(state->m, sav)) != 0) {
			state->m = NULL;
			goto bad;
		}
		break;
	default:
		ipseclog((LOG_ERR,
		    "ipsec4_output: unknown ipsec protocol %d\n",
		    sav->sah->saidx.proto));
		m_freem(state->m);
		state->m = NULL;
		error = EPROTONOSUPPORT;
		goto bad;
	}

	if (state->m == 0) {
		error = ENOMEM;
		goto bad;
	}

#if SKYWALK
	/* Tag the packet with the SA's flow id for Skywalk classification. */
	state->m->m_pkthdr.pkt_flowid = sav->flowid;
	state->m->m_pkthdr.pkt_flags |= PKTF_FLOW_ID;
#endif /* !SKYWALK */

	return 0;

bad:
	return error;
}
3342
3343 int
ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
{
	/*
	 * Output path for IPv4 packets on an ipsec network interface: look
	 * up the outbound SA keyed by the interface and the packet's own
	 * src/dst addresses, then apply the transform via
	 * ipsec4_output_internal().  Returns 0 on success; on failure the
	 * packet has been freed (state->m set to NULL) and an errno is
	 * returned.
	 */
	int error = 0;
	struct secasvar *sav = NULL;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	if (state == NULL) {
		panic("state == NULL in ipsec4_output");
	}
	if (state->m == NULL) {
		panic("state->m == NULL in ipsec4_output");
	}
	if (state->dst == NULL) {
		panic("state->dst == NULL in ipsec4_output");
	}

	struct ip *ip = mtod(state->m, struct ip *);

	/* Build sockaddr views of the packet's addresses for the SA lookup. */
	struct sockaddr_in src = {};
	src.sin_family = AF_INET;
	src.sin_len = sizeof(src);
	memcpy(&src.sin_addr, &ip->ip_src, sizeof(src.sin_addr));

	struct sockaddr_in dst = {};
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr));

	sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst);
	if (sav == NULL) {
		/*
		 * No SA available: the packet is dropped at `bad` below.
		 * Report ENOENT rather than falling through with error == 0,
		 * which would make the dropped packet look like a successful
		 * send to the caller.
		 */
		error = ENOENT;
		goto bad;
	}

	if ((error = ipsec4_output_internal(state, sav)) != 0) {
		goto bad;
	}

	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	return 0;

bad:
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	/* m_freem(NULL) is a no-op, so this is safe on every error path. */
	m_freem(state->m);
	state->m = NULL;
	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
	return error;
}
3399
3400 int
ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
{
	/*
	 * Walk the security policy's chain of ipsec requests and apply each
	 * transform in turn to the IPv4 packet in state->m.  For every
	 * request, a SA index is built (unspecified selector addresses are
	 * filled in from the packet itself), a SA is looked up, and the
	 * actual transform work is delegated to ipsec4_output_internal().
	 * On failure the packet is freed and state->m set to NULL.
	 */
	struct ip *ip = NULL;
	struct ipsecrequest *isr = NULL;
	struct secasindex saidx;
	struct secasvar *sav = NULL;
	int error = 0;
	struct sockaddr_in *sin;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	if (!state) {
		panic("state == NULL in ipsec4_output");
	}
	if (!state->m) {
		panic("state->m == NULL in ipsec4_output");
	}
	if (!state->dst) {
		panic("state->dst == NULL in ipsec4_output");
	}

	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("ipsec4_output: applied SP\n");
	    kdebug_secpolicy(sp));

	for (isr = sp->req; isr != NULL; isr = isr->next) {
		/* make SA index for search proper SA */
		/* re-fetch: a previous iteration's transform may have replaced the mbuf */
		ip = mtod(state->m, struct ip *);
		bcopy(&isr->saidx, &saidx, sizeof(saidx));
		saidx.mode = isr->saidx.mode;
		saidx.reqid = isr->saidx.reqid;
		/* If the policy left src unspecified, take it from the packet. */
		sin = (struct sockaddr_in *)&saidx.src;
		if (sin->sin_len == 0) {
			sin->sin_len = sizeof(*sin);
			sin->sin_family = AF_INET;
			sin->sin_port = IPSEC_PORT_ANY;
			bcopy(&ip->ip_src, &sin->sin_addr,
			    sizeof(sin->sin_addr));
		}
		/* Likewise for dst; may also pick up the UDP port for NAT-T. */
		sin = (struct sockaddr_in *)&saidx.dst;
		if (sin->sin_len == 0) {
			sin->sin_len = sizeof(*sin);
			sin->sin_family = AF_INET;
			sin->sin_port = IPSEC_PORT_ANY;
			/*
			 * Get port from packet if upper layer is UDP and nat traversal
			 * is enabled and transport mode.
			 */

			if ((esp_udp_encap_port & 0xFFFF) != 0 &&
			    isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
				if (ip->ip_p == IPPROTO_UDP) {
					struct udphdr *udp;
					u_int32_t hlen;
#ifdef _IP_VHL
					hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
					hlen = ip->ip_hl << 2;
#endif
					/* Make sure the UDP header is contiguous before reading it. */
					if (state->m->m_len < hlen + sizeof(struct udphdr)) {
						state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
						if (!state->m) {
							ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
							IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
							goto bad;
						}
						ip = mtod(state->m, struct ip *);
					}
					udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
					sin->sin_port = udp->uh_dport;
				}
			}

			bcopy(&ip->ip_dst, &sin->sin_addr,
			    sizeof(sin->sin_addr));
		}

		if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
			/*
			 * IPsec processing is required, but no SA found.
			 * I assume that key_acquire() had been called
			 * to get/establish the SA. Here I discard
			 * this packet because it is responsibility for
			 * upper layer to retransmit the packet.
			 */
			IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
			goto bad;
		}

		/* validity check */
		if (sav == NULL) {
			switch (ipsec_get_reqlevel(isr)) {
			case IPSEC_LEVEL_USE:
				/* best-effort level: skip this transform */
				continue;
			case IPSEC_LEVEL_REQUIRE:
				/* must be not reached here. */
				panic("ipsec4_output: no SA found, but required.");
			}
		}

		if ((error = ipsec4_output_internal(state, sav)) != 0) {
			goto bad;
		}
	}

	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	return 0;

bad:
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	m_freem(state->m);
	state->m = NULL;
	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
	return error;
}
3523
3524 #endif
3525
3526 /*
3527 * IPsec output logic for IPv6, transport mode.
3528 */
3529 static int
ipsec6_output_trans_internal(
	struct ipsec_output_state *state,
	struct secasvar *sav,
	u_char *nexthdrp,
	struct mbuf *mprev)
{
	/*
	 * Apply one transport-mode transform (ESP or AH) to the IPv6 packet
	 * in state->m, inserting the IPsec header after the extension header
	 * chain tracked by `nexthdrp` (next-header field to patch) and
	 * `mprev` (mbuf preceding the insertion point).  On success the
	 * payload length in the IPv6 header is refreshed; on error state->m
	 * is NULL when the mbuf was consumed by the transform, otherwise it
	 * is left for the caller to free.
	 */
	struct ip6_hdr *ip6;
	size_t plen;
	int error = 0;

	/* validity check */
	if (sav == NULL || sav->sah == NULL) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * If there is no valid SA, we give up to process.
	 * see same place at ipsec4_output().
	 */
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
		error = EINVAL;
		goto bad;
	}

	state->outgoing_if = sav->sah->outgoing_if;

	switch (sav->sah->saidx.proto) {
	case IPPROTO_ESP:
#if IPSEC_ESP
		error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
#else
		m_freem(state->m);
		error = EINVAL;
#endif
		break;
	case IPPROTO_AH:
		error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
		break;
	default:
		ipseclog((LOG_ERR, "ipsec6_output_trans: "
		    "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
		m_freem(state->m);
		IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
		error = EPROTONOSUPPORT;
		break;
	}
	if (error) {
		/* the transform (or the freem above) consumed the mbuf */
		state->m = NULL;
		goto bad;
	}
	/* Recompute the IPv6 payload length now that the IPsec header is in. */
	plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
	if (plen > IPV6_MAXPACKET) {
		ipseclog((LOG_ERR, "ipsec6_output_trans: "
		    "IPsec with IPv6 jumbogram is not supported\n"));
		IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
		error = EINVAL; /*XXX*/
		goto bad;
	}
	ip6 = mtod(state->m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_int16_t)plen);

#if SKYWALK
	/* Tag the packet with the SA's flow id for Skywalk classification. */
	ASSERT(state->m != NULL);
	state->m->m_pkthdr.pkt_flowid = sav->flowid;
	state->m->m_pkthdr.pkt_flags |= PKTF_FLOW_ID;
#endif /* !SKYWALK */
	return 0;
bad:
	return error;
}
3603
3604 int
ipsec6_output_trans(
	struct ipsec_output_state *state,
	u_char *nexthdrp,
	struct mbuf *mprev,
	struct secpolicy *sp,
	__unused int flags,
	int *tun)
{
	/*
	 * Apply all transport-mode transforms of policy `sp` to the IPv6
	 * packet in state->m, stopping at the first tunnel-mode request
	 * (those are handled later by ipsec6_output_tunnel()).  *tun is set
	 * to 1 when a tunnel-mode request remains, else 0.  `nexthdrp` and
	 * `mprev` locate the header-insertion point, as in
	 * ipsec6_output_trans_internal().
	 */
	struct ip6_hdr *ip6;
	struct ipsecrequest *isr = NULL;
	struct secasindex saidx;
	int error = 0;
	struct sockaddr_in6 *sin6;
	struct secasvar *sav = NULL;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	if (!state) {
		panic("state == NULL in ipsec6_output_trans");
	}
	if (!state->m) {
		panic("state->m == NULL in ipsec6_output_trans");
	}
	if (!nexthdrp) {
		panic("nexthdrp == NULL in ipsec6_output_trans");
	}
	if (!mprev) {
		panic("mprev == NULL in ipsec6_output_trans");
	}
	if (!sp) {
		panic("sp == NULL in ipsec6_output_trans");
	}
	if (!tun) {
		panic("tun == NULL in ipsec6_output_trans");
	}

	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("ipsec6_output_trans: applyed SP\n");
	    kdebug_secpolicy(sp));

	*tun = 0;
	for (isr = sp->req; isr; isr = isr->next) {
		if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
			/* the rest will be handled by ipsec6_output_tunnel() */
			break;
		}

		/* make SA index for search proper SA */
		ip6 = mtod(state->m, struct ip6_hdr *);
		bcopy(&isr->saidx, &saidx, sizeof(saidx));
		saidx.mode = isr->saidx.mode;
		saidx.reqid = isr->saidx.reqid;
		/* Fill unspecified selector src from the packet. */
		sin6 = (struct sockaddr_in6 *)&saidx.src;
		if (sin6->sin6_len == 0) {
			sin6->sin6_len = sizeof(*sin6);
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = IPSEC_PORT_ANY;
			bcopy(&ip6->ip6_src, &sin6->sin6_addr,
			    sizeof(ip6->ip6_src));
			if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
				/* fix scope id for comparing SPD */
				sin6->sin6_scope_id = ip6_output_getsrcifscope(state->m);
				in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
				if (in6_embedded_scope) {
					/* KAME-style: scope id lives in addr word 1; clear it for SPD compare */
					sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
					sin6->sin6_addr.s6_addr16[1] = 0;
				}
			}
		}
		/* Fill unspecified selector dst from the packet. */
		sin6 = (struct sockaddr_in6 *)&saidx.dst;
		if (sin6->sin6_len == 0) {
			sin6->sin6_len = sizeof(*sin6);
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = IPSEC_PORT_ANY;
			bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
			    sizeof(ip6->ip6_dst));
			if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
				/* fix scope id for comparing SPD */
				sin6->sin6_scope_id = ip6_output_getdstifscope(state->m);
				in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
				if (in6_embedded_scope) {
					sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
					sin6->sin6_addr.s6_addr16[1] = 0;
				}
			}
		}

		if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
			/*
			 * IPsec processing is required, but no SA found.
			 * I assume that key_acquire() had been called
			 * to get/establish the SA. Here I discard
			 * this packet because it is responsibility for
			 * upper layer to retransmit the packet.
			 */
			IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
			error = ENOENT;

			/*
			 * Notify the fact that the packet is discarded
			 * to ourselves. I believe this is better than
			 * just silently discarding. ([email protected])
			 * XXX: should we restrict the error to TCP packets?
			 * XXX: should we directly notify sockets via
			 * pfctlinputs?
			 */
			icmp6_error(state->m, ICMP6_DST_UNREACH,
			    ICMP6_DST_UNREACH_ADMIN, 0);
			state->m = NULL; /* icmp6_error freed the mbuf */
			goto bad;
		}

		/* validity check */
		if (sav == NULL) {
			switch (ipsec_get_reqlevel(isr)) {
			case IPSEC_LEVEL_USE:
				/* best-effort level: skip this transform */
				continue;
			case IPSEC_LEVEL_REQUIRE:
				/* must be not reached here. */
				panic("ipsec6_output_trans: no SA found, but required.");
			}
		}

		if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
			goto bad;
		}
	}

	/* if we have more to go, we need a tunnel mode processing */
	if (isr != NULL) {
		*tun = 1;
	}

	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	return 0;

bad:
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	m_freem(state->m);
	state->m = NULL;
	return error;
}
3751
3752 /*
3753 * IPsec output logic for IPv6, tunnel mode.
3754 */
3755 static int
ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
{
	/*
	 * Apply one tunnel-mode transform to the IPv6 packet in state->m.
	 * For a 6-in-6 SA the packet gets a new IPv6 outer header and the
	 * SA's cached route is refreshed; for a 6-in-4 SA the packet is
	 * converted to IPv4, transformed, and sent directly via ip_output()
	 * (in which case *must_be_last is set, since ip6_output must not
	 * touch it again).  Returns 0 on success; on error state->m is NULL
	 * only on paths where the mbuf was consumed.
	 */
	struct ip6_hdr *ip6;
	struct sockaddr_in6* dst6;
	struct route_in6 *ro6;
	size_t plen;
	int error = 0;

	/* validity check */
	if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * If there is no valid SA, we give up to process.
	 * see same place at ipsec4_output().
	 */
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
		error = EINVAL;
		goto bad;
	}

	state->outgoing_if = sav->sah->outgoing_if;

	/* Always true here (checked above); kept for structural symmetry with v4. */
	if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
		/*
		 * build IPsec tunnel.
		 */
		state->m = ipsec6_splithdr(state->m);
		if (!state->m) {
			IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
			error = ENOMEM;
			goto bad;
		}

		if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
			/* 6-in-6: add the IPv6 outer header in place. */
			error = ipsec6_encapsulate(state->m, sav);
			if (error) {
				state->m = 0;
				goto bad;
			}
			ip6 = mtod(state->m, struct ip6_hdr *);
		} else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
			/* 6-in-4: encapsulate, transform, and output as IPv4 right here. */
			struct ip *ip;
			struct sockaddr_in* dst4;
			struct route *ro4 = NULL;
			struct route ro4_copy;
			struct ip_out_args ipoa;

			bzero(&ipoa, sizeof(ipoa));
			ipoa.ipoa_boundif = IFSCOPE_NONE;
			ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
			ipoa.ipoa_sotc = SO_TC_UNSPEC;
			ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

			if (must_be_last) {
				*must_be_last = 1;
			}

			state->tunneled = 4; /* must not process any further in ip6_output */
			error = ipsec64_encapsulate(state->m, sav, state->dscp_mapping);
			if (error) {
				state->m = 0;
				goto bad;
			}
			/* Now we have an IPv4 packet */
			ip = mtod(state->m, struct ip *);

			// grab sadb_mutex, to update sah's route cache and get a local copy of it
			lck_mtx_lock(sadb_mutex);
			ro4 = (struct route *)&sav->sah->sa_route;
			dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
			if (ro4->ro_rt) {
				RT_LOCK(ro4->ro_rt);
			}
			/* Drop the cached route if stale or for a different destination. */
			if (ROUTE_UNUSABLE(ro4) ||
			    dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
				if (ro4->ro_rt != NULL) {
					RT_UNLOCK(ro4->ro_rt);
				}
				ROUTE_RELEASE(ro4);
			}
			if (ro4->ro_rt == NULL) {
				dst4->sin_family = AF_INET;
				dst4->sin_len = sizeof(*dst4);
				dst4->sin_addr = ip->ip_dst;
			} else {
				RT_UNLOCK(ro4->ro_rt);
			}
			/* Work on a local copy so ip_output runs without sadb_mutex held. */
			route_copyout(&ro4_copy, ro4, sizeof(struct route));
			// release sadb_mutex, after updating sah's route cache and getting a local copy
			lck_mtx_unlock(sadb_mutex);
			state->m = ipsec4_splithdr(state->m);
			if (!state->m) {
				error = ENOMEM;
				ROUTE_RELEASE(&ro4_copy);
				goto bad;
			}
			switch (sav->sah->saidx.proto) {
			case IPPROTO_ESP:
#if IPSEC_ESP
				if ((error = esp4_output(state->m, sav)) != 0) {
					state->m = NULL;
					ROUTE_RELEASE(&ro4_copy);
					goto bad;
				}
				break;

#else
				m_freem(state->m);
				state->m = NULL;
				error = EINVAL;
				ROUTE_RELEASE(&ro4_copy);
				goto bad;
#endif
			case IPPROTO_AH:
				if ((error = ah4_output(state->m, sav)) != 0) {
					state->m = NULL;
					ROUTE_RELEASE(&ro4_copy);
					goto bad;
				}
				break;
			default:
				ipseclog((LOG_ERR,
				    "ipsec4_output: unknown ipsec protocol %d\n",
				    sav->sah->saidx.proto));
				m_freem(state->m);
				state->m = NULL;
				error = EPROTONOSUPPORT;
				ROUTE_RELEASE(&ro4_copy);
				goto bad;
			}

			if (state->m == 0) {
				error = ENOMEM;
				ROUTE_RELEASE(&ro4_copy);
				goto bad;
			}
			ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m,
			    AF_INET, sav->flowid);
			ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);

			ip = mtod(state->m, struct ip *);
			ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
			error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
			/* ip_output consumed the mbuf regardless of the outcome */
			state->m = NULL;
			// grab sadb_mutex, to synchronize the sah's route cache with the local copy
			lck_mtx_lock(sadb_mutex);
			route_copyin(&ro4_copy, ro4, sizeof(struct route));
			lck_mtx_unlock(sadb_mutex);
			if (error != 0) {
				goto bad;
			}
			goto done;
		} else {
			ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
			    "unsupported inner family, spi=%u\n",
			    (u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
			error = EAFNOSUPPORT;
			goto bad;
		}

		/* 6-in-6 path continues: refresh the SA's cached IPv6 route. */
		// grab sadb_mutex, before updating sah's route cache
		lck_mtx_lock(sadb_mutex);
		ro6 = &sav->sah->sa_route;
		dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
		if (ro6->ro_rt) {
			RT_LOCK(ro6->ro_rt);
		}
		if (ROUTE_UNUSABLE(ro6) ||
		    !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
			if (ro6->ro_rt != NULL) {
				RT_UNLOCK(ro6->ro_rt);
			}
			ROUTE_RELEASE(ro6);
		}
		if (ro6->ro_rt == 0) {
			bzero(dst6, sizeof(*dst6));
			dst6->sin6_family = AF_INET6;
			dst6->sin6_len = sizeof(*dst6);
			dst6->sin6_addr = ip6->ip6_dst;
			rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
			if (ro6->ro_rt) {
				RT_LOCK(ro6->ro_rt);
			}
		}
		if (ro6->ro_rt == 0) {
			ip6stat.ip6s_noroute++;
			IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
			error = EHOSTUNREACH;
			// release sadb_mutex, after updating sah's route cache
			lck_mtx_unlock(sadb_mutex);
			goto bad;
		}

		/*
		 * adjust state->dst if tunnel endpoint is offlink
		 *
		 * XXX: caching rt_gateway value in the state is
		 * not really good, since it may point elsewhere
		 * when the gateway gets modified to a larger
		 * sockaddr via rt_setgate(). This is currently
		 * addressed by SA_SIZE roundup in that routine.
		 */
		if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
			dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
		}
		RT_UNLOCK(ro6->ro_rt);
		ROUTE_RELEASE(&state->ro);
		route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
		state->dst = (struct sockaddr *)dst6;
		state->tunneled = 6;
		// release sadb_mutex, after updating sah's route cache
		lck_mtx_unlock(sadb_mutex);
	}

	/* Apply the ESP/AH transform to the (now-encapsulated) IPv6 packet. */
	state->m = ipsec6_splithdr(state->m);
	if (!state->m) {
		IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
		error = ENOMEM;
		goto bad;
	}
	ip6 = mtod(state->m, struct ip6_hdr *);
	switch (sav->sah->saidx.proto) {
	case IPPROTO_ESP:
#if IPSEC_ESP
		error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
#else
		m_freem(state->m);
		error = EINVAL;
#endif
		break;
	case IPPROTO_AH:
		error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
		break;
	default:
		ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
		    "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
		m_freem(state->m);
		IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
		error = EINVAL;
		break;
	}
	if (error) {
		state->m = NULL;
		goto bad;
	}
	/* Refresh the outer IPv6 payload length after the transform. */
	plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
	if (plen > IPV6_MAXPACKET) {
		ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
		    "IPsec with IPv6 jumbogram is not supported\n"));
		IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
		error = EINVAL; /*XXX*/
		goto bad;
	}
	ip6 = mtod(state->m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_int16_t)plen);
done:
#if SKYWALK
	/* state->m is NULL on the ip_output() path above, hence the check. */
	if (state->m != NULL) {
		state->m->m_pkthdr.pkt_flowid = sav->flowid;
		state->m->m_pkthdr.pkt_flags |= PKTF_FLOW_ID;
	}
#endif /* !SKYWALK */

	return 0;

bad:
	return error;
}
4030
4031 int
ipsec6_output_tunnel(
	struct ipsec_output_state *state,
	struct secpolicy *sp,
	__unused int flags)
{
	/*
	 * Apply the tunnel-mode (and any trailing) ipsec requests of policy
	 * `sp` to the IPv6 packet in state->m; transport-mode requests that
	 * precede the first tunnel were already handled by
	 * ipsec6_output_trans().  For each request an SA index is built
	 * (unspecified selector addresses taken from the packet), an SA is
	 * looked up, and the work is delegated to
	 * ipsec6_output_tunnel_internal().  On failure the packet is freed
	 * and state->m set to NULL.
	 */
	struct ip6_hdr *ip6;
	struct ipsecrequest *isr = NULL;
	struct secasindex saidx;
	struct secasvar *sav = NULL;
	int error = 0;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	if (!state) {
		panic("state == NULL in ipsec6_output_tunnel");
	}
	if (!state->m) {
		panic("state->m == NULL in ipsec6_output_tunnel");
	}
	if (!sp) {
		panic("sp == NULL in ipsec6_output_tunnel");
	}

	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("ipsec6_output_tunnel: applyed SP\n");
	    kdebug_secpolicy(sp));

	/*
	 * transport mode ipsec (before the 1st tunnel mode) is already
	 * processed by ipsec6_output_trans().
	 */
	for (isr = sp->req; isr; isr = isr->next) {
		if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
			break;
		}
	}

	for (/* already initialized */; isr; isr = isr->next) {
		if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
			/* When tunnel mode, SA peers must be specified. */
			bcopy(&isr->saidx, &saidx, sizeof(saidx));
		} else {
			/* make SA index to look for a proper SA */
			struct sockaddr_in6 *sin6;

			bzero(&saidx, sizeof(saidx));
			saidx.proto = isr->saidx.proto;
			saidx.mode = isr->saidx.mode;
			saidx.reqid = isr->saidx.reqid;

			ip6 = mtod(state->m, struct ip6_hdr *);
			/* Fill unspecified selector src from the packet. */
			sin6 = (struct sockaddr_in6 *)&saidx.src;
			if (sin6->sin6_len == 0) {
				sin6->sin6_len = sizeof(*sin6);
				sin6->sin6_family = AF_INET6;
				sin6->sin6_port = IPSEC_PORT_ANY;
				bcopy(&ip6->ip6_src, &sin6->sin6_addr,
				    sizeof(ip6->ip6_src));
				if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
					/* fix scope id for comparing SPD */
					sin6->sin6_scope_id = ip6_output_getsrcifscope(state->m);
					in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
					if (in6_embedded_scope) {
						sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
						sin6->sin6_addr.s6_addr16[1] = 0;
					}
				}
			}
			/* Fill unspecified selector dst from the packet. */
			sin6 = (struct sockaddr_in6 *)&saidx.dst;
			if (sin6->sin6_len == 0) {
				sin6->sin6_len = sizeof(*sin6);
				sin6->sin6_family = AF_INET6;
				sin6->sin6_port = IPSEC_PORT_ANY;
				bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
				    sizeof(ip6->ip6_dst));
				if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
					/* fix scope id for comparing SPD */
					sin6->sin6_scope_id = ip6_output_getdstifscope(state->m);
					in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
					if (in6_embedded_scope) {
						/*
						 * BUGFIX: read the embedded scope id from the
						 * destination address.  This previously read
						 * ip6_src — a copy/paste slip from the src
						 * branch above (compare ipsec6_output_trans()).
						 */
						sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
						sin6->sin6_addr.s6_addr16[1] = 0;
					}
				}
			}
		}

		if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
			/*
			 * IPsec processing is required, but no SA found.
			 * I assume that key_acquire() had been called
			 * to get/establish the SA. Here I discard
			 * this packet because it is responsibility for
			 * upper layer to retransmit the packet.
			 */
			IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
			error = ENOENT;
			goto bad;
		}

		/* validity check */
		if (sav == NULL) {
			switch (ipsec_get_reqlevel(isr)) {
			case IPSEC_LEVEL_USE:
				/* best-effort level: skip this transform */
				continue;
			case IPSEC_LEVEL_REQUIRE:
				/* must be not reached here. */
				panic("ipsec6_output_tunnel: no SA found, but required.");
			}
		}

		/*
		 * If there is no valid SA, we give up to process.
		 * see same place at ipsec4_output().
		 */
		if (sav->state != SADB_SASTATE_MATURE
		    && sav->state != SADB_SASTATE_DYING) {
			IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
			error = EINVAL;
			goto bad;
		}

		int must_be_last = 0;

		if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
			goto bad;
		}

		/* A 6-in-4 tunnel was emitted via ip_output; nothing may follow it. */
		if (must_be_last && isr->next) {
			ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
			    "IPv4 must be outer layer, spi=%u\n",
			    (u_int32_t)ntohl(sav->spi)));
			error = EINVAL;
			goto bad;
		}
	}

	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	return 0;

bad:
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (state->m) {
		m_freem(state->m);
	}
	state->m = NULL;
	return error;
}
4184
4185 int
ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
{
	/*
	 * Output path for IPv6 packets on an ipsec network interface: look
	 * up the outbound SA keyed by the interface and the packet's own
	 * src/dst addresses, then apply either the tunnel-mode or the
	 * transport-mode transform depending on the SA.  Returns 0 on
	 * success; on failure the packet has been freed (state->m set to
	 * NULL) and an errno is returned.
	 */
	int error = 0;
	struct secasvar *sav = NULL;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	if (state == NULL) {
		panic("state == NULL in ipsec6_output");
	}
	if (state->m == NULL) {
		panic("state->m == NULL in ipsec6_output");
	}
	if (nexthdrp == NULL) {
		panic("nexthdrp == NULL in ipsec6_output");
	}
	if (mprev == NULL) {
		panic("mprev == NULL in ipsec6_output");
	}

	struct ip6_hdr *ip6 = mtod(state->m, struct ip6_hdr *);

	/* Build sockaddr views of the packet's addresses for the SA lookup. */
	struct sockaddr_in6 src = {};
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(src);
	memcpy(&src.sin6_addr, &ip6->ip6_src, sizeof(src.sin6_addr));

	struct sockaddr_in6 dst = {};
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(dst);
	memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr));

	sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst);
	if (sav == NULL) {
		/*
		 * No SA available: the packet is dropped at `bad` below.
		 * Report ENOENT rather than falling through with error == 0,
		 * which would make the dropped packet look like a successful
		 * send (consistent with ipsec4_interface_output()).
		 */
		error = ENOENT;
		goto bad;
	}

	if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
		if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
			goto bad;
		}
	} else {
		if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
			goto bad;
		}
	}

	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	return 0;

bad:
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	/* m_freem(NULL) is a no-op, so this is safe on every error path. */
	m_freem(state->m);
	state->m = NULL;
	return error;
}
4248
4249 #if INET
4250 /*
4251 * Chop IP header and option off from the payload.
4252 */
4253 struct mbuf *
ipsec4_splithdr(struct mbuf *m)
{
	/*
	 * Ensure the IPv4 header (including options) sits alone in the
	 * chain's first mbuf, so a transform header can be inserted right
	 * after it.  If the first mbuf holds more than the header, a new
	 * header mbuf is prepended and the original's data pointer advanced;
	 * if it holds less, the header is pulled up.  Returns the (possibly
	 * new) chain head, or NULL on allocation failure (chain freed).
	 */
	struct mbuf *mh;
	struct ip *ip;
	int hlen;

	if (m->m_len < sizeof(struct ip)) {
		panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
	}
	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif
	if (m->m_len > hlen) {
		/* First mbuf carries payload too: split the header into `mh`. */
		MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
		if (!mh) {
			m_freem(m);
			return NULL;
		}
		/* Move the pkthdr to the new head before demoting the old one. */
		M_COPY_PKTHDR(mh, m);
		MH_ALIGN(mh, hlen);
		m->m_flags &= ~M_PKTHDR;
		m_mchtype(m, MT_DATA);
		m->m_len -= hlen;
		m->m_data += hlen;
		mh->m_next = m;
		m = mh;
		m->m_len = hlen;
		bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
	} else if (m->m_len < hlen) {
		/* Header straddles mbufs: make it contiguous. */
		m = m_pullup(m, hlen);
		if (!m) {
			return NULL;
		}
	}
	return m;
}
4293 #endif
4294
4295 struct mbuf *
ipsec6_splithdr(struct mbuf *m)
{
	/*
	 * IPv6 counterpart of ipsec4_splithdr(): ensure the fixed-size
	 * IPv6 header sits alone in the chain's first mbuf so a transform
	 * header can be inserted after it.  Returns the (possibly new)
	 * chain head, or NULL on allocation failure (chain freed).
	 */
	struct mbuf *mh;
	struct ip6_hdr *ip6;
	int hlen;

	if (m->m_len < sizeof(struct ip6_hdr)) {
		panic("ipsec6_splithdr: first mbuf too short");
	}
	ip6 = mtod(m, struct ip6_hdr *);
	hlen = sizeof(struct ip6_hdr);
	if (m->m_len > hlen) {
		/* First mbuf carries payload too: split the header into `mh`. */
		MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
		if (!mh) {
			m_freem(m);
			return NULL;
		}
		/* Move the pkthdr to the new head before demoting the old one. */
		M_COPY_PKTHDR(mh, m);
		MH_ALIGN(mh, hlen);
		m->m_flags &= ~M_PKTHDR;
		m_mchtype(m, MT_DATA);
		m->m_len -= hlen;
		m->m_data += hlen;
		mh->m_next = m;
		m = mh;
		m->m_len = hlen;
		bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
	} else if (m->m_len < hlen) {
		/* Header straddles mbufs: make it contiguous. */
		m = m_pullup(m, hlen);
		if (!m) {
			return NULL;
		}
	}
	return m;
}
4331
/*
 * Validate an inbound IPsec tunnel-mode packet whose outer header is IPv4.
 *
 * m    - received mbuf chain; no pullup permitted, m->m_len >= outer IP hdr
 * off  - offset of the inner (encapsulated) header within the packet
 * nxt0 - protocol of the encapsulated payload (IPPROTO_IPV4/IPPROTO_IPV6)
 * sav  - the SA the packet was decrypted with
 * ifamily - out: address family of the inner packet, set on success
 *
 * Returns 1 if the packet may be decapsulated as IPsec tunnel mode,
 * 0 otherwise (caller then leaves it to the generic tunnel code).
 */
int
ipsec4_tunnel_validate(
	struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
	int off,
	u_int nxt0,
	struct secasvar *sav,
	sa_family_t *ifamily)
{
	u_int8_t nxt = nxt0 & 0xff;
	struct sockaddr_in *sin;
	struct sockaddr_in osrc, odst, i4src, i4dst;
	struct sockaddr_in6 i6src, i6dst;
	int hlen;
	struct secpolicy *sp;
	struct ip *oip;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* do not decapsulate if the SA is for transport mode only */
	if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
		return 0;
	}

#if DIAGNOSTIC
	if (m->m_len < sizeof(struct ip)) {
		panic("too short mbuf on ipsec4_tunnel_validate");
	}
#endif
	/* only IPv4-in-IPv4 and IPv6-in-IPv4 encapsulations are recognized */
	if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
		return 0;
	}
	/* must be long enough to hold at least an inner IPv4 header */
	if (m->m_pkthdr.len < off + sizeof(struct ip)) {
		return 0;
	}

	oip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
#else
	hlen = oip->ip_hl << 2;
#endif
	/* reject outer headers that carry IP options */
	if (hlen != sizeof(struct ip)) {
		return 0;
	}

	/* outer destination must match the SA's destination address */
	sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
	if (sin->sin_family != AF_INET) {
		return 0;
	}
	if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0) {
		return 0;
	}

	if (sav->sah->ipsec_if != NULL) {
		// the ipsec interface SAs don't have a policies.
		if (nxt == IPPROTO_IPV4) {
			*ifamily = AF_INET;
		} else if (nxt == IPPROTO_IPV6) {
			*ifamily = AF_INET6;
		} else {
			return 0;
		}
		return 1;
	}

	/* XXX slow */
	bzero(&osrc, sizeof(osrc));
	bzero(&odst, sizeof(odst));
	osrc.sin_family = odst.sin_family = AF_INET;
	osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
	osrc.sin_addr = oip->ip_src;
	odst.sin_addr = oip->ip_dst;
	/*
	 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
	 * - if the inner destination is multicast address, there can be
	 *   multiple permissible inner source address.  implementation
	 *   may want to skip verification of inner source address against
	 *   SPD selector.
	 * - if the inner protocol is ICMP, the packet may be an error report
	 *   from routers on the other side of the VPN cloud (R in the
	 *   following diagram).  in this case, we cannot verify inner source
	 *   address against SPD selector.
	 *	me -- gw === gw -- R -- you
	 *
	 * we consider the first bullet to be users responsibility on SPD entry
	 * configuration (if you need to encrypt multicast traffic, set
	 * the source range of SPD selector to 0.0.0.0/0, or have explicit
	 * address ranges for possible senders).
	 * the second bullet is not taken care of (yet).
	 *
	 * therefore, we do not do anything special about inner source.
	 */
	if (nxt == IPPROTO_IPV4) {
		/* extract the inner IPv4 addresses for the SPD lookup */
		bzero(&i4src, sizeof(struct sockaddr_in));
		bzero(&i4dst, sizeof(struct sockaddr_in));
		i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
		i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
		m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
		    (caddr_t)&i4src.sin_addr);
		m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
		    (caddr_t)&i4dst.sin_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
	} else if (nxt == IPPROTO_IPV6) {
		/* extract the inner IPv6 addresses for the SPD lookup */
		bzero(&i6src, sizeof(struct sockaddr_in6));
		bzero(&i6dst, sizeof(struct sockaddr_in6));
		i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
		i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
		    (caddr_t)&i6src.sin6_addr);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
		    (caddr_t)&i6dst.sin6_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
	} else {
		return 0; /* unsupported family */
	}
	/* no matching inbound tunnel policy: do not decapsulate */
	if (!sp) {
		return 0;
	}

	key_freesp(sp, KEY_SADB_UNLOCKED);

	return 1;
}
4458
/*
 * Validate an inbound IPsec tunnel-mode packet whose outer header is IPv6.
 *
 * m    - received mbuf chain; no pullup permitted, m->m_len >= outer ip6 hdr
 * off  - offset of the inner (encapsulated) header within the packet
 * nxt0 - protocol of the encapsulated payload (IPPROTO_IPV4/IPPROTO_IPV6)
 * sav  - the SA the packet was decrypted with
 * ifamily - out: address family of the inner packet, set on success
 *
 * Returns 1 if the packet may be decapsulated as IPsec tunnel mode,
 * 0 otherwise.
 */
int
ipsec6_tunnel_validate(
	struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
	int off,
	u_int nxt0,
	struct secasvar *sav,
	sa_family_t *ifamily)
{
	u_int8_t nxt = nxt0 & 0xff;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in i4src, i4dst;
	struct sockaddr_in6 osrc, odst, i6src, i6dst;
	struct secpolicy *sp;
	struct ip6_hdr *oip6;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* do not decapsulate if the SA is for transport mode only */
	if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
		return 0;
	}

#if DIAGNOSTIC
	if (m->m_len < sizeof(struct ip6_hdr)) {
		panic("too short mbuf on ipsec6_tunnel_validate");
	}
#endif
	/* packet must hold at least a full inner header of the claimed family */
	if (nxt == IPPROTO_IPV4) {
		if (m->m_pkthdr.len < off + sizeof(struct ip)) {
			ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr)));
			return 0;
		}
	} else if (nxt == IPPROTO_IPV6) {
		if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
			ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr)));
			return 0;
		}
	} else {
		ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate invalid nxt(%u) protocol", nxt));
		return 0;
	}

	oip6 = mtod(m, struct ip6_hdr *);
	/* AF_INET should be supported, but at this moment we don't. */
	sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
	if (sin6->sin6_family != AF_INET6) {
		return 0;
	}

	/*
	 * Compare the outer destination with the SA destination.  For
	 * link-local addresses with embedded scope, re-embed the SA's
	 * outgoing interface index into a scratch copy so the comparison
	 * matches the on-the-wire form in oip6->ip6_dst.
	 */
	struct in6_addr tmp_sah_dst_addr = {};
	struct in6_addr *sah_dst_addr = &((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
	if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(sah_dst_addr)) {
		memcpy(&tmp_sah_dst_addr, sah_dst_addr, sizeof(tmp_sah_dst_addr));
		tmp_sah_dst_addr.s6_addr16[1] = htons((u_int16_t)sav->sah->outgoing_if);
		sah_dst_addr = &tmp_sah_dst_addr;
	}
	if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, sah_dst_addr)) {
		return 0;
	}

	if (sav->sah->ipsec_if != NULL) {
		// the ipsec interface SAs don't have a policies.
		if (nxt == IPPROTO_IPV4) {
			*ifamily = AF_INET;
		} else if (nxt == IPPROTO_IPV6) {
			*ifamily = AF_INET6;
		} else {
			return 0;
		}
		return 1;
	}

	/* XXX slow */
	bzero(&osrc, sizeof(osrc));
	bzero(&odst, sizeof(odst));
	osrc.sin6_family = odst.sin6_family = AF_INET6;
	osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
	osrc.sin6_addr = oip6->ip6_src;
	odst.sin6_addr = oip6->ip6_dst;

	/*
	 * regarding to inner source address validation, see a long comment
	 * in ipsec4_tunnel_validate.
	 */

	if (nxt == IPPROTO_IPV4) {
		/* extract the inner IPv4 addresses for the SPD lookup */
		bzero(&i4src, sizeof(struct sockaddr_in));
		bzero(&i4dst, sizeof(struct sockaddr_in));
		i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
		i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
		m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
		    (caddr_t)&i4src.sin_addr);
		m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
		    (caddr_t)&i4dst.sin_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
	} else if (nxt == IPPROTO_IPV6) {
		/* extract the inner IPv6 addresses for the SPD lookup */
		bzero(&i6src, sizeof(struct sockaddr_in6));
		bzero(&i6dst, sizeof(struct sockaddr_in6));
		i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
		i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
		    (caddr_t)&i6src.sin6_addr);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
		    (caddr_t)&i6dst.sin6_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
	} else {
		return 0; /* unsupported family */
	}
	/*
	 * when there is no suitable inbound policy for the packet of the ipsec
	 * tunnel mode, the kernel never decapsulate the tunneled packet
	 * as the ipsec tunnel mode even when the system wide policy is "none".
	 * then the kernel leaves the generic tunnel module to process this
	 * packet.  if there is no rule of the generic tunnel, the packet
	 * is rejected and the statistics will be counted up.
	 */
	if (!sp) {
		return 0;
	}
	key_freesp(sp, KEY_SADB_UNLOCKED);

	return 1;
}
4585
4586 /*
4587 * Make a mbuf chain for encryption.
4588 * If the original mbuf chain contains a mbuf with a cluster,
4589 * allocate a new cluster and copy the data to the new cluster.
4590 * XXX: this hack is inefficient, but is necessary to handle cases
4591 * of TCP retransmission...
4592 */
struct mbuf *
ipsec_copypkt(struct mbuf *m)
{
	struct mbuf *n, **mpp, *mnew;

	/*
	 * Walk the chain; mpp always points at the link that should be
	 * updated to reference the (possibly replaced) current mbuf.
	 */
	for (n = m, mpp = &m; n; n = n->m_next) {
		if (n->m_flags & M_EXT) {
			/*
			 * Make a copy only if there are more than one references
			 * to the cluster.
			 * XXX: is this approach effective?
			 */
			if (
				m_get_ext_free(n) != NULL ||
				m_mclhasreference(n)
				) {
				int remain, copied;
				struct mbuf *mm;

				/* replacement head mirrors the original's pkthdr-ness */
				if (n->m_flags & M_PKTHDR) {
					MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
					if (mnew == NULL) {
						goto fail;
					}
					M_COPY_PKTHDR(mnew, n);
				} else {
					MGET(mnew, M_DONTWAIT, MT_DATA);
					if (mnew == NULL) {
						goto fail;
					}
				}
				mnew->m_len = 0;
				mm = mnew;

				/*
				 * Copy data. If we don't have enough space to
				 * store the whole data, allocate a cluster
				 * or additional mbufs.
				 * XXX: we don't use m_copyback(), since the
				 * function does not use clusters and thus is
				 * inefficient.
				 */
				remain = n->m_len;
				copied = 0;
				while (1) {
					int len;
					struct mbuf *mn;

					if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) {
						len = remain;
					} else { /* allocate a cluster */
						MCLGET(mm, M_DONTWAIT);
						if (!(mm->m_flags & M_EXT)) {
							m_free(mm);
							goto fail;
						}
						len = remain < MCLBYTES ?
						    remain : MCLBYTES;
					}

					bcopy(n->m_data + copied, mm->m_data,
					    len);

					copied += len;
					remain -= len;
					mm->m_len = len;

					if (remain <= 0) { /* completed? */
						break;
					}

					/* need another mbuf */
					MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
					if (mn == NULL) {
						goto fail;
					}
					mn->m_pkthdr.rcvif = NULL;
					mm->m_next = mn;
					mm = mn;
				}

				/*
				 * adjust chain: free the copied mbuf and splice
				 * the remainder of the original chain (returned
				 * by m_free) after the copy.
				 */
				mm->m_next = m_free(n);
				n = mm;
				*mpp = mnew;
				mpp = &n->m_next;

				continue;
			}
		}
		*mpp = n;
		mpp = &n->m_next;
	}

	return m;
fail:
	/*
	 * NOTE(review): m_freem(m) releases the original chain, but a
	 * partially-built 'mnew' chain that has not yet been linked in via
	 * *mpp appears to be leaked on this path — verify.
	 */
	m_freem(m);
	return NULL;
}
4692
4693 /*
4694 * Tags are allocated as mbufs for now, since our minimum size is MLEN, we
4695 * should make use of up to that much space.
4696 */
4697 #define IPSEC_TAG_HEADER \
4698
/*
 * Per-packet IPsec auxiliary state, carried as the payload of an m_tag
 * (KERNEL_TAG_TYPE_IPSEC) attached to the mbuf; see ipsec_addaux() et al.
 */
struct ipsec_tag {
	struct socket *socket;          /* set by ipsec_setsocket() */
	u_int32_t history_count;        /* valid entries in history[] */
	struct ipsec_history history[]; /* flexible array of proto/SPI records */
};

/* payload bytes available: an MLEN-sized tag minus its m_tag header */
#define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
/* offset of the flexible history array within the payload */
#define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
/* capacity of history[] in the remaining payload space */
#define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
	sizeof(struct ipsec_history))
4709
4710 static struct ipsec_tag *
ipsec_addaux(struct mbuf * m)4711 ipsec_addaux(
4712 struct mbuf *m)
4713 {
4714 struct m_tag *tag;
4715
4716 /* Check if the tag already exists */
4717 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4718
4719 if (tag == NULL) {
4720 struct ipsec_tag *itag;
4721
4722 /* Allocate a tag */
4723 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4724 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4725
4726 if (tag) {
4727 itag = (struct ipsec_tag*)(tag + 1);
4728 itag->socket = 0;
4729 itag->history_count = 0;
4730
4731 m_tag_prepend(m, tag);
4732 }
4733 }
4734
4735 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4736 }
4737
4738 static struct ipsec_tag *
ipsec_findaux(struct mbuf * m)4739 ipsec_findaux(
4740 struct mbuf *m)
4741 {
4742 struct m_tag *tag;
4743
4744 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4745
4746 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4747 }
4748
4749 void
ipsec_delaux(struct mbuf * m)4750 ipsec_delaux(
4751 struct mbuf *m)
4752 {
4753 struct m_tag *tag;
4754
4755 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4756
4757 if (tag) {
4758 m_tag_delete(m, tag);
4759 }
4760 }
4761
4762 /* if the aux buffer is unnecessary, nuke it. */
4763 static void
ipsec_optaux(struct mbuf * m,struct ipsec_tag * itag)4764 ipsec_optaux(
4765 struct mbuf *m,
4766 struct ipsec_tag *itag)
4767 {
4768 if (itag && itag->socket == NULL && itag->history_count == 0) {
4769 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4770 }
4771 }
4772
4773 int
ipsec_setsocket(struct mbuf * m,struct socket * so)4774 ipsec_setsocket(struct mbuf *m, struct socket *so)
4775 {
4776 struct ipsec_tag *tag;
4777
4778 /* if so == NULL, don't insist on getting the aux mbuf */
4779 if (so) {
4780 tag = ipsec_addaux(m);
4781 if (!tag) {
4782 return ENOBUFS;
4783 }
4784 } else {
4785 tag = ipsec_findaux(m);
4786 }
4787 if (tag) {
4788 tag->socket = so;
4789 ipsec_optaux(m, tag);
4790 }
4791 return 0;
4792 }
4793
4794 struct socket *
ipsec_getsocket(struct mbuf * m)4795 ipsec_getsocket(struct mbuf *m)
4796 {
4797 struct ipsec_tag *itag;
4798
4799 itag = ipsec_findaux(m);
4800 if (itag) {
4801 return itag->socket;
4802 } else {
4803 return NULL;
4804 }
4805 }
4806
4807 int
ipsec_addhist(struct mbuf * m,int proto,u_int32_t spi)4808 ipsec_addhist(
4809 struct mbuf *m,
4810 int proto,
4811 u_int32_t spi)
4812 {
4813 struct ipsec_tag *itag;
4814 struct ipsec_history *p;
4815 itag = ipsec_addaux(m);
4816 if (!itag) {
4817 return ENOBUFS;
4818 }
4819 if (itag->history_count == IPSEC_HISTORY_MAX) {
4820 return ENOSPC; /* XXX */
4821 }
4822 p = &itag->history[itag->history_count];
4823 itag->history_count++;
4824
4825 bzero(p, sizeof(*p));
4826 p->ih_proto = proto;
4827 p->ih_spi = spi;
4828
4829 return 0;
4830 }
4831
4832 struct ipsec_history *
ipsec_gethist(struct mbuf * m,int * lenp)4833 ipsec_gethist(
4834 struct mbuf *m,
4835 int *lenp)
4836 {
4837 struct ipsec_tag *itag;
4838
4839 itag = ipsec_findaux(m);
4840 if (!itag) {
4841 return NULL;
4842 }
4843 if (itag->history_count == 0) {
4844 return NULL;
4845 }
4846 if (lenp) {
4847 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4848 }
4849 return itag->history;
4850 }
4851
4852 void
ipsec_clearhist(struct mbuf * m)4853 ipsec_clearhist(
4854 struct mbuf *m)
4855 {
4856 struct ipsec_tag *itag;
4857
4858 itag = ipsec_findaux(m);
4859 if (itag) {
4860 itag->history_count = 0;
4861 }
4862 ipsec_optaux(m, itag);
4863 }
4864
4865 __private_extern__ boolean_t
ipsec_send_natt_keepalive(struct secasvar * sav)4866 ipsec_send_natt_keepalive(
4867 struct secasvar *sav)
4868 {
4869 struct mbuf *m = NULL;
4870 int error = 0;
4871 int keepalive_interval = natt_keepalive_interval;
4872
4873 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4874 lck_mtx_lock(sadb_mutex);
4875
4876 if (((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) || sav->remote_ike_port == 0) {
4877 lck_mtx_unlock(sadb_mutex);
4878 return FALSE;
4879 }
4880
4881 if (sav->natt_interval != 0) {
4882 keepalive_interval = (int)sav->natt_interval;
4883 }
4884
4885 // natt timestamp may have changed... reverify
4886 if ((natt_now - sav->natt_last_activity) < keepalive_interval) {
4887 lck_mtx_unlock(sadb_mutex);
4888 return FALSE;
4889 }
4890
4891 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) {
4892 lck_mtx_unlock(sadb_mutex);
4893 return FALSE; // don't send these from the kernel
4894 }
4895
4896 lck_mtx_unlock(sadb_mutex);
4897
4898 m = m_gethdr(M_NOWAIT, MT_DATA);
4899 if (m == NULL) {
4900 return FALSE;
4901 }
4902
4903 lck_mtx_lock(sadb_mutex);
4904 if (sav->sah->saidx.dst.ss_family == AF_INET) {
4905 struct ip_out_args ipoa = {};
4906 struct route ro = {};
4907
4908 ipoa.ipoa_boundif = IFSCOPE_NONE;
4909 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
4910 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4911 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4912
4913 struct ip *ip = (__typeof__(ip))m_mtod(m);
4914
4915 /*
4916 * Type 2: a UDP packet complete with IP header.
4917 * We must do this because UDP output requires
4918 * an inpcb which we don't have. UDP packet
4919 * contains one byte payload. The byte is set
4920 * to 0xFF.
4921 */
4922 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4923 m->m_len = sizeof(struct udpiphdr) + 1;
4924 bzero(m_mtod(m), m->m_len);
4925 m->m_pkthdr.len = m->m_len;
4926
4927 ip->ip_len = (u_short)m->m_len;
4928 ip->ip_ttl = (u_char)ip_defttl;
4929 ip->ip_p = IPPROTO_UDP;
4930 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4931 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4932 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4933 } else {
4934 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4935 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4936 }
4937 if (sav->natt_encapsulated_src_port != 0) {
4938 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
4939 } else {
4940 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4941 }
4942 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4943 uh->uh_dport = htons(sav->remote_ike_port);
4944 uh->uh_ulen = htons(1 + sizeof(*uh));
4945 uh->uh_sum = 0;
4946 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4947
4948 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4949 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) {
4950 ROUTE_RELEASE(&sav->sah->sa_route);
4951 }
4952
4953 route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4954 lck_mtx_unlock(sadb_mutex);
4955
4956 necp_mark_packet_as_keepalive(m, TRUE);
4957 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4958
4959 lck_mtx_lock(sadb_mutex);
4960 route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4961 } else if (sav->sah->saidx.dst.ss_family == AF_INET6) {
4962 struct ip6_out_args ip6oa = {};
4963 struct route_in6 ro6 = {};
4964
4965 ip6oa.ip6oa_flowadv.code = 0;
4966 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
4967 if (sav->sah->outgoing_if) {
4968 ip6oa.ip6oa_boundif = sav->sah->outgoing_if;
4969 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
4970 }
4971
4972 struct ip6_hdr *ip6 = (__typeof__(ip6))m_mtod(m);
4973
4974 /*
4975 * Type 2: a UDP packet complete with IPv6 header.
4976 * We must do this because UDP output requires
4977 * an inpcb which we don't have. UDP packet
4978 * contains one byte payload. The byte is set
4979 * to 0xFF.
4980 */
4981 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip6));
4982 m->m_len = sizeof(struct udphdr) + sizeof(struct ip6_hdr) + 1;
4983 bzero(m_mtod(m), m->m_len);
4984 m->m_pkthdr.len = m->m_len;
4985
4986 ip6->ip6_flow = 0;
4987 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
4988 ip6->ip6_vfc |= IPV6_VERSION;
4989 ip6->ip6_nxt = IPPROTO_UDP;
4990 ip6->ip6_hlim = (u_int8_t)ip6_defhlim;
4991 ip6->ip6_plen = htons(sizeof(struct udphdr) + 1);
4992 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4993 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
4994 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
4995 ip6_output_setsrcifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_scope_id, NULL);
4996 ip6_output_setdstifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_scope_id, NULL);
4997 } else {
4998 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
4999 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
5000 ip6_output_setdstifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_scope_id, NULL);
5001 ip6_output_setsrcifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_scope_id, NULL);
5002 }
5003
5004 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
5005 ip6->ip6_src.s6_addr16[1] = 0;
5006 }
5007 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
5008 ip6->ip6_dst.s6_addr16[1] = 0;
5009 }
5010
5011 if (sav->natt_encapsulated_src_port != 0) {
5012 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5013 } else {
5014 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5015 }
5016 uh->uh_dport = htons(sav->remote_ike_port);
5017 uh->uh_ulen = htons(1 + sizeof(*uh));
5018 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip6) + sizeof(*uh)) = 0xFF;
5019 uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(uh->uh_ulen) + IPPROTO_UDP));
5020 m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
5021 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
5022
5023 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
5024 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET6) {
5025 ROUTE_RELEASE(&sav->sah->sa_route);
5026 }
5027
5028 route_copyout((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5029 lck_mtx_unlock(sadb_mutex);
5030
5031 necp_mark_packet_as_keepalive(m, TRUE);
5032 error = ip6_output(m, NULL, &ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa);
5033
5034 lck_mtx_lock(sadb_mutex);
5035 route_copyin((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5036 } else {
5037 ipseclog((LOG_ERR, "nat keepalive: invalid address family %u\n", sav->sah->saidx.dst.ss_family));
5038 lck_mtx_unlock(sadb_mutex);
5039 m_freem(m);
5040 return FALSE;
5041 }
5042
5043 if (error == 0) {
5044 sav->natt_last_activity = natt_now;
5045 lck_mtx_unlock(sadb_mutex);
5046 return TRUE;
5047 }
5048
5049 lck_mtx_unlock(sadb_mutex);
5050 return FALSE;
5051 }
5052
/*
 * Build a NAT-T keepalive frame (IPv4 UDP datagram with a single 0xFF
 * payload byte) for hardware keepalive offload on interface ifp.
 *
 * frame_data_offset - where in frame->data the IP header should start
 * (link-layer headers, if any, precede it).
 *
 * Returns TRUE and fills *frame when the SA is eligible for offload,
 * FALSE otherwise.  Only AF_INET SAs are supported here.
 */
__private_extern__ bool
ipsec_fill_offload_frame(ifnet_t ifp,
    struct secasvar *sav,
    struct ifnet_keepalive_offload_frame *frame,
    size_t frame_data_offset)
{
	u_int8_t *data = NULL;
	struct ip *ip = NULL;
	struct udphdr *uh = NULL;

	/*
	 * Eligibility: SA bound to this interface, IPv4, NAT-T with
	 * keepalive offload requested (and not ESP-style keepalives),
	 * a usable encapsulation source port, a known peer IKE port,
	 * and some non-zero keepalive interval configured.
	 */
	if (sav == NULL || sav->sah == NULL || frame == NULL ||
	    (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
	    sav->sah->saidx.dst.ss_family != AF_INET ||
	    !(sav->flags & SADB_X_EXT_NATT) ||
	    !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
	    !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
	    sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
	    ((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) ||
	    sav->remote_ike_port == 0 ||
	    (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
		/* SA is not eligible for keepalive offload on this interface */
		return FALSE;
	}

	if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
	    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
		/* Not enough room in this data frame */
		return FALSE;
	}

	data = frame->data;
	ip = (__typeof__(ip))(void *)(data + frame_data_offset);
	uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));

	frame->length = (u_int8_t)(frame_data_offset + sizeof(struct udpiphdr) + 1);
	frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
	frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;

	bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);

	/* Hand-build the IPv4 header (no options) */
	ip->ip_v = IPVERSION;
	ip->ip_hl = sizeof(struct ip) >> 2;
	ip->ip_off &= htons(~IP_OFFMASK);
	ip->ip_off &= htons(~IP_MF);
	switch (ip4_ipsec_dfbit) {
	case 0: /* clear DF bit */
		ip->ip_off &= htons(~IP_DF);
		break;
	case 1: /* set DF bit */
		ip->ip_off |= htons(IP_DF);
		break;
	default: /* copy DF bit */
		break;
	}
	ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
	/* RFC 6864: atomic datagrams may use IP ID 0 */
	if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) {
		ip->ip_id = 0;
	} else {
		ip->ip_id = ip_randomid((uint64_t)data);
	}
	ip->ip_ttl = (u_char)ip_defttl;
	ip->ip_p = IPPROTO_UDP;
	ip->ip_sum = 0;
	/* for inbound SAs, address roles are mirrored */
	if (sav->sah->dir != IPSEC_DIR_INBOUND) {
		ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
		ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
	} else {
		ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
		ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
	}
	ip->ip_sum = in_cksum_hdr_opt(ip);
	/* Fill out the UDP header */
	if (sav->natt_encapsulated_src_port != 0) {
		uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
	} else {
		uh->uh_sport = htons((u_short)esp_udp_encap_port);
	}
	uh->uh_dport = htons(sav->remote_ike_port);
	uh->uh_ulen = htons(1 + sizeof(*uh));
	uh->uh_sum = 0;
	/* single 0xFF keepalive payload byte */
	*(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;

	/* most specific interval wins: offload > per-SA > global sysctl */
	if (sav->natt_offload_interval != 0) {
		frame->interval = sav->natt_offload_interval;
	} else if (sav->natt_interval != 0) {
		frame->interval = sav->natt_interval;
	} else {
		frame->interval = (u_int16_t)natt_keepalive_interval;
	}
	return TRUE;
}
5144
/*
 * sysctl handler exposing the most recently captured IPsec wake packet
 * (net.link.generic.system.ipsec_wake_pkt).  Read-only: any attempt to
 * write is rejected.  Non-root callers must hold the
 * PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET entitlement.
 */
static int
sysctl_ipsec_wake_packet SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	/* read-only oid: refuse any new value */
	if (req->newptr != USER_ADDR_NULL) {
		ipseclog((LOG_ERR, "ipsec: invalid parameters"));
		return EINVAL;
	}

	struct proc *p = current_proc();
	if (p != NULL) {
		/* root, or an entitled process, may read the wake packet */
		uid_t uid = kauth_cred_getuid(kauth_cred_get());
		if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET, 0) != 0) {
			ipseclog((LOG_ERR, "process does not hold necessary entitlement to get ipsec wake packet"));
			return EPERM;
		}

		int result = sysctl_io_opaque(req, &ipsec_wake_pkt, sizeof(ipsec_wake_pkt), NULL);

		ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u result %d",
		    __func__,
		    ipsec_wake_pkt.wake_uuid,
		    ipsec_wake_pkt.wake_pkt_spi,
		    ipsec_wake_pkt.wake_pkt_seq,
		    ipsec_wake_pkt.wake_pkt_len,
		    result));

		return result;
	}

	return EINVAL;
}
5177
/* Register net.link.generic.system.ipsec_wake_pkt (read-only struct oid). */
SYSCTL_PROC(_net_link_generic_system, OID_AUTO, ipsec_wake_pkt, CTLTYPE_STRUCT | CTLFLAG_RD |
    CTLFLAG_LOCKED, 0, 0, &sysctl_ipsec_wake_packet, "S,ipsec wake packet", "");
5180
/*
 * Record the IPsec packet that woke the system (when capture was armed
 * by the sleep/wake handler) and post a KEV_IPSEC_WAKE_PACKET event.
 * Only the first such packet after wake is saved; capture disarms itself.
 */
void
ipsec_save_wake_packet(struct mbuf *wake_mbuf, u_int32_t spi, u_int32_t seq)
{
	if (wake_mbuf == NULL) {
		ipseclog((LOG_ERR, "ipsec: bad wake packet"));
		return;
	}

	lck_mtx_lock(sadb_mutex);
	/* capture is one-shot; normally disarmed, so bail fast */
	if (__probable(!ipsec_save_wake_pkt)) {
		goto done;
	}

	/* copy at most IPSEC_MAX_WAKE_PKT_LEN bytes of the packet */
	u_int16_t max_len = (wake_mbuf->m_pkthdr.len > IPSEC_MAX_WAKE_PKT_LEN) ? IPSEC_MAX_WAKE_PKT_LEN : (u_int16_t)wake_mbuf->m_pkthdr.len;
	m_copydata(wake_mbuf, 0, max_len, (void *)ipsec_wake_pkt.wake_pkt);
	ipsec_wake_pkt.wake_pkt_len = max_len;

	ipsec_wake_pkt.wake_pkt_spi = spi;
	ipsec_wake_pkt.wake_pkt_seq = seq;

	ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u",
	    __func__,
	    ipsec_wake_pkt.wake_uuid,
	    ipsec_wake_pkt.wake_pkt_spi,
	    ipsec_wake_pkt.wake_pkt_seq,
	    ipsec_wake_pkt.wake_pkt_len));

	/* notify userspace that a wake packet is available via sysctl */
	struct kev_msg ev_msg;
	bzero(&ev_msg, sizeof(ev_msg));

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_IPSEC_SUBCLASS;
	ev_msg.event_code = KEV_IPSEC_WAKE_PACKET;

	struct ipsec_wake_pkt_event_data event_data;
	strlcpy(event_data.wake_uuid, ipsec_wake_pkt.wake_uuid, sizeof(event_data.wake_uuid));
	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		os_log_error(OS_LOG_DEFAULT, "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, ipsec_wake_pkt.wake_uuid);
	}

	/* disarm: only the first packet after wake is recorded */
	ipsec_save_wake_pkt = false;
done:
	lck_mtx_unlock(sadb_mutex);
	return;
}
5232
5233 static void
ipsec_get_local_ports(void)5234 ipsec_get_local_ports(void)
5235 {
5236 errno_t error;
5237 ifnet_t *ifp_list;
5238 uint32_t count, i;
5239 static uint8_t port_bitmap[bitstr_size(IP_PORTRANGE_SIZE)];
5240
5241 error = ifnet_list_get_all(IFNET_FAMILY_IPSEC, &ifp_list, &count);
5242 if (error != 0) {
5243 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_list_get_all() failed %d",
5244 __func__, error);
5245 return;
5246 }
5247 for (i = 0; i < count; i++) {
5248 ifnet_t ifp = ifp_list[i];
5249
5250 /*
5251 * Get all the TCP and UDP ports for IPv4 and IPv6
5252 */
5253 error = ifnet_get_local_ports_extended(ifp, PF_UNSPEC,
5254 IFNET_GET_LOCAL_PORTS_WILDCARDOK |
5255 IFNET_GET_LOCAL_PORTS_NOWAKEUPOK |
5256 IFNET_GET_LOCAL_PORTS_ANYTCPSTATEOK,
5257 port_bitmap);
5258 if (error != 0) {
5259 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_get_local_ports_extended(%s) failed %d",
5260 __func__, if_name(ifp), error);
5261 }
5262 }
5263 ifnet_list_free(ifp_list);
5264 }
5265
5266 static IOReturn
ipsec_sleep_wake_handler(void * target,void * refCon,UInt32 messageType,void * provider,void * messageArgument,vm_size_t argSize)5267 ipsec_sleep_wake_handler(void *target, void *refCon, UInt32 messageType,
5268 void *provider, void *messageArgument, vm_size_t argSize)
5269 {
5270 #pragma unused(target, refCon, provider, messageArgument, argSize)
5271 switch (messageType) {
5272 case kIOMessageSystemWillSleep:
5273 {
5274 ipsec_get_local_ports();
5275 ipsec_save_wake_pkt = false;
5276 memset(&ipsec_wake_pkt, 0, sizeof(ipsec_wake_pkt));
5277 IOPMCopySleepWakeUUIDKey(ipsec_wake_pkt.wake_uuid,
5278 sizeof(ipsec_wake_pkt.wake_uuid));
5279 ipseclog((LOG_NOTICE,
5280 "ipsec: system will sleep, uuid: %s", ipsec_wake_pkt.wake_uuid));
5281 break;
5282 }
5283 case kIOMessageSystemHasPoweredOn:
5284 {
5285 char wake_reason[128] = {0};
5286 size_t size = sizeof(wake_reason);
5287 if (kernel_sysctlbyname("kern.wakereason", wake_reason, &size, NULL, 0) == 0) {
5288 if (strnstr(wake_reason, "wlan", size) == 0 ||
5289 strnstr(wake_reason, "WL.OutboxNotEmpty", size) == 0 ||
5290 strnstr(wake_reason, "baseband", size) == 0 ||
5291 strnstr(wake_reason, "bluetooth", size) == 0 ||
5292 strnstr(wake_reason, "BT.OutboxNotEmpty", size) == 0) {
5293 ipsec_save_wake_pkt = true;
5294 ipseclog((LOG_NOTICE,
5295 "ipsec: system has powered on, uuid: %s reason %s", ipsec_wake_pkt.wake_uuid, wake_reason));
5296 }
5297 }
5298 break;
5299 }
5300 default:
5301 break;
5302 }
5303
5304 return IOPMAckImplied;
5305 }
5306
5307 void
ipsec_monitor_sleep_wake(void)5308 ipsec_monitor_sleep_wake(void)
5309 {
5310 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
5311
5312 if (sleep_wake_handle == NULL) {
5313 sleep_wake_handle = registerSleepWakeInterest(ipsec_sleep_wake_handler,
5314 NULL, NULL);
5315 if (sleep_wake_handle != NULL) {
5316 ipseclog((LOG_INFO,
5317 "ipsec: monitoring sleep wake"));
5318 }
5319 }
5320 }
5321