1 /*
2 * Copyright (c) 2008-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30 /* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * IPsec controller part.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/errno.h>
75 #include <sys/time.h>
76 #include <sys/kernel.h>
77 #include <sys/syslog.h>
78 #include <sys/sysctl.h>
79 #include <sys/priv.h>
80 #include <kern/locks.h>
81 #include <sys/kauth.h>
82 #include <sys/bitstring.h>
83
84 #include <libkern/OSAtomic.h>
85 #include <libkern/sysctl.h>
86
87 #include <net/if.h>
88 #include <net/route.h>
89 #include <net/if_ipsec.h>
90 #include <net/if_ports_used.h>
91
92 #include <netinet/in.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/in_var.h>
97 #include <netinet/udp.h>
98 #include <netinet/udp_var.h>
99 #include <netinet/ip_ecn.h>
100 #include <netinet6/ip6_ecn.h>
101 #include <netinet/tcp.h>
102 #include <netinet/udp.h>
103
104 #include <netinet/ip6.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet/in_pcb.h>
107 #include <netinet/icmp6.h>
108
109 #include <netinet6/ipsec.h>
110 #include <netinet6/ipsec6.h>
111 #include <netinet6/ah.h>
112 #include <netinet6/ah6.h>
113 #if IPSEC_ESP
114 #include <netinet6/esp.h>
115 #include <netinet6/esp6.h>
116 #endif
117 #include <netkey/key.h>
118 #include <netkey/keydb.h>
119 #include <netkey/key_debug.h>
120
121 #include <net/net_osdep.h>
122
123 #include <IOKit/pwr_mgt/IOPM.h>
124
125 #include <os/log_private.h>
126
127 #if IPSEC_DEBUG
128 int ipsec_debug = 1;
129 #else
130 int ipsec_debug = 0;
131 #endif
132
133 #include <sys/kdebug.h>
134 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
135 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
136 #define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
137 #define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
138 #define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
139
140 struct ipsecstat ipsecstat;
141 int ip4_ah_cleartos = 1;
142 int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
143 int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
144 int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
145 int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
146 int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
147 int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
148 struct secpolicy ip4_def_policy;
149 int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
150 int ip4_esp_randpad = -1;
151 int esp_udp_encap_port = 0;
152 static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
153 extern int natt_keepalive_interval;
154 extern u_int64_t natt_now;
155
156 struct ipsec_tag;
157
158 void *sleep_wake_handle = NULL;
159 bool ipsec_save_wake_pkt = false;
160
161 SYSCTL_DECL(_net_inet_ipsec);
162 SYSCTL_DECL(_net_inet6_ipsec6);
163 /* net.inet.ipsec */
164 SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
165 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
166 SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
167 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
168 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
169 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
170 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
171 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
172 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
173 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
174 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
175 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
176 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
177 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
178 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
179 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
180 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
181 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
182 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
183 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
184 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
185 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
186 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
187 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
188
189 /* for performance, we bypass ipsec until a security policy is set */
190 int ipsec_bypass = 1;
191 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass, 0, "");
192
193 /*
194 * NAT Traversal requires a UDP port for encapsulation,
195 * esp_udp_encap_port controls which port is used. Racoon
196 * must set this port to the port racoon is using locally
197 * for nat traversal.
198 */
199 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
200 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
201
202 struct ipsecstat ipsec6stat;
203 int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
204 int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
205 int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
206 int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
207 struct secpolicy ip6_def_policy;
208 int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
209 int ip6_esp_randpad = -1;
210
211 /* net.inet6.ipsec6 */
212 SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
213 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
214 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
215 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
216 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
217 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
218 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
219 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
220 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
221 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
222 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
223 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
224 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
225 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
226 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
227 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
228 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
229 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
230
231 SYSCTL_DECL(_net_link_generic_system);
232
233 struct ipsec_wake_pkt_info ipsec_wake_pkt;
234
235 static int ipsec_setspidx_interface(struct secpolicyindex *, u_int8_t, struct mbuf *,
236 int, int, int);
237 static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int8_t, u_int,
238 struct mbuf *, int);
239 static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
240 static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
241 static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
242 static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
243 static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
244 static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
245 static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
246 static struct inpcbpolicy *ipsec_newpcbpolicy(void);
247 static void ipsec_delpcbpolicy(struct inpcbpolicy *);
248 static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
249 static int ipsec_set_policy(struct secpolicy **pcb_sp,
250 int optname, caddr_t request, size_t len, int priv);
251 static void vshiftl(unsigned char *, int, size_t);
252 static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
253 static int ipsec64_encapsulate(struct mbuf *, struct secasvar *, uint32_t);
254 static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
255 static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
256 static struct ipsec_tag *ipsec_addaux(struct mbuf *);
257 static struct ipsec_tag *ipsec_findaux(struct mbuf *);
258 static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
259 int ipsec_send_natt_keepalive(struct secasvar *sav);
260 bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
261
262 extern bool IOPMCopySleepWakeUUIDKey(char *, size_t);
263
264 typedef IOReturn (*IOServiceInterestHandler)( void * target, void * refCon,
265 UInt32 messageType, void * provider,
266 void * messageArgument, vm_size_t argSize );
267 extern void *registerSleepWakeInterest(IOServiceInterestHandler, void *, void *);
268
269 static int
270 sysctl_def_policy SYSCTL_HANDLER_ARGS
271 {
272 int new_policy = ip4_def_policy.policy;
273 int error = sysctl_handle_int(oidp, &new_policy, 0, req);
274
275 #pragma unused(arg1, arg2)
276 if (error == 0) {
277 if (new_policy != IPSEC_POLICY_NONE &&
278 new_policy != IPSEC_POLICY_DISCARD) {
279 return EINVAL;
280 }
281 ip4_def_policy.policy = new_policy;
282
283 /* Turn off the bypass if the default security policy changes */
284 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
285 ipsec_bypass = 0;
286 }
287 }
288
289 return error;
290 }
291
292 /*
293 * For OUTBOUND packet having a socket. Searching SPD for packet,
294 * and return a pointer to SP.
295 * OUT: NULL: no apropreate SP found, the following value is set to error.
296 * 0 : bypass
297 * EACCES : discard packet.
298 * ENOENT : ipsec_acquire() in progress, maybe.
299 * others : error occurred.
300 * others: a pointer to SP
301 *
302 * NOTE: IPv6 mapped adddress concern is implemented here.
303 */
struct secpolicy *
ipsec4_getpolicybysock(struct mbuf *m,
    u_int8_t dir,
    struct socket *so,
    int *error)
{
	struct inpcbpolicy *pcbsp = NULL;
	struct secpolicy *currsp = NULL;        /* policy on socket */
	struct secpolicy *kernsp = NULL;        /* policy on kernel */

	/* Caller must not hold sadb_mutex; it is taken below for refcounts. */
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
	/* sanity check */
	if (m == NULL || so == NULL || error == NULL) {
		panic("ipsec4_getpolicybysock: NULL pointer was passed.");
	}

	if (so->so_pcb == NULL) {
		/* No PCB: fall back to address-based lookup. */
		printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
		return ipsec4_getpolicybyaddr(m, dir, 0, error);
	}

	/* Locate the per-PCB policy container for either address family. */
	switch (SOCK_DOM(so)) {
	case PF_INET:
		pcbsp = sotoinpcb(so)->inp_sp;
		break;
	case PF_INET6:
		pcbsp = sotoin6pcb(so)->in6p_sp;
		break;
	}

	if (!pcbsp) {
		/* Socket has not specified an IPSEC policy */
		return ipsec4_getpolicybyaddr(m, dir, 0, error);
	}

	KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* Refresh the cached policy index (spidx) from this packet. */
	switch (SOCK_DOM(so)) {
	case PF_INET:
		/* set spidx in pcb */
		*error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
		break;
	case PF_INET6:
		/* set spidx in pcb */
		*error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
		break;
	default:
		panic("ipsec4_getpolicybysock: unsupported address family");
	}
	if (*error) {
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1, *error, 0, 0, 0);
		return NULL;
	}

	/* sanity check */
	if (pcbsp == NULL) {
		panic("ipsec4_getpolicybysock: pcbsp is NULL.");
	}

	/* Select the per-socket policy for the requested direction. */
	switch (dir) {
	case IPSEC_DIR_INBOUND:
		currsp = pcbsp->sp_in;
		break;
	case IPSEC_DIR_OUTBOUND:
		currsp = pcbsp->sp_out;
		break;
	default:
		panic("ipsec4_getpolicybysock: illegal direction.");
	}

	/* sanity check */
	if (currsp == NULL) {
		panic("ipsec4_getpolicybysock: currsp is NULL.");
	}

	/* when privileged socket */
	if (pcbsp->priv) {
		switch (currsp->policy) {
		case IPSEC_POLICY_BYPASS:
			/* Privileged sockets may bypass IPsec outright. */
			lck_mtx_lock(sadb_mutex);
			currsp->refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2, *error, 0, 0, 0);
			return currsp;

		case IPSEC_POLICY_ENTRUST:
			/* look for a policy in SPD */
			kernsp = key_allocsp(&currsp->spidx, dir);

			/* SP found */
			if (kernsp != NULL) {
				KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
				    printf("DP ipsec4_getpolicybysock called "
				    "to allocate SP:0x%llx\n",
				    (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
				*error = 0;
				KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3, *error, 0, 0, 0);
				return kernsp;
			}

			/* no SP found: repair an out-of-range default, then use it */
			lck_mtx_lock(sadb_mutex);
			if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
			    && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
				ipseclog((LOG_INFO,
				    "fixed system default policy: %d->%d\n",
				    ip4_def_policy.policy, IPSEC_POLICY_NONE));
				ip4_def_policy.policy = IPSEC_POLICY_NONE;
			}
			ip4_def_policy.refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4, *error, 0, 0, 0);
			return &ip4_def_policy;

		case IPSEC_POLICY_IPSEC:
			/* Use the socket's own IPsec policy. */
			lck_mtx_lock(sadb_mutex);
			currsp->refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5, *error, 0, 0, 0);
			return currsp;

		default:
			ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
			    "Invalid policy for PCB %d\n", currsp->policy));
			*error = EINVAL;
			KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6, *error, 0, 0, 0);
			return NULL;
		}
		/* NOTREACHED */
	}

	/* when non-privileged socket */
	/* look for a policy in SPD */
	kernsp = key_allocsp(&currsp->spidx, dir);

	/* SP found */
	if (kernsp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP ipsec4_getpolicybysock called "
		    "to allocate SP:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7, *error, 0, 0, 0);
		return kernsp;
	}

	/* no SP found */
	switch (currsp->policy) {
	case IPSEC_POLICY_BYPASS:
		/* Non-privileged sockets may not request bypass. */
		ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
		    "Illegal policy for non-priviliged defined %d\n",
		    currsp->policy));
		*error = EINVAL;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8, *error, 0, 0, 0);
		return NULL;

	case IPSEC_POLICY_ENTRUST:
		/* Fall back to the (sanitized) system default policy. */
		lck_mtx_lock(sadb_mutex);
		if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
		    && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
			ipseclog((LOG_INFO,
			    "fixed system default policy: %d->%d\n",
			    ip4_def_policy.policy, IPSEC_POLICY_NONE));
			ip4_def_policy.policy = IPSEC_POLICY_NONE;
		}
		ip4_def_policy.refcnt++;
		lck_mtx_unlock(sadb_mutex);
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9, *error, 0, 0, 0);
		return &ip4_def_policy;

	case IPSEC_POLICY_IPSEC:
		lck_mtx_lock(sadb_mutex);
		currsp->refcnt++;
		lck_mtx_unlock(sadb_mutex);
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10, *error, 0, 0, 0);
		return currsp;

	default:
		ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
		    "Invalid policy for PCB %d\n", currsp->policy));
		*error = EINVAL;
		KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11, *error, 0, 0, 0);
		return NULL;
	}
	/* NOTREACHED */
}
495
496 /*
497 * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet,
498 * and return a pointer to SP.
499 * OUT: positive: a pointer to the entry for security policy leaf matched.
500 * NULL: no apropreate SP found, the following value is set to error.
501 * 0 : bypass
502 * EACCES : discard packet.
503 * ENOENT : ipsec_acquire() in progress, maybe.
504 * others : error occurred.
505 */
struct secpolicy *
ipsec4_getpolicybyaddr(struct mbuf *m,
    u_int8_t dir,
    int flag,
    int *error)
{
	struct secpolicy *sp = NULL;

	/* Fast path: no security policies are installed, skip the SPD. */
	if (ipsec_bypass != 0) {
		return 0;
	}

	/* Caller must not hold sadb_mutex; it is taken below for refcounts. */
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* sanity check */
	if (m == NULL || error == NULL) {
		panic("ipsec4_getpolicybyaddr: NULL pointer was passed.");
	}
	{
		struct secpolicyindex spidx;

		KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
		bzero(&spidx, sizeof(spidx));

		/* make an index to look for a policy; ports matter only
		 * for locally-originated (non-forwarded) packets */
		*error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
		    (flag & IP_FORWARDING) ? 0 : 1);

		if (*error != 0) {
			KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, *error, 0, 0, 0);
			return NULL;
		}

		sp = key_allocsp(&spidx, dir);
	}

	/* SP found */
	if (sp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP ipsec4_getpolicybyaddr called "
		    "to allocate SP:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sp)));
		*error = 0;
		KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
		return sp;
	}

	/* no SP found: repair an out-of-range default policy, then use it */
	lck_mtx_lock(sadb_mutex);
	if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
	    && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
		ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
		    ip4_def_policy.policy,
		    IPSEC_POLICY_NONE));
		ip4_def_policy.policy = IPSEC_POLICY_NONE;
	}
	ip4_def_policy.refcnt++;
	lck_mtx_unlock(sadb_mutex);
	*error = 0;
	KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3, *error, 0, 0, 0);
	return &ip4_def_policy;
}
568
/* Match against the bound interface rather than the source address.
 * Unlike getpolicybyaddr, do not fall back to the default policy.
 * Return 0 if processing should continue, or -1 if the packet
 * should be dropped.
 */
574 int
ipsec4_getpolicybyinterface(struct mbuf * m,u_int8_t dir,int * flags,struct ip_out_args * ipoa,struct secpolicy ** sp)575 ipsec4_getpolicybyinterface(struct mbuf *m,
576 u_int8_t dir,
577 int *flags,
578 struct ip_out_args *ipoa,
579 struct secpolicy **sp)
580 {
581 struct secpolicyindex spidx;
582 int error = 0;
583
584 if (ipsec_bypass != 0) {
585 return 0;
586 }
587
588 /* Sanity check */
589 if (m == NULL || ipoa == NULL || sp == NULL) {
590 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.");
591 }
592
593 if (ipoa->ipoa_boundif == IFSCOPE_NONE) {
594 return 0;
595 }
596
597 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
598 bzero(&spidx, sizeof(spidx));
599
600 /* make a index to look for a policy */
601 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
602 ipoa->ipoa_boundif, 4);
603
604 if (error != 0) {
605 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
606 return 0;
607 }
608
609 *sp = key_allocsp(&spidx, dir);
610
611 /* Return SP, whether NULL or not */
612 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
613 if ((*sp)->ipsec_if == NULL) {
614 /* Invalid to capture on an interface without redirect */
615 key_freesp(*sp, KEY_SADB_UNLOCKED);
616 *sp = NULL;
617 return -1;
618 } else if ((*sp)->disabled) {
619 /* Disabled policies go in the clear */
620 key_freesp(*sp, KEY_SADB_UNLOCKED);
621 *sp = NULL;
622 *flags |= IP_NOIPSEC; /* Avoid later IPsec check */
623 } else {
624 /* If policy is enabled, redirect to ipsec interface */
625 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
626 }
627 }
628
629 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
630
631 return 0;
632 }
633
634
635 /*
636 * For OUTBOUND packet having a socket. Searching SPD for packet,
637 * and return a pointer to SP.
638 * OUT: NULL: no apropreate SP found, the following value is set to error.
639 * 0 : bypass
640 * EACCES : discard packet.
641 * ENOENT : ipsec_acquire() in progress, maybe.
642 * others : error occurred.
643 * others: a pointer to SP
644 */
struct secpolicy *
ipsec6_getpolicybysock(struct mbuf *m,
    u_int8_t dir,
    struct socket *so,
    int *error)
{
	struct inpcbpolicy *pcbsp = NULL;
	struct secpolicy *currsp = NULL;        /* policy on socket */
	struct secpolicy *kernsp = NULL;        /* policy on kernel */

	/* Caller must not hold sadb_mutex; it is taken below for refcounts. */
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* sanity check */
	if (m == NULL || so == NULL || error == NULL) {
		panic("ipsec6_getpolicybysock: NULL pointer was passed.");
	}

#if DIAGNOSTIC
	if (SOCK_DOM(so) != PF_INET6) {
		panic("ipsec6_getpolicybysock: socket domain != inet6");
	}
#endif

	pcbsp = sotoin6pcb(so)->in6p_sp;

	if (!pcbsp) {
		/* Socket has not specified an IPsec policy. */
		return ipsec6_getpolicybyaddr(m, dir, 0, error);
	}

	/* set spidx in pcb */
	/* NOTE(review): return value ignored here (unlike the v4 variant,
	 * which bails out on failure) — the lookup proceeds regardless. */
	ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));

	/* sanity check */
	if (pcbsp == NULL) {
		panic("ipsec6_getpolicybysock: pcbsp is NULL.");
	}

	/* Select the per-socket policy for the requested direction. */
	switch (dir) {
	case IPSEC_DIR_INBOUND:
		currsp = pcbsp->sp_in;
		break;
	case IPSEC_DIR_OUTBOUND:
		currsp = pcbsp->sp_out;
		break;
	default:
		panic("ipsec6_getpolicybysock: illegal direction.");
	}

	/* sanity check */
	if (currsp == NULL) {
		panic("ipsec6_getpolicybysock: currsp is NULL.");
	}

	/* when privileged socket */
	if (pcbsp->priv) {
		switch (currsp->policy) {
		case IPSEC_POLICY_BYPASS:
			/* Privileged sockets may bypass IPsec outright. */
			lck_mtx_lock(sadb_mutex);
			currsp->refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			return currsp;

		case IPSEC_POLICY_ENTRUST:
			/* look for a policy in SPD */
			kernsp = key_allocsp(&currsp->spidx, dir);

			/* SP found */
			if (kernsp != NULL) {
				KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
				    printf("DP ipsec6_getpolicybysock called "
				    "to allocate SP:0x%llx\n",
				    (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
				*error = 0;
				return kernsp;
			}

			/* no SP found: repair an out-of-range default, then use it */
			lck_mtx_lock(sadb_mutex);
			if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
			    && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
				ipseclog((LOG_INFO,
				    "fixed system default policy: %d->%d\n",
				    ip6_def_policy.policy, IPSEC_POLICY_NONE));
				ip6_def_policy.policy = IPSEC_POLICY_NONE;
			}
			ip6_def_policy.refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			return &ip6_def_policy;

		case IPSEC_POLICY_IPSEC:
			/* Use the socket's own IPsec policy. */
			lck_mtx_lock(sadb_mutex);
			currsp->refcnt++;
			lck_mtx_unlock(sadb_mutex);
			*error = 0;
			return currsp;

		default:
			ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
			    "Invalid policy for PCB %d\n", currsp->policy));
			*error = EINVAL;
			return NULL;
		}
		/* NOTREACHED */
	}

	/* when non-privileged socket */
	/* look for a policy in SPD */
	kernsp = key_allocsp(&currsp->spidx, dir);

	/* SP found */
	if (kernsp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP ipsec6_getpolicybysock called "
		    "to allocate SP:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
		*error = 0;
		return kernsp;
	}

	/* no SP found */
	switch (currsp->policy) {
	case IPSEC_POLICY_BYPASS:
		/* Non-privileged sockets may not request bypass. */
		ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
		    "Illegal policy for non-priviliged defined %d\n",
		    currsp->policy));
		*error = EINVAL;
		return NULL;

	case IPSEC_POLICY_ENTRUST:
		/* Fall back to the (sanitized) system default policy. */
		lck_mtx_lock(sadb_mutex);
		if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
		    && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
			ipseclog((LOG_INFO,
			    "fixed system default policy: %d->%d\n",
			    ip6_def_policy.policy, IPSEC_POLICY_NONE));
			ip6_def_policy.policy = IPSEC_POLICY_NONE;
		}
		ip6_def_policy.refcnt++;
		lck_mtx_unlock(sadb_mutex);
		*error = 0;
		return &ip6_def_policy;

	case IPSEC_POLICY_IPSEC:
		lck_mtx_lock(sadb_mutex);
		currsp->refcnt++;
		lck_mtx_unlock(sadb_mutex);
		*error = 0;
		return currsp;

	default:
		ipseclog((LOG_ERR,
		    "ipsec6_policybysock: Invalid policy for PCB %d\n",
		    currsp->policy));
		*error = EINVAL;
		return NULL;
	}
	/* NOTREACHED */
}
805
806 /*
807 * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet,
808 * and return a pointer to SP.
809 * `flag' means that packet is to be forwarded whether or not.
810 * flag = 1: forwad
811 * OUT: positive: a pointer to the entry for security policy leaf matched.
812 * NULL: no apropreate SP found, the following value is set to error.
813 * 0 : bypass
814 * EACCES : discard packet.
815 * ENOENT : ipsec_acquire() in progress, maybe.
816 * others : error occurred.
817 */
818 #ifndef IP_FORWARDING
819 #define IP_FORWARDING 1
820 #endif
821
struct secpolicy *
ipsec6_getpolicybyaddr(struct mbuf *m,
    u_int8_t dir,
    int flag,
    int *error)
{
	struct secpolicy *sp = NULL;

	/* Caller must not hold sadb_mutex; it is taken below for refcounts. */
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* sanity check */
	if (m == NULL || error == NULL) {
		panic("ipsec6_getpolicybyaddr: NULL pointer was passed.");
	}

	{
		struct secpolicyindex spidx;

		bzero(&spidx, sizeof(spidx));

		/* make an index to look for a policy; ports matter only
		 * for locally-originated (non-forwarded) packets */
		*error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
		    (flag & IP_FORWARDING) ? 0 : 1);

		if (*error != 0) {
			return NULL;
		}

		sp = key_allocsp(&spidx, dir);
	}

	/* SP found */
	if (sp != NULL) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP ipsec6_getpolicybyaddr called "
		    "to allocate SP:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sp)));
		*error = 0;
		return sp;
	}

	/* no SP found: repair an out-of-range default policy, then use it */
	lck_mtx_lock(sadb_mutex);
	if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
	    && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
		ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
		    ip6_def_policy.policy, IPSEC_POLICY_NONE));
		ip6_def_policy.policy = IPSEC_POLICY_NONE;
	}
	ip6_def_policy.refcnt++;
	lck_mtx_unlock(sadb_mutex);
	*error = 0;
	return &ip6_def_policy;
}
876
/* Match against the bound interface rather than the source address.
 * Unlike getpolicybyaddr, do not fall back to the default policy.
 * Return 0 if processing should continue, or -1 if the packet
 * should be dropped.
 */
882 int
ipsec6_getpolicybyinterface(struct mbuf * m,u_int8_t dir,int flag,struct ip6_out_args * ip6oap,int * noipsec,struct secpolicy ** sp)883 ipsec6_getpolicybyinterface(struct mbuf *m,
884 u_int8_t dir,
885 int flag,
886 struct ip6_out_args *ip6oap,
887 int *noipsec,
888 struct secpolicy **sp)
889 {
890 struct secpolicyindex spidx;
891 int error = 0;
892
893 if (ipsec_bypass != 0) {
894 return 0;
895 }
896
897 /* Sanity check */
898 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) {
899 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.");
900 }
901
902 *noipsec = 0;
903
904 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) {
905 return 0;
906 }
907
908 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
909 bzero(&spidx, sizeof(spidx));
910
911 /* make a index to look for a policy */
912 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
913 ip6oap->ip6oa_boundif, 6);
914
915 if (error != 0) {
916 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
917 return 0;
918 }
919
920 *sp = key_allocsp(&spidx, dir);
921
922 /* Return SP, whether NULL or not */
923 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
924 if ((*sp)->ipsec_if == NULL) {
925 /* Invalid to capture on an interface without redirect */
926 key_freesp(*sp, KEY_SADB_UNLOCKED);
927 *sp = NULL;
928 return -1;
929 } else if ((*sp)->disabled) {
930 /* Disabled policies go in the clear */
931 key_freesp(*sp, KEY_SADB_UNLOCKED);
932 *sp = NULL;
933 *noipsec = 1; /* Avoid later IPsec check */
934 } else {
935 /* If policy is enabled, redirect to ipsec interface */
936 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
937 }
938 }
939
940 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
941
942 return 0;
943 }
944
945 /*
946 * set IP address into spidx from mbuf.
947 * When Forwarding packet and ICMP echo reply, this function is used.
948 *
949 * IN: get the followings from mbuf.
950 * protocol family, src, dst, next protocol
951 * OUT:
952 * 0: success.
953 * other: failure, and set errno.
954 */
955 static int
ipsec_setspidx_mbuf(struct secpolicyindex * spidx,u_int8_t dir,__unused u_int family,struct mbuf * m,int needport)956 ipsec_setspidx_mbuf(
957 struct secpolicyindex *spidx,
958 u_int8_t dir,
959 __unused u_int family,
960 struct mbuf *m,
961 int needport)
962 {
963 int error;
964
965 /* sanity check */
966 if (spidx == NULL || m == NULL) {
967 panic("ipsec_setspidx_mbuf: NULL pointer was passed.");
968 }
969
970 bzero(spidx, sizeof(*spidx));
971
972 error = ipsec_setspidx(m, spidx, needport, 0);
973 if (error) {
974 goto bad;
975 }
976 spidx->dir = dir;
977
978 return 0;
979
980 bad:
981 /* XXX initialize */
982 bzero(spidx, sizeof(*spidx));
983 return EINVAL;
984 }
985
986 static int
ipsec_setspidx_interface(struct secpolicyindex * spidx,u_int8_t dir,struct mbuf * m,int needport,int ifindex,int ip_version)987 ipsec_setspidx_interface(
988 struct secpolicyindex *spidx,
989 u_int8_t dir,
990 struct mbuf *m,
991 int needport,
992 int ifindex,
993 int ip_version)
994 {
995 int error;
996
997 /* sanity check */
998 if (spidx == NULL || m == NULL) {
999 panic("ipsec_setspidx_interface: NULL pointer was passed.");
1000 }
1001
1002 bzero(spidx, sizeof(*spidx));
1003
1004 error = ipsec_setspidx(m, spidx, needport, ip_version);
1005 if (error) {
1006 goto bad;
1007 }
1008 spidx->dir = dir;
1009
1010 if (ifindex != 0) {
1011 ifnet_head_lock_shared();
1012 spidx->internal_if = ifindex2ifnet[ifindex];
1013 ifnet_head_done();
1014 } else {
1015 spidx->internal_if = NULL;
1016 }
1017
1018 return 0;
1019
1020 bad:
1021 return EINVAL;
1022 }
1023
1024 static int
ipsec4_setspidx_inpcb(struct mbuf * m,struct inpcb * pcb)1025 ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1026 {
1027 struct secpolicyindex *spidx;
1028 int error;
1029
1030 if (ipsec_bypass != 0) {
1031 return 0;
1032 }
1033
1034 /* sanity check */
1035 if (pcb == NULL) {
1036 panic("ipsec4_setspidx_inpcb: no PCB found.");
1037 }
1038 if (pcb->inp_sp == NULL) {
1039 panic("ipsec4_setspidx_inpcb: no inp_sp found.");
1040 }
1041 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) {
1042 panic("ipsec4_setspidx_inpcb: no sp_in/out found.");
1043 }
1044
1045 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1046 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1047
1048 spidx = &pcb->inp_sp->sp_in->spidx;
1049 error = ipsec_setspidx(m, spidx, 1, 0);
1050 if (error) {
1051 goto bad;
1052 }
1053 spidx->dir = IPSEC_DIR_INBOUND;
1054
1055 spidx = &pcb->inp_sp->sp_out->spidx;
1056 error = ipsec_setspidx(m, spidx, 1, 0);
1057 if (error) {
1058 goto bad;
1059 }
1060 spidx->dir = IPSEC_DIR_OUTBOUND;
1061
1062 return 0;
1063
1064 bad:
1065 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1066 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1067 return error;
1068 }
1069
1070 static int
ipsec6_setspidx_in6pcb(struct mbuf * m,struct in6pcb * pcb)1071 ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1072 {
1073 struct secpolicyindex *spidx;
1074 int error;
1075
1076 /* sanity check */
1077 if (pcb == NULL) {
1078 panic("ipsec6_setspidx_in6pcb: no PCB found.");
1079 }
1080 if (pcb->in6p_sp == NULL) {
1081 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.");
1082 }
1083 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) {
1084 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.");
1085 }
1086
1087 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1088 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1089
1090 spidx = &pcb->in6p_sp->sp_in->spidx;
1091 error = ipsec_setspidx(m, spidx, 1, 0);
1092 if (error) {
1093 goto bad;
1094 }
1095 spidx->dir = IPSEC_DIR_INBOUND;
1096
1097 spidx = &pcb->in6p_sp->sp_out->spidx;
1098 error = ipsec_setspidx(m, spidx, 1, 0);
1099 if (error) {
1100 goto bad;
1101 }
1102 spidx->dir = IPSEC_DIR_OUTBOUND;
1103
1104 return 0;
1105
1106 bad:
1107 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1108 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1109 return error;
1110 }
1111
1112 /*
1113 * configure security policy index (src/dst/proto/sport/dport)
1114 * by looking at the content of mbuf.
1115 * the caller is responsible for error recovery (like clearing up spidx).
1116 */
static int
ipsec_setspidx(struct mbuf *m,
    struct secpolicyindex *spidx,
    int needport,
    int force_ip_version)
{
	struct ip *ip = NULL;
	struct ip ipbuf;
	u_int v;
	struct mbuf *n;
	int len;
	int error;

	if (m == NULL) {
		panic("ipsec_setspidx: m == 0 passed.");
	}

	/*
	 * validate m->m_pkthdr.len. we see incorrect length if we
	 * mistakenly call this function with inconsistent mbuf chain
	 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
	 */
	len = 0;
	for (n = m; n; n = n->m_next) {
		len += n->m_len;
	}
	if (m->m_pkthdr.len != len) {
		KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
		    printf("ipsec_setspidx: "
		    "total of m_len(%d) != pkthdr.len(%d), "
		    "ignored.\n",
		    len, m->m_pkthdr.len));
		return EINVAL;
	}

	/* need at least a full IPv4 header to read the version nibble */
	if (m->m_pkthdr.len < sizeof(struct ip)) {
		KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
		    printf("ipsec_setspidx: "
		    "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
		    m->m_pkthdr.len));
		return EINVAL;
	}

	/* get a contiguous view of the leading header, copying if split */
	if (m->m_len >= sizeof(*ip)) {
		ip = mtod(m, struct ip *);
	} else {
		m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
		ip = &ipbuf;
	}

	/* caller may force the IP version (4 or 6); otherwise read it */
	if (force_ip_version) {
		v = force_ip_version;
	} else {
#ifdef _IP_VHL
		v = _IP_VHL_V(ip->ip_vhl);
#else
		v = ip->ip_v;
#endif
	}
	switch (v) {
	case 4:
		/* addresses first, then upper-layer proto/ports */
		error = ipsec4_setspidx_ipaddr(m, spidx);
		if (error) {
			return error;
		}
		ipsec4_get_ulp(m, spidx, needport);
		return 0;
	case 6:
		if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
			KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
			    printf("ipsec_setspidx: "
			    "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
			    "ignored.\n", m->m_pkthdr.len));
			return EINVAL;
		}
		error = ipsec6_setspidx_ipaddr(m, spidx);
		if (error) {
			return error;
		}
		ipsec6_get_ulp(m, spidx, needport);
		return 0;
	default:
		KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
		    printf("ipsec_setspidx: "
		    "unknown IP version %u, ignored.\n", v));
		return EINVAL;
	}
}
1205
1206 static void
ipsec4_get_ulp(struct mbuf * m,struct secpolicyindex * spidx,int needport)1207 ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1208 {
1209 struct ip ip;
1210 struct ip6_ext ip6e;
1211 u_int8_t nxt;
1212 int off;
1213 struct tcphdr th;
1214 struct udphdr uh;
1215
1216 /* sanity check */
1217 if (m == NULL) {
1218 panic("ipsec4_get_ulp: NULL pointer was passed.");
1219 }
1220 if (m->m_pkthdr.len < sizeof(ip)) {
1221 panic("ipsec4_get_ulp: too short");
1222 }
1223
1224 /* set default */
1225 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1226 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1227 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1228
1229 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1230 /* ip_input() flips it into host endian XXX need more checking */
1231 if (ip.ip_off & (IP_MF | IP_OFFMASK)) {
1232 return;
1233 }
1234
1235 nxt = ip.ip_p;
1236 #ifdef _IP_VHL
1237 off = _IP_VHL_HL(ip->ip_vhl) << 2;
1238 #else
1239 off = ip.ip_hl << 2;
1240 #endif
1241 while (off < m->m_pkthdr.len) {
1242 switch (nxt) {
1243 case IPPROTO_TCP:
1244 spidx->ul_proto = nxt;
1245 if (!needport) {
1246 return;
1247 }
1248 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1249 return;
1250 }
1251 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1252 ((struct sockaddr_in *)&spidx->src)->sin_port =
1253 th.th_sport;
1254 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1255 th.th_dport;
1256 return;
1257 case IPPROTO_UDP:
1258 spidx->ul_proto = nxt;
1259 if (!needport) {
1260 return;
1261 }
1262 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1263 return;
1264 }
1265 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1266 ((struct sockaddr_in *)&spidx->src)->sin_port =
1267 uh.uh_sport;
1268 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1269 uh.uh_dport;
1270 return;
1271 case IPPROTO_AH:
1272 if (off + sizeof(ip6e) > m->m_pkthdr.len) {
1273 return;
1274 }
1275 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1276 off += (ip6e.ip6e_len + 2) << 2;
1277 nxt = ip6e.ip6e_nxt;
1278 break;
1279 case IPPROTO_ICMP:
1280 default:
1281 /* XXX intermediate headers??? */
1282 spidx->ul_proto = nxt;
1283 return;
1284 }
1285 }
1286 }
1287
1288 /* assumes that m is sane */
1289 static int
ipsec4_setspidx_ipaddr(struct mbuf * m,struct secpolicyindex * spidx)1290 ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1291 {
1292 struct ip *ip = NULL;
1293 struct ip ipbuf;
1294 struct sockaddr_in *sin;
1295
1296 if (m->m_len >= sizeof(*ip)) {
1297 ip = mtod(m, struct ip *);
1298 } else {
1299 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1300 ip = &ipbuf;
1301 }
1302
1303 sin = (struct sockaddr_in *)&spidx->src;
1304 bzero(sin, sizeof(*sin));
1305 sin->sin_family = AF_INET;
1306 sin->sin_len = sizeof(struct sockaddr_in);
1307 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1308 spidx->prefs = sizeof(struct in_addr) << 3;
1309
1310 sin = (struct sockaddr_in *)&spidx->dst;
1311 bzero(sin, sizeof(*sin));
1312 sin->sin_family = AF_INET;
1313 sin->sin_len = sizeof(struct sockaddr_in);
1314 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1315 spidx->prefd = sizeof(struct in_addr) << 3;
1316
1317 return 0;
1318 }
1319
/*
 * Extract the upper-layer protocol (and, when 'needport' is set, the
 * TCP/UDP ports) from an IPv6 packet into spidx, walking past any
 * extension headers.  Defaults are "any" proto/ports.
 */
static void
ipsec6_get_ulp(struct mbuf *m,
    struct secpolicyindex *spidx,
    int needport)
{
	int off, nxt;
	struct tcphdr th;
	struct udphdr uh;

	/* sanity check */
	if (m == NULL) {
		panic("ipsec6_get_ulp: NULL pointer was passed.");
	}

	KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
	    printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));

	/* set default: any upper-layer protocol, wildcard ports */
	spidx->ul_proto = IPSEC_ULPROTO_ANY;
	((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
	((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;

	/* walk the extension-header chain to the last (upper-layer) header */
	nxt = -1;
	off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
	if (off < 0 || m->m_pkthdr.len < off) {
		/* malformed chain: keep the wildcard defaults */
		return;
	}

	VERIFY(nxt <= UINT8_MAX);
	switch (nxt) {
	case IPPROTO_TCP:
		spidx->ul_proto = (u_int8_t)nxt;
		if (!needport) {
			break;
		}
		/* ignore a truncated transport header */
		if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
			break;
		}
		m_copydata(m, off, sizeof(th), (caddr_t)&th);
		((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
		((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
		break;
	case IPPROTO_UDP:
		spidx->ul_proto = (u_int8_t)nxt;
		if (!needport) {
			break;
		}
		if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
			break;
		}
		m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
		((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
		((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
		break;
	case IPPROTO_ICMPV6:
	default:
		/* XXX intermediate headers??? */
		spidx->ul_proto = (u_int8_t)nxt;
		break;
	}
}
1381
1382 /* assumes that m is sane */
static int
ipsec6_setspidx_ipaddr(struct mbuf *m,
    struct secpolicyindex *spidx)
{
	struct ip6_hdr *ip6 = NULL;
	struct ip6_hdr ip6buf;
	struct sockaddr_in6 *sin6;

	/* get a contiguous view of the IPv6 header, copying if split */
	if (m->m_len >= sizeof(*ip6)) {
		ip6 = mtod(m, struct ip6_hdr *);
	} else {
		m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
		ip6 = &ip6buf;
	}

	/* source address, with scope-id recovery for link-local */
	sin6 = (struct sockaddr_in6 *)&spidx->src;
	bzero(sin6, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(struct sockaddr_in6);
	bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
	if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		/* recover the interface scope recorded on the mbuf */
		if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
			ip6_getsrcifaddr_info(m, &sin6->sin6_scope_id, NULL);
		} else if (m->m_pkthdr.pkt_ext_flags & PKTF_EXT_OUTPUT_SCOPE) {
			sin6->sin6_scope_id = ip6_output_getsrcifscope(m);
		}
		in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
		if (in6_embedded_scope) {
			/* scope id is embedded in addr word 1: extract, then clear */
			sin6->sin6_addr.s6_addr16[1] = 0;
			sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
		}
	}
	spidx->prefs = sizeof(struct in6_addr) << 3; /* full /128 */

	/* destination address, same treatment */
	sin6 = (struct sockaddr_in6 *)&spidx->dst;
	bzero(sin6, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(struct sockaddr_in6);
	bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
	if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
		if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
			ip6_getdstifaddr_info(m, &sin6->sin6_scope_id, NULL);
		} else if (m->m_pkthdr.pkt_ext_flags & PKTF_EXT_OUTPUT_SCOPE) {
			sin6->sin6_scope_id = ip6_output_getdstifscope(m);
		}
		in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
		if (in6_embedded_scope) {
			sin6->sin6_addr.s6_addr16[1] = 0;
			sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
		}
	}
	spidx->prefd = sizeof(struct in6_addr) << 3; /* full /128 */

	return 0;
}
1438
1439 static struct inpcbpolicy *
ipsec_newpcbpolicy(void)1440 ipsec_newpcbpolicy(void)
1441 {
1442 struct inpcbpolicy *p;
1443
1444 p = kalloc_type(struct inpcbpolicy, Z_WAITOK | Z_ZERO);
1445 return p;
1446 }
1447
/* release a per-PCB policy container allocated by ipsec_newpcbpolicy() */
static void
ipsec_delpcbpolicy(struct inpcbpolicy *p)
{
	kfree_type(struct inpcbpolicy, p);
}
1453
1454 /* initialize policy in PCB */
1455 int
ipsec_init_policy(struct socket * so,struct inpcbpolicy ** pcb_sp)1456 ipsec_init_policy(struct socket *so,
1457 struct inpcbpolicy **pcb_sp)
1458 {
1459 struct inpcbpolicy *new;
1460
1461 /* sanity check. */
1462 if (so == NULL || pcb_sp == NULL) {
1463 panic("ipsec_init_policy: NULL pointer was passed.");
1464 }
1465
1466 new = ipsec_newpcbpolicy();
1467 if (new == NULL) {
1468 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1469 return ENOBUFS;
1470 }
1471
1472 #ifdef __APPLE__
1473 if (kauth_cred_issuser(so->so_cred))
1474 #else
1475 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1476 #endif
1477 { new->priv = 1;} else {
1478 new->priv = 0;
1479 }
1480
1481 if ((new->sp_in = key_newsp()) == NULL) {
1482 ipsec_delpcbpolicy(new);
1483 return ENOBUFS;
1484 }
1485 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1486 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1487
1488 if ((new->sp_out = key_newsp()) == NULL) {
1489 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1490 ipsec_delpcbpolicy(new);
1491 return ENOBUFS;
1492 }
1493 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1494 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1495
1496 *pcb_sp = new;
1497
1498 return 0;
1499 }
1500
1501 /* copy old ipsec policy into new */
1502 int
ipsec_copy_policy(struct inpcbpolicy * old,struct inpcbpolicy * new)1503 ipsec_copy_policy(struct inpcbpolicy *old,
1504 struct inpcbpolicy *new)
1505 {
1506 struct secpolicy *sp;
1507
1508 if (ipsec_bypass != 0) {
1509 return 0;
1510 }
1511
1512 sp = ipsec_deepcopy_policy(old->sp_in);
1513 if (sp) {
1514 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1515 new->sp_in = sp;
1516 } else {
1517 return ENOBUFS;
1518 }
1519
1520 sp = ipsec_deepcopy_policy(old->sp_out);
1521 if (sp) {
1522 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1523 new->sp_out = sp;
1524 } else {
1525 return ENOBUFS;
1526 }
1527
1528 new->priv = old->priv;
1529
1530 return 0;
1531 }
1532
1533 /* deep-copy a policy in PCB */
1534 static struct secpolicy *
ipsec_deepcopy_policy(struct secpolicy * src)1535 ipsec_deepcopy_policy(struct secpolicy *src)
1536 {
1537 struct ipsecrequest *newchain = NULL;
1538 struct ipsecrequest *p;
1539 struct ipsecrequest **q;
1540 struct ipsecrequest *r;
1541 struct secpolicy *dst;
1542
1543 if (src == NULL) {
1544 return NULL;
1545 }
1546 dst = key_newsp();
1547 if (dst == NULL) {
1548 return NULL;
1549 }
1550
1551 /*
1552 * deep-copy IPsec request chain. This is required since struct
1553 * ipsecrequest is not reference counted.
1554 */
1555 q = &newchain;
1556 for (p = src->req; p; p = p->next) {
1557 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1558 M_SECA, M_WAITOK | M_ZERO);
1559 if (*q == NULL) {
1560 goto fail;
1561 }
1562 (*q)->next = NULL;
1563
1564 (*q)->saidx.proto = p->saidx.proto;
1565 (*q)->saidx.mode = p->saidx.mode;
1566 (*q)->level = p->level;
1567 (*q)->saidx.reqid = p->saidx.reqid;
1568
1569 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1570 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1571
1572 (*q)->sp = dst;
1573
1574 q = &((*q)->next);
1575 }
1576
1577 dst->req = newchain;
1578 dst->state = src->state;
1579 dst->policy = src->policy;
1580 /* do not touch the refcnt fields */
1581
1582 return dst;
1583
1584 fail:
1585 for (p = newchain; p; p = r) {
1586 r = p->next;
1587 FREE(p, M_SECA);
1588 p = NULL;
1589 }
1590 key_freesp(dst, KEY_SADB_UNLOCKED);
1591 return NULL;
1592 }
1593
1594 /* set policy and ipsec request if present. */
/*
 * Replace *pcb_sp with a new security policy parsed from the
 * PF_KEY sadb_x_policy message in 'request' (length 'len').
 * IPSEC, ENTRUST and BYPASS are accepted; BYPASS needs privilege
 * ('priv' non-zero).  The previous *pcb_sp reference is released.
 * Returns 0, or EINVAL/EACCES/key_msg2sp() error.
 */
static int
ipsec_set_policy(struct secpolicy **pcb_sp,
    __unused int optname,
    caddr_t request,
    size_t len,
    int priv)
{
	struct sadb_x_policy *xpl;
	struct secpolicy *newsp = NULL;
	int error;

	/* sanity check. */
	if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) {
		return EINVAL;
	}
	if (len < sizeof(*xpl)) {
		return EINVAL;
	}
	xpl = (struct sadb_x_policy *)(void *)request;

	KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
	    printf("ipsec_set_policy: passed policy\n");
	    kdebug_sadb_x_policy((struct sadb_ext *)xpl));

	/* check policy type */
	/* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
	if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
	    || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) {
		return EINVAL;
	}

	/* check privileged socket: only privileged callers may BYPASS */
	if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
		return EACCES;
	}

	/* allocation new SP entry */
	if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) {
		return error;
	}

	newsp->state = IPSEC_SPSTATE_ALIVE;

	/* clear old SP and set new SP */
	key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
	*pcb_sp = newsp;
	KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
	    printf("ipsec_set_policy: new policy\n");
	    kdebug_secpolicy(newsp));

	return 0;
}
1647
1648 int
ipsec4_set_policy(struct inpcb * inp,int optname,caddr_t request,size_t len,int priv)1649 ipsec4_set_policy(struct inpcb *inp,
1650 int optname,
1651 caddr_t request,
1652 size_t len,
1653 int priv)
1654 {
1655 struct sadb_x_policy *xpl;
1656 struct secpolicy **pcb_sp;
1657 int error = 0;
1658 struct sadb_x_policy xpl_aligned_buf;
1659 u_int8_t *xpl_unaligned;
1660
1661 /* sanity check. */
1662 if (inp == NULL || request == NULL) {
1663 return EINVAL;
1664 }
1665 if (len < sizeof(*xpl)) {
1666 return EINVAL;
1667 }
1668 xpl = (struct sadb_x_policy *)(void *)request;
1669
1670 /* This is a new mbuf allocated by soopt_getm() */
1671 if (IPSEC_IS_P2ALIGNED(xpl)) {
1672 xpl_unaligned = NULL;
1673 } else {
1674 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1675 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1676 xpl = (__typeof__(xpl)) & xpl_aligned_buf;
1677 }
1678
1679 if (inp->inp_sp == NULL) {
1680 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1681 if (error) {
1682 return error;
1683 }
1684 }
1685
1686 /* select direction */
1687 switch (xpl->sadb_x_policy_dir) {
1688 case IPSEC_DIR_INBOUND:
1689 pcb_sp = &inp->inp_sp->sp_in;
1690 break;
1691 case IPSEC_DIR_OUTBOUND:
1692 pcb_sp = &inp->inp_sp->sp_out;
1693 break;
1694 default:
1695 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1696 xpl->sadb_x_policy_dir));
1697 return EINVAL;
1698 }
1699
1700 /* turn bypass off */
1701 if (ipsec_bypass != 0) {
1702 ipsec_bypass = 0;
1703 }
1704
1705 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1706 }
1707
1708 /* delete policy in PCB */
1709 int
ipsec4_delete_pcbpolicy(struct inpcb * inp)1710 ipsec4_delete_pcbpolicy(struct inpcb *inp)
1711 {
1712 /* sanity check. */
1713 if (inp == NULL) {
1714 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.");
1715 }
1716
1717 if (inp->inp_sp == NULL) {
1718 return 0;
1719 }
1720
1721 if (inp->inp_sp->sp_in != NULL) {
1722 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1723 inp->inp_sp->sp_in = NULL;
1724 }
1725
1726 if (inp->inp_sp->sp_out != NULL) {
1727 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1728 inp->inp_sp->sp_out = NULL;
1729 }
1730
1731 ipsec_delpcbpolicy(inp->inp_sp);
1732 inp->inp_sp = NULL;
1733
1734 return 0;
1735 }
1736
1737 int
ipsec6_set_policy(struct in6pcb * in6p,int optname,caddr_t request,size_t len,int priv)1738 ipsec6_set_policy(struct in6pcb *in6p,
1739 int optname,
1740 caddr_t request,
1741 size_t len,
1742 int priv)
1743 {
1744 struct sadb_x_policy *xpl;
1745 struct secpolicy **pcb_sp;
1746 int error = 0;
1747 struct sadb_x_policy xpl_aligned_buf;
1748 u_int8_t *xpl_unaligned;
1749
1750 /* sanity check. */
1751 if (in6p == NULL || request == NULL) {
1752 return EINVAL;
1753 }
1754 if (len < sizeof(*xpl)) {
1755 return EINVAL;
1756 }
1757 xpl = (struct sadb_x_policy *)(void *)request;
1758
1759 /* This is a new mbuf allocated by soopt_getm() */
1760 if (IPSEC_IS_P2ALIGNED(xpl)) {
1761 xpl_unaligned = NULL;
1762 } else {
1763 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1764 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1765 xpl = (__typeof__(xpl)) & xpl_aligned_buf;
1766 }
1767
1768 if (in6p->in6p_sp == NULL) {
1769 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1770 if (error) {
1771 return error;
1772 }
1773 }
1774
1775 /* select direction */
1776 switch (xpl->sadb_x_policy_dir) {
1777 case IPSEC_DIR_INBOUND:
1778 pcb_sp = &in6p->in6p_sp->sp_in;
1779 break;
1780 case IPSEC_DIR_OUTBOUND:
1781 pcb_sp = &in6p->in6p_sp->sp_out;
1782 break;
1783 default:
1784 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1785 xpl->sadb_x_policy_dir));
1786 return EINVAL;
1787 }
1788
1789 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1790 }
1791
1792 int
ipsec6_delete_pcbpolicy(struct in6pcb * in6p)1793 ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1794 {
1795 /* sanity check. */
1796 if (in6p == NULL) {
1797 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.");
1798 }
1799
1800 if (in6p->in6p_sp == NULL) {
1801 return 0;
1802 }
1803
1804 if (in6p->in6p_sp->sp_in != NULL) {
1805 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1806 in6p->in6p_sp->sp_in = NULL;
1807 }
1808
1809 if (in6p->in6p_sp->sp_out != NULL) {
1810 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1811 in6p->in6p_sp->sp_out = NULL;
1812 }
1813
1814 ipsec_delpcbpolicy(in6p->in6p_sp);
1815 in6p->in6p_sp = NULL;
1816
1817 return 0;
1818 }
1819
1820 /*
1821 * return current level.
1822 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned.
1823 */
u_int
ipsec_get_reqlevel(struct ipsecrequest *isr)
{
	u_int level = 0;
	u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;

	/* sanity check */
	if (isr == NULL || isr->sp == NULL) {
		panic("ipsec_get_reqlevel: NULL pointer is passed.");
	}
	if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
	    != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) {
		panic("ipsec_get_reqlevel: family mismatched.");
	}

	/* XXX note that we have ipseclog() expanded here - code sync issue */
	/*
	 * Sanitize a sysctl-settable default level: anything other than
	 * USE/REQUIRE/UNIQUE is coerced (with a log when ipsec_debug) to
	 * REQUIRE.  NB: the macro assigns back into its argument.
	 */
#define IPSEC_CHECK_DEFAULT(lev) \
	(((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
	  && (lev) != IPSEC_LEVEL_UNIQUE) \
	 ? (ipsec_debug \
	    ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
	        (lev), IPSEC_LEVEL_REQUIRE) \
	    : (void)0), \
	 (lev) = IPSEC_LEVEL_REQUIRE, \
	 (lev) \
	 : (lev))

	/* set default level */
	switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
	case AF_INET:
		esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
		esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
		ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
		ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
		break;
	case AF_INET6:
		esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
		esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
		ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
		ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
		break;
	default:
		panic("key_get_reqlevel: Unknown family. %d",
		    ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
	}

#undef IPSEC_CHECK_DEFAULT

	/* set level */
	switch (isr->level) {
	case IPSEC_LEVEL_DEFAULT:
		/* DEFAULT resolves to the per-proto/per-mode system default */
		switch (isr->saidx.proto) {
		case IPPROTO_ESP:
			if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
				level = esp_net_deflev;
			} else {
				level = esp_trans_deflev;
			}
			break;
		case IPPROTO_AH:
			if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
				level = ah_net_deflev;
			} else {
				level = ah_trans_deflev;
			}
			break;
		case IPPROTO_IPCOMP:
			ipseclog((LOG_ERR, "ipsec_get_reqlevel: "
			    "still got IPCOMP - exiting\n"));
			break;
		default:
			panic("ipsec_get_reqlevel: "
			    "Illegal protocol defined %u\n",
			    isr->saidx.proto);
		}
		break;

	case IPSEC_LEVEL_USE:
	case IPSEC_LEVEL_REQUIRE:
		level = isr->level;
		break;
	case IPSEC_LEVEL_UNIQUE:
		/* UNIQUE is reported to callers as REQUIRE */
		level = IPSEC_LEVEL_REQUIRE;
		break;

	default:
		panic("ipsec_get_reqlevel: Illegal IPsec level %u",
		    isr->level);
	}

	return level;
}
1916
1917 /*
1918 * Check AH/ESP integrity.
1919 * OUT:
1920 * 0: valid
1921 * 1: invalid
1922 */
/*
 * Decide whether an inbound packet must be rejected under policy 'sp':
 * returns 1 (reject) when the policy demands protection the packet did
 * not receive, judged from the M_DECRYPTED/M_AUTHIPDGM/M_AUTHIPHDR
 * mbuf flags set by the ESP/AH input paths.
 */
static int
ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
{
	struct ipsecrequest *isr;
	u_int level;
	int need_auth, need_conf, need_icv;

	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("ipsec_in_reject: using SP\n");
	    kdebug_secpolicy(sp));

	/* check policy */
	switch (sp->policy) {
	case IPSEC_POLICY_DISCARD:
	case IPSEC_POLICY_GENERATE:
		return 1;
	case IPSEC_POLICY_BYPASS:
	case IPSEC_POLICY_NONE:
		return 0;

	case IPSEC_POLICY_IPSEC:
		break;

	case IPSEC_POLICY_ENTRUST:
	default:
		panic("ipsec_hdrsiz: Invalid policy found. %d", sp->policy);
	}

	need_auth = 0;
	need_conf = 0;
	need_icv = 0;

	/* XXX should compare policy against ipsec header history */

	/* accumulate requirements over all requests in the policy */
	for (isr = sp->req; isr != NULL; isr = isr->next) {
		/* get current level */
		level = ipsec_get_reqlevel(isr);

		switch (isr->saidx.proto) {
		case IPPROTO_ESP:
			if (level == IPSEC_LEVEL_REQUIRE) {
				need_conf++;

#if 0
/* this won't work with multiple input threads - isr->sav would change
 * with every packet and is not necessarily related to the current packet
 * being processed. If ESP processing is required - the esp code should
 * make sure that the integrity check is present and correct. I don't see
 * why it would be necessary to check for the presence of the integrity
 * check value here. I think this is just wrong.
 * isr->sav has been removed.
 * %%%%%% this needs to be re-worked at some point but I think the code below can
 * be ignored for now.
 */
				if (isr->sav != NULL
				    && isr->sav->flags == SADB_X_EXT_NONE
				    && isr->sav->alg_auth != SADB_AALG_NONE) {
					need_icv++;
				}
#endif
			}
			break;
		case IPPROTO_AH:
			if (level == IPSEC_LEVEL_REQUIRE) {
				need_auth++;
				need_icv++;
			}
			break;
		case IPPROTO_IPCOMP:
			/*
			 * we don't really care, as IPcomp document says that
			 * we shouldn't compress small packets, IPComp policy
			 * should always be treated as being in "use" level.
			 */
			break;
		}
	}

	KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
	    printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
	    need_auth, need_conf, need_icv, m->m_flags));

	/* reject if any required protection is missing from the packet */
	if ((need_conf && !(m->m_flags & M_DECRYPTED))
	    || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
	    || (need_auth && !(m->m_flags & M_AUTHIPHDR))) {
		return 1;
	}

	return 0;
}
2013
2014 /*
2015 * Check AH/ESP integrity.
2016 * This function is called from tcp_input(), udp_input(),
2017 * and {ah,esp}4_input for tunnel mode
2018 */
2019 int
ipsec4_in_reject_so(struct mbuf * m,struct socket * so)2020 ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
2021 {
2022 struct secpolicy *sp = NULL;
2023 int error;
2024 int result;
2025
2026 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2027 /* sanity check */
2028 if (m == NULL) {
2029 return 0; /* XXX should be panic ? */
2030 }
2031 /* get SP for this packet.
2032 * When we are called from ip_forward(), we call
2033 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2034 */
2035 if (so == NULL) {
2036 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2037 } else {
2038 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2039 }
2040
2041 if (sp == NULL) {
2042 return 0; /* XXX should be panic ?
2043 * -> No, there may be error. */
2044 }
2045 result = ipsec_in_reject(sp, m);
2046 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2047 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
2048 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2049 key_freesp(sp, KEY_SADB_UNLOCKED);
2050
2051 return result;
2052 }
2053
2054 int
ipsec4_in_reject(struct mbuf * m,struct inpcb * inp)2055 ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
2056 {
2057 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2058 if (inp == NULL) {
2059 return ipsec4_in_reject_so(m, NULL);
2060 }
2061 if (inp->inp_socket) {
2062 return ipsec4_in_reject_so(m, inp->inp_socket);
2063 } else {
2064 panic("ipsec4_in_reject: invalid inpcb/socket");
2065 }
2066
2067 /* NOTREACHED */
2068 return 0;
2069 }
2070
2071 /*
2072 * Check AH/ESP integrity.
2073 * This function is called from tcp6_input(), udp6_input(),
2074 * and {ah,esp}6_input for tunnel mode
2075 */
2076 int
ipsec6_in_reject_so(struct mbuf * m,struct socket * so)2077 ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2078 {
2079 struct secpolicy *sp = NULL;
2080 int error;
2081 int result;
2082
2083 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2084 /* sanity check */
2085 if (m == NULL) {
2086 return 0; /* XXX should be panic ? */
2087 }
2088 /* get SP for this packet.
2089 * When we are called from ip_forward(), we call
2090 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2091 */
2092 if (so == NULL) {
2093 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2094 } else {
2095 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2096 }
2097
2098 if (sp == NULL) {
2099 return 0; /* XXX should be panic ? */
2100 }
2101 result = ipsec_in_reject(sp, m);
2102 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2103 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2104 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2105 key_freesp(sp, KEY_SADB_UNLOCKED);
2106
2107 return result;
2108 }
2109
2110 int
ipsec6_in_reject(struct mbuf * m,struct in6pcb * in6p)2111 ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2112 {
2113 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2114 if (in6p == NULL) {
2115 return ipsec6_in_reject_so(m, NULL);
2116 }
2117 if (in6p->in6p_socket) {
2118 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2119 } else {
2120 panic("ipsec6_in_reject: invalid in6p/socket");
2121 }
2122
2123 /* NOTREACHED */
2124 return 0;
2125 }
2126
2127 /*
2128 * compute the byte size to be occupied by IPsec header.
2129 * in case it is tunneled, it includes the size of outer IP header.
 * NOTE: the SP passed in is NOT freed here; the callers (e.g.
 * ipsec4_hdrsiz()/ipsec6_hdrsiz()) free it with key_freesp() after use.
2131 */
size_t
ipsec_hdrsiz(struct secpolicy *sp)
{
	struct ipsecrequest *isr;
	size_t siz, clen;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("ipsec_hdrsiz: using SP\n");
	    kdebug_secpolicy(sp));

	/* check policy */
	switch (sp->policy) {
	case IPSEC_POLICY_DISCARD:
	case IPSEC_POLICY_GENERATE:
	case IPSEC_POLICY_BYPASS:
	case IPSEC_POLICY_NONE:
		/* no IPsec processing for these policies, so no overhead */
		return 0;

	case IPSEC_POLICY_IPSEC:
		break;

	case IPSEC_POLICY_ENTRUST:
	default:
		panic("ipsec_hdrsiz: Invalid policy found. %d", sp->policy);
	}

	siz = 0;

	/* sum the header cost of every request in the policy's bundle */
	for (isr = sp->req; isr != NULL; isr = isr->next) {
		clen = 0;

		switch (isr->saidx.proto) {
		case IPPROTO_ESP:
#if IPSEC_ESP
			clen = esp_hdrsiz(isr);
#else
			clen = 0; /*XXX*/
#endif
			break;
		case IPPROTO_AH:
			clen = ah_hdrsiz(isr);
			break;
		default:
			/* unknown proto contributes 0 bytes; log and continue */
			ipseclog((LOG_ERR, "ipsec_hdrsiz: "
			    "unknown protocol %u\n",
			    isr->saidx.proto));
			break;
		}

		/* tunnel mode adds a full outer IP header on top */
		if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
			switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
			case AF_INET:
				clen += sizeof(struct ip);
				break;
			case AF_INET6:
				clen += sizeof(struct ip6_hdr);
				break;
			default:
				ipseclog((LOG_ERR, "ipsec_hdrsiz: "
				    "unknown AF %d in IPsec tunnel SA\n",
				    ((struct sockaddr *)&isr->saidx.dst)->sa_family));
				break;
			}
		}
		siz += clen;
	}

	return siz;
}
2202
2203 /* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2204 size_t
ipsec4_hdrsiz(struct mbuf * m,u_int8_t dir,struct inpcb * inp)2205 ipsec4_hdrsiz(struct mbuf *m, u_int8_t dir, struct inpcb *inp)
2206 {
2207 struct secpolicy *sp = NULL;
2208 int error;
2209 size_t size;
2210
2211 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2212 /* sanity check */
2213 if (m == NULL) {
2214 return 0; /* XXX should be panic ? */
2215 }
2216 if (inp != NULL && inp->inp_socket == NULL) {
2217 panic("ipsec4_hdrsize: why is socket NULL but there is PCB.");
2218 }
2219
2220 /* get SP for this packet.
2221 * When we are called from ip_forward(), we call
2222 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2223 */
2224 if (inp == NULL) {
2225 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2226 } else {
2227 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2228 }
2229
2230 if (sp == NULL) {
2231 return 0; /* XXX should be panic ? */
2232 }
2233 size = ipsec_hdrsiz(sp);
2234 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2235 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2236 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2237 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2238 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2239 key_freesp(sp, KEY_SADB_UNLOCKED);
2240
2241 return size;
2242 }
2243
2244 /* This function is called from ipsec6_hdrsize_tcp(),
 * and maybe from ip6_forward().
2246 */
2247 size_t
ipsec6_hdrsiz(struct mbuf * m,u_int8_t dir,struct in6pcb * in6p)2248 ipsec6_hdrsiz(struct mbuf *m, u_int8_t dir, struct in6pcb *in6p)
2249 {
2250 struct secpolicy *sp = NULL;
2251 int error;
2252 size_t size;
2253
2254 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2255 /* sanity check */
2256 if (m == NULL) {
2257 return 0; /* XXX shoud be panic ? */
2258 }
2259 if (in6p != NULL && in6p->in6p_socket == NULL) {
2260 panic("ipsec6_hdrsize: why is socket NULL but there is PCB.");
2261 }
2262
2263 /* get SP for this packet */
2264 /* XXX Is it right to call with IP_FORWARDING. */
2265 if (in6p == NULL) {
2266 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2267 } else {
2268 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2269 }
2270
2271 if (sp == NULL) {
2272 return 0;
2273 }
2274 size = ipsec_hdrsiz(sp);
2275 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2276 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2277 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2278 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2279 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2280 key_freesp(sp, KEY_SADB_UNLOCKED);
2281
2282 return size;
2283 }
2284
2285 /*
2286 * encapsulate for ipsec tunnel.
2287 * ip->ip_src must be fixed later on.
2288 */
int
ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
{
	struct ip *oip;   /* inner (original) IPv4 header, after the move */
	struct ip *ip;    /* outer IPv4 header, built in the first mbuf */
	size_t plen;      /* original packet length = inner payload length */
	u_int32_t hlen;   /* inner IPv4 header length in bytes */

	/* can't tunnel between different AFs */
	if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
	    != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
	    || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
		m_freem(m);
		return EINVAL;
	}

	/* caller must give us at least a full IP header in the first mbuf */
	if (m->m_len < sizeof(*ip)) {
		panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
	}

	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* caller must have split the header into its own mbuf (see ipsec4_splithdr) */
	if (m->m_len != hlen) {
		panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
	}

	/*
	 * generate header checksum now: the inner header becomes opaque
	 * payload after encapsulation and will not be touched again.
	 */
	ip->ip_sum = 0;
#ifdef _IP_VHL
	ip->ip_sum = in_cksum(m, hlen);
#else
	ip->ip_sum = in_cksum(m, hlen);
#endif

	plen = m->m_pkthdr.len;

	/*
	 * grow the mbuf to accomodate the new IPv4 header.
	 * NOTE: IPv4 options will never be copied.
	 */
	if (M_LEADINGSPACE(m->m_next) < hlen) {
		/* no headroom in the second mbuf: insert a fresh one */
		struct mbuf *n;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		n->m_len = hlen;
		n->m_next = m->m_next;
		m->m_next = n;
		m->m_pkthdr.len += hlen;
		oip = mtod(n, struct ip *);
	} else {
		/* second mbuf has headroom: extend it backwards */
		m->m_next->m_len += hlen;
		m->m_next->m_data -= hlen;
		m->m_pkthdr.len += hlen;
		oip = mtod(m->m_next, struct ip *);
	}
	ip = mtod(m, struct ip *);
	/* move the inner header (incl. options) into the second mbuf */
	ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
	/* first mbuf now holds only the (option-less) outer header */
	m->m_len = sizeof(struct ip);
	m->m_pkthdr.len -= (hlen - sizeof(struct ip));

	/* construct new IPv4 header. see RFC 2401 5.1.2.1 */
	/* ECN consideration. */
	ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
#ifdef _IP_VHL
	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
#else
	ip->ip_hl = sizeof(struct ip) >> 2;
#endif
	/* outer packet is never a fragment: clear offset and MF */
	ip->ip_off &= htons(~IP_OFFMASK);
	ip->ip_off &= htons(~IP_MF);
	switch (ip4_ipsec_dfbit) {
	case 0: /* clear DF bit */
		ip->ip_off &= htons(~IP_DF);
		break;
	case 1: /* set DF bit */
		ip->ip_off |= htons(IP_DF);
		break;
	default: /* copy DF bit */
		break;
	}
	ip->ip_p = IPPROTO_IPIP;
	if (plen + sizeof(struct ip) < IP_MAXPACKET) {
		ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
	} else {
		ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
		    "leave ip_len as is (invalid packet)\n"));
	}
	/* RFC 6864: atomic datagrams may use IP ID 0 */
	if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) {
		ip->ip_id = 0;
	} else {
		ip->ip_id = ip_randomid((uint64_t)m);
	}
	/* outer addresses come from the SA endpoints */
	bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
	    &ip->ip_src, sizeof(ip->ip_src));
	bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
	    &ip->ip_dst, sizeof(ip->ip_dst));
	ip->ip_ttl = IPDEFTTL;

	/* XXX Should ip_src be updated later ? */

	return 0;
}
2399
2400
/*
 * Encapsulate for IPv6-in-IPv6 tunnel mode: the inner IPv6 header is moved
 * into the second mbuf and the first mbuf is rewritten as the outer IPv6
 * header built from the SA endpoints. On error the mbuf chain is freed.
 * Returns 0 on success or an errno value.
 */
int
ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
{
	struct ip6_hdr *oip6;  /* inner (original) header, after the move */
	struct ip6_hdr *ip6;   /* outer header, built in the first mbuf */
	size_t plen;           /* original packet length = inner payload length */

	/* can't tunnel between different AFs */
	if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
	    != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
	    || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
		m_freem(m);
		return EINVAL;
	}

	plen = m->m_pkthdr.len;

	/*
	 * grow the mbuf to accomodate the new IPv6 header.
	 */
	/* caller must have split the header into its own mbuf (see ipsec6_splithdr) */
	if (m->m_len != sizeof(struct ip6_hdr)) {
		panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
	}
	if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
		/* no headroom in the second mbuf: insert a fresh one */
		struct mbuf *n;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		n->m_len = sizeof(struct ip6_hdr);
		n->m_next = m->m_next;
		m->m_next = n;
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip6 = mtod(n, struct ip6_hdr *);
	} else {
		/* second mbuf has headroom: extend it backwards */
		m->m_next->m_len += sizeof(struct ip6_hdr);
		m->m_next->m_data -= sizeof(struct ip6_hdr);
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip6 = mtod(m->m_next, struct ip6_hdr *);
	}
	ip6 = mtod(m, struct ip6_hdr *);
	/* copy the inner header into its new spot in the second mbuf */
	ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));

	/* Fake link-local scope-class addresses */
	/* strip embedded scope-ids so the inner header is wire-clean */
	if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) {
		oip6->ip6_src.s6_addr16[1] = 0;
	}
	if (in6_embedded_scope && IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) {
		oip6->ip6_dst.s6_addr16[1] = 0;
	}

	/* construct new IPv6 header. see RFC 2401 5.1.2.2 */
	/* ECN consideration. */
	ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
	if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
		ip6->ip6_plen = htons((u_int16_t)plen);
	} else {
		/* ip6->ip6_plen will be updated in ip6_output() */
	}
	ip6->ip6_nxt = IPPROTO_IPV6;
	/* outer addresses come from the SA endpoints */
	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
	    &ip6->ip6_src, sizeof(ip6->ip6_src));
	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
	    &ip6->ip6_dst, sizeof(ip6->ip6_dst));
	ip6->ip6_hlim = IPV6_DEFHLIM;

	/* XXX Should ip6_src be updated later ? */

	return 0;
}
2472
2473 static int
ipsec64_encapsulate(struct mbuf * m,struct secasvar * sav,u_int32_t dscp_mapping)2474 ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav, u_int32_t dscp_mapping)
2475 {
2476 struct ip6_hdr *ip6, *ip6i;
2477 struct ip *ip;
2478 size_t plen;
2479
2480 /* tunneling over IPv4 */
2481 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2482 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2483 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2484 m_freem(m);
2485 return EINVAL;
2486 }
2487
2488 plen = m->m_pkthdr.len;
2489 ip6 = mtod(m, struct ip6_hdr *);
2490 /*
2491 * grow the mbuf to accomodate the new IPv4 header.
2492 */
2493 if (m->m_len != sizeof(struct ip6_hdr)) {
2494 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2495 }
2496 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2497 struct mbuf *n;
2498 MGET(n, M_DONTWAIT, MT_DATA);
2499 if (!n) {
2500 m_freem(m);
2501 return ENOBUFS;
2502 }
2503 n->m_len = sizeof(struct ip6_hdr);
2504 n->m_next = m->m_next;
2505 m->m_next = n;
2506 m->m_pkthdr.len += sizeof(struct ip);
2507 ip6i = mtod(n, struct ip6_hdr *);
2508 } else {
2509 m->m_next->m_len += sizeof(struct ip6_hdr);
2510 m->m_next->m_data -= sizeof(struct ip6_hdr);
2511 m->m_pkthdr.len += sizeof(struct ip);
2512 ip6i = mtod(m->m_next, struct ip6_hdr *);
2513 }
2514
2515 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2516 ip = mtod(m, struct ip *);
2517 m->m_len = sizeof(struct ip);
2518 /*
2519 * Fill in some of the IPv4 fields - we don't need all of them
2520 * because the rest will be filled in by ip_output
2521 */
2522 ip->ip_v = IPVERSION;
2523 ip->ip_hl = sizeof(struct ip) >> 2;
2524 ip->ip_id = 0;
2525 ip->ip_sum = 0;
2526 ip->ip_tos = 0;
2527 ip->ip_off = 0;
2528 ip->ip_ttl = IPDEFTTL;
2529 ip->ip_p = IPPROTO_IPV6;
2530
2531 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2532 /* ECN consideration. */
2533 if (dscp_mapping == IPSEC_DSCP_MAPPING_COPY) {
2534 // Copy DSCP bits from inner IP to outer IP packet.
2535 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow);
2536 } else if (dscp_mapping == IPSEC_DSCP_MAPPING_LEGACY) {
2537 // Copy DSCP bits in legacy style.
2538 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2539 }
2540
2541 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2542 ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
2543 } else {
2544 ip->ip_len = htons((u_int16_t)plen);
2545 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2546 "leave ip_len as is (invalid packet)\n"));
2547 }
2548 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2549 &ip->ip_src, sizeof(ip->ip_src));
2550 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2551 &ip->ip_dst, sizeof(ip->ip_dst));
2552
2553 return 0;
2554 }
2555
2556 int
ipsec6_update_routecache_and_output(struct ipsec_output_state * state,struct secasvar * sav)2557 ipsec6_update_routecache_and_output(
2558 struct ipsec_output_state *state,
2559 struct secasvar *sav)
2560 {
2561 struct sockaddr_in6* dst6;
2562 struct route_in6 *ro6;
2563 struct ip6_hdr *ip6;
2564 errno_t error = 0;
2565
2566 int plen;
2567 struct ip6_out_args ip6oa;
2568 struct route_in6 ro6_new;
2569 struct flowadv *adv = NULL;
2570
2571 if (!state->m) {
2572 return EINVAL;
2573 }
2574 ip6 = mtod(state->m, struct ip6_hdr *);
2575
2576 // grab sadb_mutex, before updating sah's route cache
2577 lck_mtx_lock(sadb_mutex);
2578 ro6 = &sav->sah->sa_route;
2579 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2580 if (ro6->ro_rt) {
2581 RT_LOCK(ro6->ro_rt);
2582 }
2583 if (ROUTE_UNUSABLE(ro6) ||
2584 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2585 if (ro6->ro_rt != NULL) {
2586 RT_UNLOCK(ro6->ro_rt);
2587 }
2588 ROUTE_RELEASE(ro6);
2589 }
2590 if (ro6->ro_rt == 0) {
2591 bzero(dst6, sizeof(*dst6));
2592 dst6->sin6_family = AF_INET6;
2593 dst6->sin6_len = sizeof(*dst6);
2594 dst6->sin6_addr = ip6->ip6_dst;
2595 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
2596 if (ro6->ro_rt) {
2597 RT_LOCK(ro6->ro_rt);
2598 }
2599 }
2600 if (ro6->ro_rt == 0) {
2601 ip6stat.ip6s_noroute++;
2602 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2603 error = EHOSTUNREACH;
2604 // release sadb_mutex, after updating sah's route cache
2605 lck_mtx_unlock(sadb_mutex);
2606 return error;
2607 }
2608
2609 /*
2610 * adjust state->dst if tunnel endpoint is offlink
2611 *
2612 * XXX: caching rt_gateway value in the state is
2613 * not really good, since it may point elsewhere
2614 * when the gateway gets modified to a larger
2615 * sockaddr via rt_setgate(). This is currently
2616 * addressed by SA_SIZE roundup in that routine.
2617 */
2618 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
2619 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2620 }
2621 RT_UNLOCK(ro6->ro_rt);
2622 ROUTE_RELEASE(&state->ro);
2623 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
2624 state->dst = (struct sockaddr *)dst6;
2625 state->tunneled = 6;
2626 // release sadb_mutex, after updating sah's route cache
2627 lck_mtx_unlock(sadb_mutex);
2628
2629 state->m = ipsec6_splithdr(state->m);
2630 if (!state->m) {
2631 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2632 error = ENOMEM;
2633 return error;
2634 }
2635
2636 ip6 = mtod(state->m, struct ip6_hdr *);
2637 switch (sav->sah->saidx.proto) {
2638 case IPPROTO_ESP:
2639 #if IPSEC_ESP
2640 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2641 #else
2642 m_freem(state->m);
2643 error = EINVAL;
2644 #endif
2645 break;
2646 case IPPROTO_AH:
2647 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2648 break;
2649 default:
2650 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2651 m_freem(state->m);
2652 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2653 error = EINVAL;
2654 break;
2655 }
2656 if (error) {
2657 // If error, packet already freed by above output routines
2658 state->m = NULL;
2659 return error;
2660 }
2661
2662 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2663 if (plen > IPV6_MAXPACKET) {
2664 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2665 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2666 error = EINVAL;/*XXX*/
2667 return error;
2668 }
2669 ip6 = mtod(state->m, struct ip6_hdr *);
2670 ip6->ip6_plen = htons((u_int16_t)plen);
2671
2672 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2673 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2674
2675 /* Increment statistics */
2676 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, (u_int32_t)mbuf_pkthdr_len(state->m), 0);
2677
2678 /* Send to ip6_output */
2679 bzero(&ro6_new, sizeof(ro6_new));
2680 bzero(&ip6oa, sizeof(ip6oa));
2681 ip6oa.ip6oa_flowadv.code = 0;
2682 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2683 if (state->outgoing_if) {
2684 ip6oa.ip6oa_boundif = state->outgoing_if;
2685 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2686 ip6_output_setsrcifscope(state->m, state->outgoing_if, NULL);
2687 ip6_output_setdstifscope(state->m, state->outgoing_if, NULL);
2688 }
2689
2690 adv = &ip6oa.ip6oa_flowadv;
2691 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2692 state->m = NULL;
2693
2694 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2695 error = ENOBUFS;
2696 ifnet_disable_output(sav->sah->ipsec_if);
2697 return error;
2698 }
2699
2700 return 0;
2701 }
2702
/*
 * Encapsulate an inner IPv4 packet in an outer IPv6 header (IPv4-over-IPv6
 * tunnel mode): the inner IPv4 header is moved into the second mbuf and the
 * first mbuf is rebuilt as the outer IPv6 header from the SA endpoints.
 * On error the mbuf chain is freed. Returns 0 on success or an errno value.
 */
int
ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
{
	struct mbuf *m;
	struct ip6_hdr *ip6;   /* outer IPv6 header, built in the first mbuf */
	struct ip *oip;        /* inner (original) IPv4 header, after the move */
	struct ip *ip;         /* inner IPv4 header, original location */
	size_t plen;           /* original packet length = inner payload length */
	u_int32_t hlen;        /* inner IPv4 header length in bytes */

	m = state->m;
	if (!m) {
		return EINVAL;
	}

	/* can't tunnel between different AFs */
	if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
	    != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
	    || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
		m_freem(m);
		return EINVAL;
	}

	if (m->m_len < sizeof(*ip)) {
		panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
		return EINVAL;
	}

	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* caller must have split the IPv4 header into its own mbuf */
	if (m->m_len != hlen) {
		panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
		return EINVAL;
	}

	/* generate header checksum */
	/* the inner header becomes opaque payload and is not touched again */
	ip->ip_sum = 0;
#ifdef _IP_VHL
	ip->ip_sum = in_cksum(m, hlen);
#else
	ip->ip_sum = in_cksum(m, hlen);
#endif

	plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len

	/*
	 * First move the IPv4 header to the second mbuf in the chain
	 */
	if (M_LEADINGSPACE(m->m_next) < hlen) {
		/* no headroom in the second mbuf: insert a fresh one */
		struct mbuf *n;
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		n->m_len = hlen;
		n->m_next = m->m_next;
		m->m_next = n;
		/*
		 * Net growth is sizeof(struct ip6_hdr): the second mbuf
		 * gains hlen bytes and the first mbuf grows from hlen to
		 * an ip6_hdr below (hlen + (ip6_hdr - hlen)).
		 */
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip = mtod(n, struct ip *);
	} else {
		/* second mbuf has headroom: extend it backwards */
		m->m_next->m_len += hlen;
		m->m_next->m_data -= hlen;
		m->m_pkthdr.len += sizeof(struct ip6_hdr);
		oip = mtod(m->m_next, struct ip *);
	}
	ip = mtod(m, struct ip *);
	ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);

	/*
	 * Grow the first mbuf to accomodate the new IPv6 header.
	 */
	if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
		/* not enough headroom: replace the first mbuf outright */
		struct mbuf *n;
		MGETHDR(n, M_DONTWAIT, MT_HEADER);
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		M_COPY_PKTHDR(n, m);
		MH_ALIGN(n, sizeof(struct ip6_hdr));
		n->m_len = sizeof(struct ip6_hdr);
		n->m_next = m->m_next;
		m->m_next = NULL;
		m_freem(m);
		state->m = n;
		m = state->m;
	} else {
		m->m_len += (sizeof(struct ip6_hdr) - hlen);
		m->m_data -= (sizeof(struct ip6_hdr) - hlen);
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;

	/* construct new IPv6 header. see RFC 2401 5.1.2.2 */
	/* ECN consideration. */
	if (state->dscp_mapping == IPSEC_DSCP_MAPPING_COPY) {
		// Copy DSCP bits from inner IP to outer IP packet.
		ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos);
	} else if (state->dscp_mapping == IPSEC_DSCP_MAPPING_LEGACY) {
		// Copy DSCP bits in legacy style.
		/*
		 * NOTE(review): `ip` still points at the (possibly freed or
		 * rewritten) original first mbuf, not at the moved inner
		 * header `oip`. Presumably kept for bug-compatibility with
		 * legacy behavior -- confirm before changing.
		 */
		ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
	}
	if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
		ip6->ip6_plen = htons((u_int16_t)plen);
	} else {
		/* ip6->ip6_plen will be updated in ip6_output() */
	}

	ip6->ip6_nxt = IPPROTO_IPV4;
	ip6->ip6_hlim = IPV6_DEFHLIM;

	/* outer addresses come from the SA endpoints */
	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
	    &ip6->ip6_src, sizeof(ip6->ip6_src));
	bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
	    &ip6->ip6_dst, sizeof(ip6->ip6_dst));

	return 0;
}
2829
2830 /*
2831 * Check the variable replay window.
2832 * ipsec_chkreplay() performs replay check before ICV verification.
2833 * ipsec_updatereplay() updates replay bitmap. This must be called after
2834 * ICV verification (it also performs replay check, which is usually done
2835 * beforehand).
2836 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2837 *
2838 * based on RFC 2401.
2839 */
2840 int
ipsec_chkreplay(u_int32_t seq,struct secasvar * sav,u_int8_t replay_index)2841 ipsec_chkreplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2842 {
2843 const struct secreplay *replay;
2844 u_int32_t diff;
2845 size_t fr;
2846 size_t wsizeb; /* constant: bits of window size */
2847 size_t frlast; /* constant: last frame */
2848
2849
2850 /* sanity check */
2851 if (sav == NULL) {
2852 panic("ipsec_chkreplay: NULL pointer was passed.");
2853 }
2854
2855 lck_mtx_lock(sadb_mutex);
2856 replay = sav->replay[replay_index];
2857
2858 if (replay->wsize == 0) {
2859 lck_mtx_unlock(sadb_mutex);
2860 return 1; /* no need to check replay. */
2861 }
2862
2863 /* constant */
2864 frlast = replay->wsize - 1;
2865 wsizeb = replay->wsize << 3;
2866
2867 /* sequence number of 0 is invalid */
2868 if (seq == 0) {
2869 lck_mtx_unlock(sadb_mutex);
2870 return 0;
2871 }
2872
2873 /* first time is always okay */
2874 if (replay->count == 0) {
2875 lck_mtx_unlock(sadb_mutex);
2876 return 1;
2877 }
2878
2879 if (seq > replay->lastseq) {
2880 /* larger sequences are okay */
2881 lck_mtx_unlock(sadb_mutex);
2882 return 1;
2883 } else {
2884 /* seq is equal or less than lastseq. */
2885 diff = replay->lastseq - seq;
2886
2887 /* over range to check, i.e. too old or wrapped */
2888 if (diff >= wsizeb) {
2889 lck_mtx_unlock(sadb_mutex);
2890 return 0;
2891 }
2892
2893 fr = frlast - diff / 8;
2894
2895 /* this packet already seen ? */
2896 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2897 lck_mtx_unlock(sadb_mutex);
2898 return 0;
2899 }
2900
2901 /* out of order but good */
2902 lck_mtx_unlock(sadb_mutex);
2903 return 1;
2904 }
2905 }
2906
2907 /*
2908 * check replay counter whether to update or not.
2909 * OUT: 0: OK
2910 * 1: NG
2911 */
2912 int
ipsec_updatereplay(u_int32_t seq,struct secasvar * sav,u_int8_t replay_index)2913 ipsec_updatereplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2914 {
2915 struct secreplay *replay;
2916 u_int32_t diff;
2917 size_t fr;
2918 size_t wsizeb; /* constant: bits of window size */
2919 size_t frlast; /* constant: last frame */
2920
2921 /* sanity check */
2922 if (sav == NULL) {
2923 panic("ipsec_chkreplay: NULL pointer was passed.");
2924 }
2925
2926 lck_mtx_lock(sadb_mutex);
2927 replay = sav->replay[replay_index];
2928
2929 if (replay->wsize == 0) {
2930 goto ok; /* no need to check replay. */
2931 }
2932 /* constant */
2933 frlast = replay->wsize - 1;
2934 wsizeb = replay->wsize << 3;
2935
2936 /* sequence number of 0 is invalid */
2937 if (seq == 0) {
2938 lck_mtx_unlock(sadb_mutex);
2939 return 1;
2940 }
2941
2942 /* first time */
2943 if (replay->count == 0) {
2944 replay->lastseq = seq;
2945 bzero(replay->bitmap, replay->wsize);
2946 (replay->bitmap)[frlast] = 1;
2947 goto ok;
2948 }
2949
2950 if (seq > replay->lastseq) {
2951 /* seq is larger than lastseq. */
2952 diff = seq - replay->lastseq;
2953
2954 /* new larger sequence number */
2955 if (diff < wsizeb) {
2956 /* In window */
2957 /* set bit for this packet */
2958 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
2959 (replay->bitmap)[frlast] |= 1;
2960 } else {
2961 /* this packet has a "way larger" */
2962 bzero(replay->bitmap, replay->wsize);
2963 (replay->bitmap)[frlast] = 1;
2964 }
2965 replay->lastseq = seq;
2966
2967 /* larger is good */
2968 } else {
2969 /* seq is equal or less than lastseq. */
2970 diff = replay->lastseq - seq;
2971
2972 /* over range to check, i.e. too old or wrapped */
2973 if (diff >= wsizeb) {
2974 lck_mtx_unlock(sadb_mutex);
2975 return 1;
2976 }
2977
2978 fr = frlast - diff / 8;
2979
2980 /* this packet already seen ? */
2981 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2982 lck_mtx_unlock(sadb_mutex);
2983 return 1;
2984 }
2985
2986 /* mark as seen */
2987 (replay->bitmap)[fr] |= (1 << (diff % 8));
2988
2989 /* out of order but good */
2990 }
2991
2992 ok:
2993 {
2994 u_int32_t max_count = ~0;
2995 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
2996 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
2997 max_count = (1ULL << 32) / MAX_REPLAY_WINDOWS;
2998 }
2999
3000 if (replay->count == max_count) {
3001 /* set overflow flag */
3002 replay->overflow++;
3003
3004 /* don't increment, no more packets accepted */
3005 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
3006 lck_mtx_unlock(sadb_mutex);
3007 return 1;
3008 }
3009
3010 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
3011 replay->overflow, ipsec_logsastr(sav)));
3012 }
3013 }
3014
3015 replay->count++;
3016
3017 lck_mtx_unlock(sadb_mutex);
3018 return 0;
3019 }
3020
3021 /*
3022 * shift variable length buffer to left.
3023 * IN: bitmap: pointer to the buffer
 * nbit: the number of bits to shift.
3025 * wsize: buffer size (bytes).
3026 */
/*
 * Shift a variable-length bitmap left (toward byte 0) by nbit bits.
 * Bits that overflow out of byte 0 are discarded; processing happens
 * at most 8 bits per pass so the per-byte carries stay in range.
 */
static void
vshiftl(unsigned char *bitmap, int nbit, size_t wsize)
{
	int remaining = nbit;

	while (remaining > 0) {
		const int shift = (remaining < 8) ? remaining : 8;
		unsigned char carry = 0;

		/* walk from the last byte toward byte 0, propagating carries */
		for (size_t idx = wsize; idx-- > 0;) {
			const unsigned char next_carry =
			    (unsigned char)(bitmap[idx] >> (8 - shift));
			bitmap[idx] = (unsigned char)((bitmap[idx] << shift) | carry);
			carry = next_carry;
		}
		remaining -= 8;
	}
}
3046
3047 const char *
ipsec4_logpacketstr(struct ip * ip,u_int32_t spi)3048 ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
3049 {
3050 static char buf[256] __attribute__((aligned(4)));
3051 char *p;
3052 u_int8_t *s, *d;
3053
3054 s = (u_int8_t *)(&ip->ip_src);
3055 d = (u_int8_t *)(&ip->ip_dst);
3056
3057 p = buf;
3058 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3059 while (p && *p) {
3060 p++;
3061 }
3062 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
3063 s[0], s[1], s[2], s[3]);
3064 while (p && *p) {
3065 p++;
3066 }
3067 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
3068 d[0], d[1], d[2], d[3]);
3069 while (p && *p) {
3070 p++;
3071 }
3072 snprintf(p, sizeof(buf) - (p - buf), ")");
3073
3074 return buf;
3075 }
3076
3077 const char *
ipsec6_logpacketstr(struct ip6_hdr * ip6,u_int32_t spi)3078 ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3079 {
3080 static char buf[256] __attribute__((aligned(4)));
3081 char *p;
3082
3083 p = buf;
3084 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3085 while (p && *p) {
3086 p++;
3087 }
3088 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3089 ip6_sprintf(&ip6->ip6_src));
3090 while (p && *p) {
3091 p++;
3092 }
3093 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3094 ip6_sprintf(&ip6->ip6_dst));
3095 while (p && *p) {
3096 p++;
3097 }
3098 snprintf(p, sizeof(buf) - (p - buf), ")");
3099
3100 return buf;
3101 }
3102
3103 const char *
ipsec_logsastr(struct secasvar * sav)3104 ipsec_logsastr(struct secasvar *sav)
3105 {
3106 static char buf[256] __attribute__((aligned(4)));
3107 char *p;
3108 struct secasindex *saidx = &sav->sah->saidx;
3109
3110 /* validity check */
3111 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3112 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) {
3113 panic("ipsec_logsastr: family mismatched.");
3114 }
3115
3116 p = buf;
3117 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3118 while (p && *p) {
3119 p++;
3120 }
3121 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3122 u_int8_t *s, *d;
3123 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3124 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3125 snprintf(p, sizeof(buf) - (p - buf),
3126 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3127 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3128 } else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3129 snprintf(p, sizeof(buf) - (p - buf),
3130 "src=%s",
3131 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3132 while (p && *p) {
3133 p++;
3134 }
3135 snprintf(p, sizeof(buf) - (p - buf),
3136 " dst=%s",
3137 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3138 }
3139 while (p && *p) {
3140 p++;
3141 }
3142 snprintf(p, sizeof(buf) - (p - buf), ")");
3143
3144 return buf;
3145 }
3146
3147 void
ipsec_dumpmbuf(struct mbuf * m)3148 ipsec_dumpmbuf(struct mbuf *m)
3149 {
3150 int totlen;
3151 int i;
3152 u_char *p;
3153
3154 totlen = 0;
3155 printf("---\n");
3156 while (m) {
3157 p = mtod(m, u_char *);
3158 for (i = 0; i < m->m_len; i++) {
3159 printf("%02x ", p[i]);
3160 totlen++;
3161 if (totlen % 16 == 0) {
3162 printf("\n");
3163 }
3164 }
3165 m = m->m_next;
3166 }
3167 if (totlen % 16 != 0) {
3168 printf("\n");
3169 }
3170 printf("---\n");
3171 }
3172
3173 #if INET
3174 /*
3175 * IPsec output logic for IPv4.
3176 */
3177 static int
ipsec4_output_internal(struct ipsec_output_state * state,struct secasvar * sav)3178 ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3179 {
3180 struct ip *ip = NULL;
3181 int error = 0;
3182 struct sockaddr_in *dst4;
3183 struct route *ro4;
3184
3185 /* validity check */
3186 if (sav == NULL || sav->sah == NULL) {
3187 error = EINVAL;
3188 goto bad;
3189 }
3190
3191 /*
3192 * If there is no valid SA, we give up to process any
3193 * more. In such a case, the SA's status is changed
3194 * from DYING to DEAD after allocating. If a packet
3195 * send to the receiver by dead SA, the receiver can
3196 * not decode a packet because SA has been dead.
3197 */
3198 if (sav->state != SADB_SASTATE_MATURE
3199 && sav->state != SADB_SASTATE_DYING) {
3200 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3201 error = EINVAL;
3202 goto bad;
3203 }
3204
3205 state->outgoing_if = sav->sah->outgoing_if;
3206
3207 /*
3208 * There may be the case that SA status will be changed when
3209 * we are refering to one. So calling splsoftnet().
3210 */
3211
3212 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3213 /*
3214 * build IPsec tunnel.
3215 */
3216 state->m = ipsec4_splithdr(state->m);
3217 if (!state->m) {
3218 error = ENOMEM;
3219 goto bad;
3220 }
3221
3222 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3223 error = ipsec46_encapsulate(state, sav);
3224 if (error) {
3225 // packet already freed by encapsulation error handling
3226 state->m = NULL;
3227 return error;
3228 }
3229
3230 error = ipsec6_update_routecache_and_output(state, sav);
3231 return error;
3232 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3233 error = ipsec4_encapsulate(state->m, sav);
3234 if (error) {
3235 state->m = NULL;
3236 goto bad;
3237 }
3238 ip = mtod(state->m, struct ip *);
3239
3240 // grab sadb_mutex, before updating sah's route cache
3241 lck_mtx_lock(sadb_mutex);
3242 ro4 = (struct route *)&sav->sah->sa_route;
3243 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3244 if (ro4->ro_rt != NULL) {
3245 RT_LOCK(ro4->ro_rt);
3246 }
3247 if (ROUTE_UNUSABLE(ro4) ||
3248 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3249 if (ro4->ro_rt != NULL) {
3250 RT_UNLOCK(ro4->ro_rt);
3251 }
3252 ROUTE_RELEASE(ro4);
3253 }
3254 if (ro4->ro_rt == 0) {
3255 dst4->sin_family = AF_INET;
3256 dst4->sin_len = sizeof(*dst4);
3257 dst4->sin_addr = ip->ip_dst;
3258 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3259 if (ro4->ro_rt == 0) {
3260 OSAddAtomic(1, &ipstat.ips_noroute);
3261 error = EHOSTUNREACH;
3262 // release sadb_mutex, after updating sah's route cache
3263 lck_mtx_unlock(sadb_mutex);
3264 goto bad;
3265 }
3266 RT_LOCK(ro4->ro_rt);
3267 }
3268
3269 /*
3270 * adjust state->dst if tunnel endpoint is offlink
3271 *
3272 * XXX: caching rt_gateway value in the state is
3273 * not really good, since it may point elsewhere
3274 * when the gateway gets modified to a larger
3275 * sockaddr via rt_setgate(). This is currently
3276 * addressed by SA_SIZE roundup in that routine.
3277 */
3278 if (ro4->ro_rt->rt_flags & RTF_GATEWAY) {
3279 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3280 }
3281 RT_UNLOCK(ro4->ro_rt);
3282 ROUTE_RELEASE(&state->ro);
3283 route_copyout((struct route *)&state->ro, ro4, sizeof(struct route));
3284 state->dst = (struct sockaddr *)dst4;
3285 state->tunneled = 4;
3286 // release sadb_mutex, after updating sah's route cache
3287 lck_mtx_unlock(sadb_mutex);
3288 } else {
3289 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3290 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3291 error = EAFNOSUPPORT;
3292 goto bad;
3293 }
3294 }
3295
3296 state->m = ipsec4_splithdr(state->m);
3297 if (!state->m) {
3298 error = ENOMEM;
3299 goto bad;
3300 }
3301 switch (sav->sah->saidx.proto) {
3302 case IPPROTO_ESP:
3303 #if IPSEC_ESP
3304 if ((error = esp4_output(state->m, sav)) != 0) {
3305 state->m = NULL;
3306 goto bad;
3307 }
3308 break;
3309 #else
3310 m_freem(state->m);
3311 state->m = NULL;
3312 error = EINVAL;
3313 goto bad;
3314 #endif
3315 case IPPROTO_AH:
3316 if ((error = ah4_output(state->m, sav)) != 0) {
3317 state->m = NULL;
3318 goto bad;
3319 }
3320 break;
3321 default:
3322 ipseclog((LOG_ERR,
3323 "ipsec4_output: unknown ipsec protocol %d\n",
3324 sav->sah->saidx.proto));
3325 m_freem(state->m);
3326 state->m = NULL;
3327 error = EPROTONOSUPPORT;
3328 goto bad;
3329 }
3330
3331 if (state->m == 0) {
3332 error = ENOMEM;
3333 goto bad;
3334 }
3335
3336 return 0;
3337
3338 bad:
3339 return error;
3340 }
3341
3342 int
ipsec4_interface_output(struct ipsec_output_state * state,ifnet_t interface)3343 ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3344 {
3345 int error = 0;
3346 struct secasvar *sav = NULL;
3347
3348 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3349
3350 if (state == NULL) {
3351 panic("state == NULL in ipsec4_output");
3352 }
3353 if (state->m == NULL) {
3354 panic("state->m == NULL in ipsec4_output");
3355 }
3356 if (state->dst == NULL) {
3357 panic("state->dst == NULL in ipsec4_output");
3358 }
3359
3360 struct ip *ip = mtod(state->m, struct ip *);
3361
3362 struct sockaddr_in src = {};
3363 src.sin_family = AF_INET;
3364 src.sin_len = sizeof(src);
3365 memcpy(&src.sin_addr, &ip->ip_src, sizeof(src.sin_addr));
3366
3367 struct sockaddr_in dst = {};
3368 dst.sin_family = AF_INET;
3369 dst.sin_len = sizeof(dst);
3370 memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr));
3371
3372 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
3373 (struct sockaddr *)&src,
3374 (struct sockaddr *)&dst);
3375 if (sav == NULL) {
3376 goto bad;
3377 }
3378
3379 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3380 goto bad;
3381 }
3382
3383 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3384 if (sav) {
3385 key_freesav(sav, KEY_SADB_UNLOCKED);
3386 }
3387 return 0;
3388
3389 bad:
3390 if (sav) {
3391 key_freesav(sav, KEY_SADB_UNLOCKED);
3392 }
3393 m_freem(state->m);
3394 state->m = NULL;
3395 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3396 return error;
3397 }
3398
/*
 * Policy-driven IPsec output for IPv4: walk the policy's chain of
 * ipsecrequests, look up an SA for each (filling wildcard selector
 * addresses from the packet), and apply it via ipsec4_output_internal().
 *
 * Returns 0 on success with the transformed packet in state->m;
 * on failure the packet is freed, state->m is NULLed, and errno is
 * returned.
 */
int
ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
{
	struct ip *ip = NULL;
	struct ipsecrequest *isr = NULL;
	struct secasindex saidx;
	struct secasvar *sav = NULL;
	int error = 0;
	struct sockaddr_in *sin;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	if (!state) {
		panic("state == NULL in ipsec4_output");
	}
	if (!state->m) {
		panic("state->m == NULL in ipsec4_output");
	}
	if (!state->dst) {
		panic("state->dst == NULL in ipsec4_output");
	}

	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("ipsec4_output: applied SP\n");
	    kdebug_secpolicy(sp));

	for (isr = sp->req; isr != NULL; isr = isr->next) {
		/* make SA index for search proper SA */
		ip = mtod(state->m, struct ip *);
		bcopy(&isr->saidx, &saidx, sizeof(saidx));
		saidx.mode = isr->saidx.mode;
		saidx.reqid = isr->saidx.reqid;
		/* A zero-length selector address is a wildcard: fill it in
		 * from the packet. */
		sin = (struct sockaddr_in *)&saidx.src;
		if (sin->sin_len == 0) {
			sin->sin_len = sizeof(*sin);
			sin->sin_family = AF_INET;
			sin->sin_port = IPSEC_PORT_ANY;
			bcopy(&ip->ip_src, &sin->sin_addr,
			    sizeof(sin->sin_addr));
		}
		sin = (struct sockaddr_in *)&saidx.dst;
		if (sin->sin_len == 0) {
			sin->sin_len = sizeof(*sin);
			sin->sin_family = AF_INET;
			sin->sin_port = IPSEC_PORT_ANY;
			/*
			 * Get port from packet if upper layer is UDP and nat traversal
			 * is enabled and transport mode.
			 */

			if ((esp_udp_encap_port & 0xFFFF) != 0 &&
			    isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
				if (ip->ip_p == IPPROTO_UDP) {
					struct udphdr *udp;
					u_int32_t hlen;
#ifdef _IP_VHL
					hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
					hlen = ip->ip_hl << 2;
#endif
					/* Make the UDP header contiguous before reading it. */
					if (state->m->m_len < hlen + sizeof(struct udphdr)) {
						state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
						if (!state->m) {
							ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
							IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
							goto bad;
						}
						ip = mtod(state->m, struct ip *);
					}
					udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
					sin->sin_port = udp->uh_dport;
				}
			}

			bcopy(&ip->ip_dst, &sin->sin_addr,
			    sizeof(sin->sin_addr));
		}

		if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
			/*
			 * IPsec processing is required, but no SA found.
			 * I assume that key_acquire() had been called
			 * to get/establish the SA. Here I discard
			 * this packet because it is responsibility for
			 * upper layer to retransmit the packet.
			 */
			IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
			goto bad;
		}

		/* validity check */
		if (sav == NULL) {
			switch (ipsec_get_reqlevel(isr)) {
			case IPSEC_LEVEL_USE:
				/* best-effort level: skip this request */
				continue;
			case IPSEC_LEVEL_REQUIRE:
				/* must be not reached here. */
				panic("ipsec4_output: no SA found, but required.");
			}
		}

		if ((error = ipsec4_output_internal(state, sav)) != 0) {
			goto bad;
		}
	}

	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	return 0;

bad:
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	m_freem(state->m);
	state->m = NULL;
	KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
	return error;
}
3522
3523 #endif
3524
3525 /*
3526 * IPsec output logic for IPv6, transport mode.
3527 */
3528 static int
ipsec6_output_trans_internal(struct ipsec_output_state * state,struct secasvar * sav,u_char * nexthdrp,struct mbuf * mprev)3529 ipsec6_output_trans_internal(
3530 struct ipsec_output_state *state,
3531 struct secasvar *sav,
3532 u_char *nexthdrp,
3533 struct mbuf *mprev)
3534 {
3535 struct ip6_hdr *ip6;
3536 size_t plen;
3537 int error = 0;
3538
3539 /* validity check */
3540 if (sav == NULL || sav->sah == NULL) {
3541 error = EINVAL;
3542 goto bad;
3543 }
3544
3545 /*
3546 * If there is no valid SA, we give up to process.
3547 * see same place at ipsec4_output().
3548 */
3549 if (sav->state != SADB_SASTATE_MATURE
3550 && sav->state != SADB_SASTATE_DYING) {
3551 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3552 error = EINVAL;
3553 goto bad;
3554 }
3555
3556 state->outgoing_if = sav->sah->outgoing_if;
3557
3558 switch (sav->sah->saidx.proto) {
3559 case IPPROTO_ESP:
3560 #if IPSEC_ESP
3561 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3562 #else
3563 m_freem(state->m);
3564 error = EINVAL;
3565 #endif
3566 break;
3567 case IPPROTO_AH:
3568 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3569 break;
3570 default:
3571 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3572 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3573 m_freem(state->m);
3574 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3575 error = EPROTONOSUPPORT;
3576 break;
3577 }
3578 if (error) {
3579 state->m = NULL;
3580 goto bad;
3581 }
3582 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3583 if (plen > IPV6_MAXPACKET) {
3584 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3585 "IPsec with IPv6 jumbogram is not supported\n"));
3586 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3587 error = EINVAL; /*XXX*/
3588 goto bad;
3589 }
3590 ip6 = mtod(state->m, struct ip6_hdr *);
3591 ip6->ip6_plen = htons((u_int16_t)plen);
3592
3593 return 0;
3594 bad:
3595 return error;
3596 }
3597
/*
 * Policy-driven IPsec output for IPv6, transport mode.
 *
 * Walks sp->req, applying each transport-mode request via
 * ipsec6_output_trans_internal(); stops at the first tunnel-mode
 * request, which is left for ipsec6_output_tunnel() — *tun is set to
 * 1 in that case, otherwise 0.  Wildcard (zero-length) selector
 * addresses are filled from the packet, with link-local scope ids
 * fixed up for SPD comparison.
 *
 * Returns 0 on success; on failure (including ENOENT when a required
 * SA is missing, in which case an ICMPv6 admin-prohibited error has
 * already been sent) the packet is freed, state->m is NULLed, and
 * errno is returned.
 */
int
ipsec6_output_trans(
	struct ipsec_output_state *state,
	u_char *nexthdrp,
	struct mbuf *mprev,
	struct secpolicy *sp,
	__unused int flags,
	int *tun)
{
	struct ip6_hdr *ip6;
	struct ipsecrequest *isr = NULL;
	struct secasindex saidx;
	int error = 0;
	struct sockaddr_in6 *sin6;
	struct secasvar *sav = NULL;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	if (!state) {
		panic("state == NULL in ipsec6_output_trans");
	}
	if (!state->m) {
		panic("state->m == NULL in ipsec6_output_trans");
	}
	if (!nexthdrp) {
		panic("nexthdrp == NULL in ipsec6_output_trans");
	}
	if (!mprev) {
		panic("mprev == NULL in ipsec6_output_trans");
	}
	if (!sp) {
		panic("sp == NULL in ipsec6_output_trans");
	}
	if (!tun) {
		panic("tun == NULL in ipsec6_output_trans");
	}

	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("ipsec6_output_trans: applyed SP\n");
	    kdebug_secpolicy(sp));

	*tun = 0;
	for (isr = sp->req; isr; isr = isr->next) {
		if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
			/* the rest will be handled by ipsec6_output_tunnel() */
			break;
		}

		/* make SA index for search proper SA */
		ip6 = mtod(state->m, struct ip6_hdr *);
		bcopy(&isr->saidx, &saidx, sizeof(saidx));
		saidx.mode = isr->saidx.mode;
		saidx.reqid = isr->saidx.reqid;
		/* Fill wildcard selector addresses from the packet. */
		sin6 = (struct sockaddr_in6 *)&saidx.src;
		if (sin6->sin6_len == 0) {
			sin6->sin6_len = sizeof(*sin6);
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = IPSEC_PORT_ANY;
			bcopy(&ip6->ip6_src, &sin6->sin6_addr,
			    sizeof(ip6->ip6_src));
			if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
				/* fix scope id for comparing SPD */
				sin6->sin6_scope_id = ip6_output_getsrcifscope(state->m);
				in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
				if (in6_embedded_scope) {
					/* KAME convention: scope id embedded in
					 * s6_addr16[1]; extract it and clear the
					 * embedded copy for comparison. */
					sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
					sin6->sin6_addr.s6_addr16[1] = 0;
				}
			}
		}
		sin6 = (struct sockaddr_in6 *)&saidx.dst;
		if (sin6->sin6_len == 0) {
			sin6->sin6_len = sizeof(*sin6);
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = IPSEC_PORT_ANY;
			bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
			    sizeof(ip6->ip6_dst));
			if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
				/* fix scope id for comparing SPD */
				sin6->sin6_scope_id = ip6_output_getdstifscope(state->m);
				in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
				if (in6_embedded_scope) {
					sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
					sin6->sin6_addr.s6_addr16[1] = 0;
				}
			}
		}

		if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
			/*
			 * IPsec processing is required, but no SA found.
			 * I assume that key_acquire() had been called
			 * to get/establish the SA. Here I discard
			 * this packet because it is responsibility for
			 * upper layer to retransmit the packet.
			 */
			IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
			error = ENOENT;

			/*
			 * Notify the fact that the packet is discarded
			 * to ourselves. I believe this is better than
			 * just silently discarding. ([email protected])
			 * XXX: should we restrict the error to TCP packets?
			 * XXX: should we directly notify sockets via
			 * pfctlinputs?
			 */
			icmp6_error(state->m, ICMP6_DST_UNREACH,
			    ICMP6_DST_UNREACH_ADMIN, 0);
			state->m = NULL; /* icmp6_error freed the mbuf */
			goto bad;
		}

		/* validity check */
		if (sav == NULL) {
			switch (ipsec_get_reqlevel(isr)) {
			case IPSEC_LEVEL_USE:
				/* best-effort level: skip this request */
				continue;
			case IPSEC_LEVEL_REQUIRE:
				/* must be not reached here. */
				panic("ipsec6_output_trans: no SA found, but required.");
			}
		}

		if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
			goto bad;
		}
	}

	/* if we have more to go, we need a tunnel mode processing */
	if (isr != NULL) {
		*tun = 1;
	}

	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	return 0;

bad:
	if (sav) {
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	m_freem(state->m);
	state->m = NULL;
	return error;
}
3745
3746 /*
3747 * IPsec output logic for IPv6, tunnel mode.
3748 */
3749 static int
ipsec6_output_tunnel_internal(struct ipsec_output_state * state,struct secasvar * sav,int * must_be_last)3750 ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3751 {
3752 struct ip6_hdr *ip6;
3753 struct sockaddr_in6* dst6;
3754 struct route_in6 *ro6;
3755 size_t plen;
3756 int error = 0;
3757
3758 /* validity check */
3759 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3760 error = EINVAL;
3761 goto bad;
3762 }
3763
3764 /*
3765 * If there is no valid SA, we give up to process.
3766 * see same place at ipsec4_output().
3767 */
3768 if (sav->state != SADB_SASTATE_MATURE
3769 && sav->state != SADB_SASTATE_DYING) {
3770 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3771 error = EINVAL;
3772 goto bad;
3773 }
3774
3775 state->outgoing_if = sav->sah->outgoing_if;
3776
3777 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3778 /*
3779 * build IPsec tunnel.
3780 */
3781 state->m = ipsec6_splithdr(state->m);
3782 if (!state->m) {
3783 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3784 error = ENOMEM;
3785 goto bad;
3786 }
3787
3788 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3789 error = ipsec6_encapsulate(state->m, sav);
3790 if (error) {
3791 state->m = 0;
3792 goto bad;
3793 }
3794 ip6 = mtod(state->m, struct ip6_hdr *);
3795 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3796 struct ip *ip;
3797 struct sockaddr_in* dst4;
3798 struct route *ro4 = NULL;
3799 struct route ro4_copy;
3800 struct ip_out_args ipoa;
3801
3802 bzero(&ipoa, sizeof(ipoa));
3803 ipoa.ipoa_boundif = IFSCOPE_NONE;
3804 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
3805 ipoa.ipoa_sotc = SO_TC_UNSPEC;
3806 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3807
3808 if (must_be_last) {
3809 *must_be_last = 1;
3810 }
3811
3812 state->tunneled = 4; /* must not process any further in ip6_output */
3813 error = ipsec64_encapsulate(state->m, sav, state->dscp_mapping);
3814 if (error) {
3815 state->m = 0;
3816 goto bad;
3817 }
3818 /* Now we have an IPv4 packet */
3819 ip = mtod(state->m, struct ip *);
3820
3821 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3822 lck_mtx_lock(sadb_mutex);
3823 ro4 = (struct route *)&sav->sah->sa_route;
3824 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3825 if (ro4->ro_rt) {
3826 RT_LOCK(ro4->ro_rt);
3827 }
3828 if (ROUTE_UNUSABLE(ro4) ||
3829 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3830 if (ro4->ro_rt != NULL) {
3831 RT_UNLOCK(ro4->ro_rt);
3832 }
3833 ROUTE_RELEASE(ro4);
3834 }
3835 if (ro4->ro_rt == NULL) {
3836 dst4->sin_family = AF_INET;
3837 dst4->sin_len = sizeof(*dst4);
3838 dst4->sin_addr = ip->ip_dst;
3839 } else {
3840 RT_UNLOCK(ro4->ro_rt);
3841 }
3842 route_copyout(&ro4_copy, ro4, sizeof(struct route));
3843 // release sadb_mutex, after updating sah's route cache and getting a local copy
3844 lck_mtx_unlock(sadb_mutex);
3845 state->m = ipsec4_splithdr(state->m);
3846 if (!state->m) {
3847 error = ENOMEM;
3848 ROUTE_RELEASE(&ro4_copy);
3849 goto bad;
3850 }
3851 switch (sav->sah->saidx.proto) {
3852 case IPPROTO_ESP:
3853 #if IPSEC_ESP
3854 if ((error = esp4_output(state->m, sav)) != 0) {
3855 state->m = NULL;
3856 ROUTE_RELEASE(&ro4_copy);
3857 goto bad;
3858 }
3859 break;
3860
3861 #else
3862 m_freem(state->m);
3863 state->m = NULL;
3864 error = EINVAL;
3865 ROUTE_RELEASE(&ro4_copy);
3866 goto bad;
3867 #endif
3868 case IPPROTO_AH:
3869 if ((error = ah4_output(state->m, sav)) != 0) {
3870 state->m = NULL;
3871 ROUTE_RELEASE(&ro4_copy);
3872 goto bad;
3873 }
3874 break;
3875 default:
3876 ipseclog((LOG_ERR,
3877 "ipsec4_output: unknown ipsec protocol %d\n",
3878 sav->sah->saidx.proto));
3879 m_freem(state->m);
3880 state->m = NULL;
3881 error = EPROTONOSUPPORT;
3882 ROUTE_RELEASE(&ro4_copy);
3883 goto bad;
3884 }
3885
3886 if (state->m == 0) {
3887 error = ENOMEM;
3888 ROUTE_RELEASE(&ro4_copy);
3889 goto bad;
3890 }
3891 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3892 ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);
3893
3894 ip = mtod(state->m, struct ip *);
3895 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3896 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3897 state->m = NULL;
3898 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3899 lck_mtx_lock(sadb_mutex);
3900 route_copyin(&ro4_copy, ro4, sizeof(struct route));
3901 lck_mtx_unlock(sadb_mutex);
3902 if (error != 0) {
3903 goto bad;
3904 }
3905 goto done;
3906 } else {
3907 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3908 "unsupported inner family, spi=%u\n",
3909 (u_int32_t)ntohl(sav->spi)));
3910 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3911 error = EAFNOSUPPORT;
3912 goto bad;
3913 }
3914
3915 // grab sadb_mutex, before updating sah's route cache
3916 lck_mtx_lock(sadb_mutex);
3917 ro6 = &sav->sah->sa_route;
3918 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
3919 if (ro6->ro_rt) {
3920 RT_LOCK(ro6->ro_rt);
3921 }
3922 if (ROUTE_UNUSABLE(ro6) ||
3923 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
3924 if (ro6->ro_rt != NULL) {
3925 RT_UNLOCK(ro6->ro_rt);
3926 }
3927 ROUTE_RELEASE(ro6);
3928 }
3929 if (ro6->ro_rt == 0) {
3930 bzero(dst6, sizeof(*dst6));
3931 dst6->sin6_family = AF_INET6;
3932 dst6->sin6_len = sizeof(*dst6);
3933 dst6->sin6_addr = ip6->ip6_dst;
3934 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
3935 if (ro6->ro_rt) {
3936 RT_LOCK(ro6->ro_rt);
3937 }
3938 }
3939 if (ro6->ro_rt == 0) {
3940 ip6stat.ip6s_noroute++;
3941 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
3942 error = EHOSTUNREACH;
3943 // release sadb_mutex, after updating sah's route cache
3944 lck_mtx_unlock(sadb_mutex);
3945 goto bad;
3946 }
3947
3948 /*
3949 * adjust state->dst if tunnel endpoint is offlink
3950 *
3951 * XXX: caching rt_gateway value in the state is
3952 * not really good, since it may point elsewhere
3953 * when the gateway gets modified to a larger
3954 * sockaddr via rt_setgate(). This is currently
3955 * addressed by SA_SIZE roundup in that routine.
3956 */
3957 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
3958 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
3959 }
3960 RT_UNLOCK(ro6->ro_rt);
3961 ROUTE_RELEASE(&state->ro);
3962 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
3963 state->dst = (struct sockaddr *)dst6;
3964 state->tunneled = 6;
3965 // release sadb_mutex, after updating sah's route cache
3966 lck_mtx_unlock(sadb_mutex);
3967 }
3968
3969 state->m = ipsec6_splithdr(state->m);
3970 if (!state->m) {
3971 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3972 error = ENOMEM;
3973 goto bad;
3974 }
3975 ip6 = mtod(state->m, struct ip6_hdr *);
3976 switch (sav->sah->saidx.proto) {
3977 case IPPROTO_ESP:
3978 #if IPSEC_ESP
3979 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3980 #else
3981 m_freem(state->m);
3982 error = EINVAL;
3983 #endif
3984 break;
3985 case IPPROTO_AH:
3986 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3987 break;
3988 default:
3989 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3990 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3991 m_freem(state->m);
3992 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3993 error = EINVAL;
3994 break;
3995 }
3996 if (error) {
3997 state->m = NULL;
3998 goto bad;
3999 }
4000 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
4001 if (plen > IPV6_MAXPACKET) {
4002 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4003 "IPsec with IPv6 jumbogram is not supported\n"));
4004 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4005 error = EINVAL; /*XXX*/
4006 goto bad;
4007 }
4008 ip6 = mtod(state->m, struct ip6_hdr *);
4009 ip6->ip6_plen = htons((u_int16_t)plen);
4010 done:
4011 return 0;
4012
4013 bad:
4014 return error;
4015 }
4016
4017 int
ipsec6_output_tunnel(struct ipsec_output_state * state,struct secpolicy * sp,__unused int flags)4018 ipsec6_output_tunnel(
4019 struct ipsec_output_state *state,
4020 struct secpolicy *sp,
4021 __unused int flags)
4022 {
4023 struct ip6_hdr *ip6;
4024 struct ipsecrequest *isr = NULL;
4025 struct secasindex saidx;
4026 struct secasvar *sav = NULL;
4027 int error = 0;
4028
4029 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4030
4031 if (!state) {
4032 panic("state == NULL in ipsec6_output_tunnel");
4033 }
4034 if (!state->m) {
4035 panic("state->m == NULL in ipsec6_output_tunnel");
4036 }
4037 if (!sp) {
4038 panic("sp == NULL in ipsec6_output_tunnel");
4039 }
4040
4041 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
4042 printf("ipsec6_output_tunnel: applyed SP\n");
4043 kdebug_secpolicy(sp));
4044
4045 /*
4046 * transport mode ipsec (before the 1st tunnel mode) is already
4047 * processed by ipsec6_output_trans().
4048 */
4049 for (isr = sp->req; isr; isr = isr->next) {
4050 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4051 break;
4052 }
4053 }
4054
4055 for (/* already initialized */; isr; isr = isr->next) {
4056 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4057 /* When tunnel mode, SA peers must be specified. */
4058 bcopy(&isr->saidx, &saidx, sizeof(saidx));
4059 } else {
4060 /* make SA index to look for a proper SA */
4061 struct sockaddr_in6 *sin6;
4062
4063 bzero(&saidx, sizeof(saidx));
4064 saidx.proto = isr->saidx.proto;
4065 saidx.mode = isr->saidx.mode;
4066 saidx.reqid = isr->saidx.reqid;
4067
4068 ip6 = mtod(state->m, struct ip6_hdr *);
4069 sin6 = (struct sockaddr_in6 *)&saidx.src;
4070 if (sin6->sin6_len == 0) {
4071 sin6->sin6_len = sizeof(*sin6);
4072 sin6->sin6_family = AF_INET6;
4073 sin6->sin6_port = IPSEC_PORT_ANY;
4074 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
4075 sizeof(ip6->ip6_src));
4076 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
4077 /* fix scope id for comparing SPD */
4078 sin6->sin6_scope_id = ip6_output_getsrcifscope(state->m);
4079 in6_verify_ifscope(&ip6->ip6_src, sin6->sin6_scope_id);
4080 if (in6_embedded_scope) {
4081 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
4082 sin6->sin6_addr.s6_addr16[1] = 0;
4083 }
4084 }
4085 }
4086 sin6 = (struct sockaddr_in6 *)&saidx.dst;
4087 if (sin6->sin6_len == 0) {
4088 sin6->sin6_len = sizeof(*sin6);
4089 sin6->sin6_family = AF_INET6;
4090 sin6->sin6_port = IPSEC_PORT_ANY;
4091 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
4092 sizeof(ip6->ip6_dst));
4093 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
4094 /* fix scope id for comparing SPD */
4095 sin6->sin6_scope_id = ip6_output_getdstifscope(state->m);
4096 in6_verify_ifscope(&ip6->ip6_dst, sin6->sin6_scope_id);
4097 if (in6_embedded_scope) {
4098 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
4099 sin6->sin6_addr.s6_addr16[1] = 0;
4100 }
4101 }
4102 }
4103 }
4104
4105 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
4106 /*
4107 * IPsec processing is required, but no SA found.
4108 * I assume that key_acquire() had been called
4109 * to get/establish the SA. Here I discard
4110 * this packet because it is responsibility for
4111 * upper layer to retransmit the packet.
4112 */
4113 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4114 error = ENOENT;
4115 goto bad;
4116 }
4117
4118 /* validity check */
4119 if (sav == NULL) {
4120 switch (ipsec_get_reqlevel(isr)) {
4121 case IPSEC_LEVEL_USE:
4122 continue;
4123 case IPSEC_LEVEL_REQUIRE:
4124 /* must be not reached here. */
4125 panic("ipsec6_output_tunnel: no SA found, but required.");
4126 }
4127 }
4128
4129 /*
4130 * If there is no valid SA, we give up to process.
4131 * see same place at ipsec4_output().
4132 */
4133 if (sav->state != SADB_SASTATE_MATURE
4134 && sav->state != SADB_SASTATE_DYING) {
4135 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4136 error = EINVAL;
4137 goto bad;
4138 }
4139
4140 int must_be_last = 0;
4141
4142 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4143 goto bad;
4144 }
4145
4146 if (must_be_last && isr->next) {
4147 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4148 "IPv4 must be outer layer, spi=%u\n",
4149 (u_int32_t)ntohl(sav->spi)));
4150 error = EINVAL;
4151 goto bad;
4152 }
4153 }
4154
4155 if (sav) {
4156 key_freesav(sav, KEY_SADB_UNLOCKED);
4157 }
4158 return 0;
4159
4160 bad:
4161 if (sav) {
4162 key_freesav(sav, KEY_SADB_UNLOCKED);
4163 }
4164 if (state->m) {
4165 m_freem(state->m);
4166 }
4167 state->m = NULL;
4168 return error;
4169 }
4170
4171 int
ipsec6_interface_output(struct ipsec_output_state * state,ifnet_t interface,u_char * nexthdrp,struct mbuf * mprev)4172 ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4173 {
4174 int error = 0;
4175 struct secasvar *sav = NULL;
4176
4177 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4178
4179 if (state == NULL) {
4180 panic("state == NULL in ipsec6_output");
4181 }
4182 if (state->m == NULL) {
4183 panic("state->m == NULL in ipsec6_output");
4184 }
4185 if (nexthdrp == NULL) {
4186 panic("nexthdrp == NULL in ipsec6_output");
4187 }
4188 if (mprev == NULL) {
4189 panic("mprev == NULL in ipsec6_output");
4190 }
4191
4192 struct ip6_hdr *ip6 = mtod(state->m, struct ip6_hdr *);
4193
4194 struct sockaddr_in6 src = {};
4195 src.sin6_family = AF_INET6;
4196 src.sin6_len = sizeof(src);
4197 memcpy(&src.sin6_addr, &ip6->ip6_src, sizeof(src.sin6_addr));
4198
4199 struct sockaddr_in6 dst = {};
4200 dst.sin6_family = AF_INET6;
4201 dst.sin6_len = sizeof(dst);
4202 memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr));
4203
4204 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
4205 (struct sockaddr *)&src,
4206 (struct sockaddr *)&dst);
4207 if (sav == NULL) {
4208 goto bad;
4209 }
4210
4211 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4212 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4213 goto bad;
4214 }
4215 } else {
4216 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4217 goto bad;
4218 }
4219 }
4220
4221 if (sav) {
4222 key_freesav(sav, KEY_SADB_UNLOCKED);
4223 }
4224 return 0;
4225
4226 bad:
4227 if (sav) {
4228 key_freesav(sav, KEY_SADB_UNLOCKED);
4229 }
4230 m_freem(state->m);
4231 state->m = NULL;
4232 return error;
4233 }
4234
4235 #if INET
4236 /*
4237 * Chop IP header and option off from the payload.
4238 */
4239 struct mbuf *
ipsec4_splithdr(struct mbuf * m)4240 ipsec4_splithdr(struct mbuf *m)
4241 {
4242 struct mbuf *mh;
4243 struct ip *ip;
4244 int hlen;
4245
4246 if (m->m_len < sizeof(struct ip)) {
4247 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4248 }
4249 ip = mtod(m, struct ip *);
4250 #ifdef _IP_VHL
4251 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4252 #else
4253 hlen = ip->ip_hl << 2;
4254 #endif
4255 if (m->m_len > hlen) {
4256 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4257 if (!mh) {
4258 m_freem(m);
4259 return NULL;
4260 }
4261 M_COPY_PKTHDR(mh, m);
4262 MH_ALIGN(mh, hlen);
4263 m->m_flags &= ~M_PKTHDR;
4264 m_mchtype(m, MT_DATA);
4265 m->m_len -= hlen;
4266 m->m_data += hlen;
4267 mh->m_next = m;
4268 m = mh;
4269 m->m_len = hlen;
4270 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4271 } else if (m->m_len < hlen) {
4272 m = m_pullup(m, hlen);
4273 if (!m) {
4274 return NULL;
4275 }
4276 }
4277 return m;
4278 }
4279 #endif
4280
/*
 * Split the fixed IPv6 header off from the payload so that it occupies
 * exactly the first mbuf of the chain (v6 counterpart of
 * ipsec4_splithdr(); extension headers are not included).
 *
 * Returns the adjusted chain, or NULL on allocation failure (the
 * original chain is freed on the MGETHDR failure path).
 */
struct mbuf *
ipsec6_splithdr(struct mbuf *m)
{
	struct mbuf *mh;
	struct ip6_hdr *ip6;
	int hlen;

	if (m->m_len < sizeof(struct ip6_hdr)) {
		panic("ipsec6_splithdr: first mbuf too short");
	}
	ip6 = mtod(m, struct ip6_hdr *);
	hlen = sizeof(struct ip6_hdr);
	if (m->m_len > hlen) {
		/* first mbuf holds header + payload: split them */
		MGETHDR(mh, M_DONTWAIT, MT_HEADER);     /* MAC-OK */
		if (!mh) {
			m_freem(m);
			return NULL;
		}
		M_COPY_PKTHDR(mh, m);
		MH_ALIGN(mh, hlen);
		/* old first mbuf becomes a plain data mbuf past the header */
		m->m_flags &= ~M_PKTHDR;
		m_mchtype(m, MT_DATA);
		m->m_len -= hlen;
		m->m_data += hlen;
		mh->m_next = m;
		m = mh;
		m->m_len = hlen;
		bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
	} else if (m->m_len < hlen) {
		/* header spans mbufs: make it contiguous */
		m = m_pullup(m, hlen);
		if (!m) {
			return NULL;
		}
	}
	return m;
}
4317
/*
 * Validate an inbound IPsec tunnel-mode packet whose *outer* header is
 * IPv4.
 *
 * m       -- the packet; no pullup permitted, m->m_len >= sizeof(struct ip).
 * off     -- offset of the inner (encapsulated) header within the packet.
 * nxt0    -- inner protocol number (IPPROTO_IPV4 or IPPROTO_IPV6).
 * sav     -- the SA the packet arrived on.
 * ifamily -- out: set to the inner packet's address family when the
 *            packet is accepted (AF_INET or AF_INET6).
 *
 * Returns non-zero if the packet may be decapsulated as an IPsec tunnel,
 * 0 otherwise.  Must be called without sadb_mutex held.
 */
int
ipsec4_tunnel_validate(
	struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
	int off,
	u_int nxt0,
	struct secasvar *sav,
	sa_family_t *ifamily)
{
	u_int8_t nxt = nxt0 & 0xff;
	struct sockaddr_in *sin;
	struct sockaddr_in osrc, odst, i4src, i4dst;
	struct sockaddr_in6 i6src, i6dst;
	int hlen;
	struct secpolicy *sp;
	struct ip *oip;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* do not decapsulate if the SA is for transport mode only */
	if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
		return 0;
	}

#if DIAGNOSTIC
	if (m->m_len < sizeof(struct ip)) {
		panic("too short mbuf on ipsec4_tunnel_validate");
	}
#endif
	/* only IP-in-IP and IPv6-in-IP tunnels are supported */
	if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
		return 0;
	}
	/* there must be room for at least an inner IPv4 header */
	if (m->m_pkthdr.len < off + sizeof(struct ip)) {
		return 0;
	}

	oip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
#else
	hlen = oip->ip_hl << 2;
#endif
	/* outer header must carry no IP options */
	if (hlen != sizeof(struct ip)) {
		return 0;
	}

	/* the outer destination must match the SA's destination */
	sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
	if (sin->sin_family != AF_INET) {
		return 0;
	}
	if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0) {
		return 0;
	}

	if (sav->sah->ipsec_if != NULL) {
		// the ipsec interface SAs don't have a policies.
		if (nxt == IPPROTO_IPV4) {
			*ifamily = AF_INET;
		} else if (nxt == IPPROTO_IPV6) {
			*ifamily = AF_INET6;
		} else {
			return 0;
		}
		return 1;
	}

	/* XXX slow */
	bzero(&osrc, sizeof(osrc));
	bzero(&odst, sizeof(odst));
	osrc.sin_family = odst.sin_family = AF_INET;
	osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
	osrc.sin_addr = oip->ip_src;
	odst.sin_addr = oip->ip_dst;
	/*
	 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
	 * - if the inner destination is multicast address, there can be
	 *   multiple permissible inner source address.  implementation
	 *   may want to skip verification of inner source address against
	 *   SPD selector.
	 * - if the inner protocol is ICMP, the packet may be an error report
	 *   from routers on the other side of the VPN cloud (R in the
	 *   following diagram).  in this case, we cannot verify inner source
	 *   address against SPD selector.
	 *	me -- gw === gw -- R -- you
	 *
	 * we consider the first bullet to be users responsibility on SPD entry
	 * configuration (if you need to encrypt multicast traffic, set
	 * the source range of SPD selector to 0.0.0.0/0, or have explicit
	 * address ranges for possible senders).
	 * the second bullet is not taken care of (yet).
	 *
	 * therefore, we do not do anything special about inner source.
	 */
	if (nxt == IPPROTO_IPV4) {
		/* extract the inner IPv4 src/dst to match against the SPD */
		bzero(&i4src, sizeof(struct sockaddr_in));
		bzero(&i4dst, sizeof(struct sockaddr_in));
		i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
		i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
		m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
		    (caddr_t)&i4src.sin_addr);
		m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
		    (caddr_t)&i4dst.sin_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
	} else if (nxt == IPPROTO_IPV6) {
		/* extract the inner IPv6 src/dst to match against the SPD */
		bzero(&i6src, sizeof(struct sockaddr_in6));
		bzero(&i6dst, sizeof(struct sockaddr_in6));
		i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
		i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
		    (caddr_t)&i6src.sin6_addr);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
		    (caddr_t)&i6dst.sin6_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
	} else {
		return 0; /* unsupported family */
	}
	/* no matching tunnel-mode inbound policy -> do not decapsulate */
	if (!sp) {
		return 0;
	}

	key_freesp(sp, KEY_SADB_UNLOCKED);

	return 1;
}
4444
/*
 * Validate an inbound IPsec tunnel-mode packet whose *outer* header is
 * IPv6.  Mirrors ipsec4_tunnel_validate(); see the long comment there
 * regarding inner source address validation.
 *
 * m       -- the packet; no pullup permitted, m->m_len >= sizeof(struct ip6_hdr).
 * off     -- offset of the inner (encapsulated) header within the packet.
 * nxt0    -- inner protocol number (IPPROTO_IPV4 or IPPROTO_IPV6).
 * sav     -- the SA the packet arrived on.
 * ifamily -- out: set to the inner packet's address family when accepted.
 *
 * Returns non-zero if the packet may be decapsulated as an IPsec tunnel,
 * 0 otherwise.  Must be called without sadb_mutex held.
 */
int
ipsec6_tunnel_validate(
	struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
	int off,
	u_int nxt0,
	struct secasvar *sav,
	sa_family_t *ifamily)
{
	u_int8_t nxt = nxt0 & 0xff;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in i4src, i4dst;
	struct sockaddr_in6 osrc, odst, i6src, i6dst;
	struct secpolicy *sp;
	struct ip6_hdr *oip6;

	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);

	/* do not decapsulate if the SA is for transport mode only */
	if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
		return 0;
	}

#if DIAGNOSTIC
	if (m->m_len < sizeof(struct ip6_hdr)) {
		panic("too short mbuf on ipsec6_tunnel_validate");
	}
#endif
	/* the packet must be large enough to hold the inner header */
	if (nxt == IPPROTO_IPV4) {
		if (m->m_pkthdr.len < off + sizeof(struct ip)) {
			/*
			 * NOTE(review): the message text says "ip6hdr" and logs
			 * sizeof(struct ip6_hdr), but this branch checks the
			 * inner *IPv4* header size -- looks copy-pasted from the
			 * IPv6 branch below; confirm and fix the log text.
			 */
			ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr)));
			return 0;
		}
	} else if (nxt == IPPROTO_IPV6) {
		if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
			ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr)));
			return 0;
		}
	} else {
		ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate invalid nxt(%u) protocol", nxt));
		return 0;
	}

	oip6 = mtod(m, struct ip6_hdr *);
	/* AF_INET should be supported, but at this moment we don't. */
	sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
	if (sin6->sin6_family != AF_INET6) {
		return 0;
	}
	/* the outer destination must match the SA's destination */
	if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr)) {
		return 0;
	}

	if (sav->sah->ipsec_if != NULL) {
		// the ipsec interface SAs don't have a policies.
		if (nxt == IPPROTO_IPV4) {
			*ifamily = AF_INET;
		} else if (nxt == IPPROTO_IPV6) {
			*ifamily = AF_INET6;
		} else {
			return 0;
		}
		return 1;
	}

	/* XXX slow */
	bzero(&osrc, sizeof(osrc));
	bzero(&odst, sizeof(odst));
	osrc.sin6_family = odst.sin6_family = AF_INET6;
	osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
	osrc.sin6_addr = oip6->ip6_src;
	odst.sin6_addr = oip6->ip6_dst;

	/*
	 * regarding to inner source address validation, see a long comment
	 * in ipsec4_tunnel_validate.
	 */

	if (nxt == IPPROTO_IPV4) {
		/* extract the inner IPv4 src/dst to match against the SPD */
		bzero(&i4src, sizeof(struct sockaddr_in));
		bzero(&i4dst, sizeof(struct sockaddr_in));
		i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
		i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
		m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
		    (caddr_t)&i4src.sin_addr);
		m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
		    (caddr_t)&i4dst.sin_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
	} else if (nxt == IPPROTO_IPV6) {
		/* extract the inner IPv6 src/dst to match against the SPD */
		bzero(&i6src, sizeof(struct sockaddr_in6));
		bzero(&i6dst, sizeof(struct sockaddr_in6));
		i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
		i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
		    (caddr_t)&i6src.sin6_addr);
		m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
		    (caddr_t)&i6dst.sin6_addr);
		sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
		    (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
	} else {
		return 0; /* unsupported family */
	}
	/*
	 * when there is no suitable inbound policy for the packet of the ipsec
	 * tunnel mode, the kernel never decapsulate the tunneled packet
	 * as the ipsec tunnel mode even when the system wide policy is "none".
	 * then the kernel leaves the generic tunnel module to process this
	 * packet.  if there is no rule of the generic tunnel, the packet
	 * is rejected and the statistics will be counted up.
	 */
	if (!sp) {
		return 0;
	}
	key_freesp(sp, KEY_SADB_UNLOCKED);

	return 1;
}
4563
/*
 * Make a mbuf chain for encryption.
 * If the original mbuf chain contains a mbuf with a cluster,
 * allocate a new cluster and copy the data to the new cluster.
 * XXX: this hack is inefficient, but is necessary to handle cases
 * of TCP retransmission...
 *
 * Returns the (possibly rebuilt) chain, or NULL on allocation failure,
 * in which case the original chain has been freed.
 */
struct mbuf *
ipsec_copypkt(struct mbuf *m)
{
	struct mbuf *n, **mpp, *mnew;

	/* mpp always points at the chain link to be patched next */
	for (n = m, mpp = &m; n; n = n->m_next) {
		if (n->m_flags & M_EXT) {
			/*
			 * Make a copy only if there are more than one references
			 * to the cluster.
			 * XXX: is this approach effective?
			 */
			if (
				m_get_ext_free(n) != NULL ||
				m_mclhasreference(n)
				) {
				int remain, copied;
				struct mbuf *mm;

				if (n->m_flags & M_PKTHDR) {
					MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
					if (mnew == NULL) {
						goto fail;
					}
					M_COPY_PKTHDR(mnew, n);
				} else {
					MGET(mnew, M_DONTWAIT, MT_DATA);
					if (mnew == NULL) {
						goto fail;
					}
				}
				mnew->m_len = 0;
				mm = mnew;

				/*
				 * Copy data. If we don't have enough space to
				 * store the whole data, allocate a cluster
				 * or additional mbufs.
				 * XXX: we don't use m_copyback(), since the
				 * function does not use clusters and thus is
				 * inefficient.
				 *
				 * NOTE(review): the mnew chain built below is not
				 * linked into m until after the loop, so the
				 * goto-fail paths inside the loop (MCLGET /
				 * MGETHDR failure) appear to leak the mbufs
				 * already allocated into that chain -- verify.
				 */
				remain = n->m_len;
				copied = 0;
				while (1) {
					int len;
					struct mbuf *mn;

					if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) {
						len = remain;
					} else { /* allocate a cluster */
						MCLGET(mm, M_DONTWAIT);
						if (!(mm->m_flags & M_EXT)) {
							m_free(mm);
							goto fail;
						}
						len = remain < MCLBYTES ?
						    remain : MCLBYTES;
					}

					bcopy(n->m_data + copied, mm->m_data,
					    len);

					copied += len;
					remain -= len;
					mm->m_len = len;

					if (remain <= 0) { /* completed? */
						break;
					}

					/* need another mbuf */
					MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
					if (mn == NULL) {
						goto fail;
					}
					mn->m_pkthdr.rcvif = NULL;
					mm->m_next = mn;
					mm = mn;
				}

				/*
				 * adjust chain: m_free(n) returns n's successor,
				 * which becomes the continuation of the copy
				 */
				mm->m_next = m_free(n);
				n = mm;
				*mpp = mnew;
				mpp = &n->m_next;

				continue;
			}
		}
		*mpp = n;
		mpp = &n->m_next;
	}

	return m;
fail:
	m_freem(m);
	return NULL;
}
4670
/*
 * Tags are allocated as mbufs for now, since our minimum size is MLEN, we
 * should make use of up to that much space.
 */
/* NOTE(review): empty macro; appears unused in this file -- confirm before removing */
#define IPSEC_TAG_HEADER \

/*
 * Per-packet IPsec auxiliary data, stored as the payload of an m_tag
 * (the tag header immediately precedes this struct in memory; see
 * ipsec_optaux()'s (struct m_tag*)itag - 1).
 */
struct ipsec_tag {
	struct socket *socket;          /* originating socket, if recorded */
	u_int32_t history_count;        /* valid entries in history[] */
	struct ipsec_history history[]; /* per-layer proto/SPI trail */
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
	/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
	 * are 32-bit:
	 * Aligning to 64-bit since we case to m_tag which is 64-bit aligned.
	 */
} __attribute__ ((aligned(8)));
#else
};
#endif

/* tag payload fills the rest of an MLEN-sized mbuf after the m_tag header */
#define IPSEC_TAG_SIZE          (MLEN - sizeof(struct m_tag))
#define IPSEC_TAG_HDR_SIZE      (offsetof(struct ipsec_tag, history[0]))
/* history entries that fit in the remaining payload space */
#define IPSEC_HISTORY_MAX       ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
	sizeof(struct ipsec_history))
4695
4696 static struct ipsec_tag *
ipsec_addaux(struct mbuf * m)4697 ipsec_addaux(
4698 struct mbuf *m)
4699 {
4700 struct m_tag *tag;
4701
4702 /* Check if the tag already exists */
4703 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4704
4705 if (tag == NULL) {
4706 struct ipsec_tag *itag;
4707
4708 /* Allocate a tag */
4709 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4710 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4711
4712 if (tag) {
4713 itag = (struct ipsec_tag*)(tag + 1);
4714 itag->socket = 0;
4715 itag->history_count = 0;
4716
4717 m_tag_prepend(m, tag);
4718 }
4719 }
4720
4721 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4722 }
4723
4724 static struct ipsec_tag *
ipsec_findaux(struct mbuf * m)4725 ipsec_findaux(
4726 struct mbuf *m)
4727 {
4728 struct m_tag *tag;
4729
4730 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4731
4732 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4733 }
4734
4735 void
ipsec_delaux(struct mbuf * m)4736 ipsec_delaux(
4737 struct mbuf *m)
4738 {
4739 struct m_tag *tag;
4740
4741 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4742
4743 if (tag) {
4744 m_tag_delete(m, tag);
4745 }
4746 }
4747
4748 /* if the aux buffer is unnecessary, nuke it. */
4749 static void
ipsec_optaux(struct mbuf * m,struct ipsec_tag * itag)4750 ipsec_optaux(
4751 struct mbuf *m,
4752 struct ipsec_tag *itag)
4753 {
4754 if (itag && itag->socket == NULL && itag->history_count == 0) {
4755 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4756 }
4757 }
4758
4759 int
ipsec_setsocket(struct mbuf * m,struct socket * so)4760 ipsec_setsocket(struct mbuf *m, struct socket *so)
4761 {
4762 struct ipsec_tag *tag;
4763
4764 /* if so == NULL, don't insist on getting the aux mbuf */
4765 if (so) {
4766 tag = ipsec_addaux(m);
4767 if (!tag) {
4768 return ENOBUFS;
4769 }
4770 } else {
4771 tag = ipsec_findaux(m);
4772 }
4773 if (tag) {
4774 tag->socket = so;
4775 ipsec_optaux(m, tag);
4776 }
4777 return 0;
4778 }
4779
4780 struct socket *
ipsec_getsocket(struct mbuf * m)4781 ipsec_getsocket(struct mbuf *m)
4782 {
4783 struct ipsec_tag *itag;
4784
4785 itag = ipsec_findaux(m);
4786 if (itag) {
4787 return itag->socket;
4788 } else {
4789 return NULL;
4790 }
4791 }
4792
4793 int
ipsec_addhist(struct mbuf * m,int proto,u_int32_t spi)4794 ipsec_addhist(
4795 struct mbuf *m,
4796 int proto,
4797 u_int32_t spi)
4798 {
4799 struct ipsec_tag *itag;
4800 struct ipsec_history *p;
4801 itag = ipsec_addaux(m);
4802 if (!itag) {
4803 return ENOBUFS;
4804 }
4805 if (itag->history_count == IPSEC_HISTORY_MAX) {
4806 return ENOSPC; /* XXX */
4807 }
4808 p = &itag->history[itag->history_count];
4809 itag->history_count++;
4810
4811 bzero(p, sizeof(*p));
4812 p->ih_proto = proto;
4813 p->ih_spi = spi;
4814
4815 return 0;
4816 }
4817
4818 struct ipsec_history *
ipsec_gethist(struct mbuf * m,int * lenp)4819 ipsec_gethist(
4820 struct mbuf *m,
4821 int *lenp)
4822 {
4823 struct ipsec_tag *itag;
4824
4825 itag = ipsec_findaux(m);
4826 if (!itag) {
4827 return NULL;
4828 }
4829 if (itag->history_count == 0) {
4830 return NULL;
4831 }
4832 if (lenp) {
4833 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4834 }
4835 return itag->history;
4836 }
4837
4838 void
ipsec_clearhist(struct mbuf * m)4839 ipsec_clearhist(
4840 struct mbuf *m)
4841 {
4842 struct ipsec_tag *itag;
4843
4844 itag = ipsec_findaux(m);
4845 if (itag) {
4846 itag->history_count = 0;
4847 }
4848 ipsec_optaux(m, itag);
4849 }
4850
4851 __private_extern__ boolean_t
ipsec_send_natt_keepalive(struct secasvar * sav)4852 ipsec_send_natt_keepalive(
4853 struct secasvar *sav)
4854 {
4855 struct mbuf *m = NULL;
4856 int error = 0;
4857 int keepalive_interval = natt_keepalive_interval;
4858
4859 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4860 lck_mtx_lock(sadb_mutex);
4861
4862 if (((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) || sav->remote_ike_port == 0) {
4863 lck_mtx_unlock(sadb_mutex);
4864 return FALSE;
4865 }
4866
4867 if (sav->natt_interval != 0) {
4868 keepalive_interval = (int)sav->natt_interval;
4869 }
4870
4871 // natt timestamp may have changed... reverify
4872 if ((natt_now - sav->natt_last_activity) < keepalive_interval) {
4873 lck_mtx_unlock(sadb_mutex);
4874 return FALSE;
4875 }
4876
4877 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) {
4878 lck_mtx_unlock(sadb_mutex);
4879 return FALSE; // don't send these from the kernel
4880 }
4881
4882 lck_mtx_unlock(sadb_mutex);
4883
4884 m = m_gethdr(M_NOWAIT, MT_DATA);
4885 if (m == NULL) {
4886 return FALSE;
4887 }
4888
4889 lck_mtx_lock(sadb_mutex);
4890 if (sav->sah->saidx.dst.ss_family == AF_INET) {
4891 struct ip_out_args ipoa = {};
4892 struct route ro = {};
4893
4894 ipoa.ipoa_boundif = IFSCOPE_NONE;
4895 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
4896 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4897 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4898
4899 struct ip *ip = (__typeof__(ip))m_mtod(m);
4900
4901 /*
4902 * Type 2: a UDP packet complete with IP header.
4903 * We must do this because UDP output requires
4904 * an inpcb which we don't have. UDP packet
4905 * contains one byte payload. The byte is set
4906 * to 0xFF.
4907 */
4908 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4909 m->m_len = sizeof(struct udpiphdr) + 1;
4910 bzero(m_mtod(m), m->m_len);
4911 m->m_pkthdr.len = m->m_len;
4912
4913 ip->ip_len = (u_short)m->m_len;
4914 ip->ip_ttl = (u_char)ip_defttl;
4915 ip->ip_p = IPPROTO_UDP;
4916 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4917 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4918 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4919 } else {
4920 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4921 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4922 }
4923 if (sav->natt_encapsulated_src_port != 0) {
4924 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
4925 } else {
4926 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4927 }
4928 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4929 uh->uh_dport = htons(sav->remote_ike_port);
4930 uh->uh_ulen = htons(1 + sizeof(*uh));
4931 uh->uh_sum = 0;
4932 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4933
4934 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4935 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) {
4936 ROUTE_RELEASE(&sav->sah->sa_route);
4937 }
4938
4939 route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4940 lck_mtx_unlock(sadb_mutex);
4941
4942 necp_mark_packet_as_keepalive(m, TRUE);
4943 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4944
4945 lck_mtx_lock(sadb_mutex);
4946 route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4947 } else if (sav->sah->saidx.dst.ss_family == AF_INET6) {
4948 struct ip6_out_args ip6oa = {};
4949 struct route_in6 ro6 = {};
4950
4951 ip6oa.ip6oa_flowadv.code = 0;
4952 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
4953 if (sav->sah->outgoing_if) {
4954 ip6oa.ip6oa_boundif = sav->sah->outgoing_if;
4955 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
4956 }
4957
4958 struct ip6_hdr *ip6 = (__typeof__(ip6))m_mtod(m);
4959
4960 /*
4961 * Type 2: a UDP packet complete with IPv6 header.
4962 * We must do this because UDP output requires
4963 * an inpcb which we don't have. UDP packet
4964 * contains one byte payload. The byte is set
4965 * to 0xFF.
4966 */
4967 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip6));
4968 m->m_len = sizeof(struct udphdr) + sizeof(struct ip6_hdr) + 1;
4969 bzero(m_mtod(m), m->m_len);
4970 m->m_pkthdr.len = m->m_len;
4971
4972 ip6->ip6_flow = 0;
4973 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
4974 ip6->ip6_vfc |= IPV6_VERSION;
4975 ip6->ip6_nxt = IPPROTO_UDP;
4976 ip6->ip6_hlim = (u_int8_t)ip6_defhlim;
4977 ip6->ip6_plen = htons(sizeof(struct udphdr) + 1);
4978 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4979 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
4980 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
4981 ip6_output_setsrcifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_scope_id, NULL);
4982 ip6_output_setdstifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_scope_id, NULL);
4983 } else {
4984 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
4985 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
4986 ip6_output_setdstifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_scope_id, NULL);
4987 ip6_output_setsrcifscope(m, ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_scope_id, NULL);
4988 }
4989
4990 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
4991 ip6->ip6_src.s6_addr16[1] = 0;
4992 }
4993 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
4994 ip6->ip6_dst.s6_addr16[1] = 0;
4995 }
4996
4997 if (sav->natt_encapsulated_src_port != 0) {
4998 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
4999 } else {
5000 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5001 }
5002 uh->uh_dport = htons(sav->remote_ike_port);
5003 uh->uh_ulen = htons(1 + sizeof(*uh));
5004 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip6) + sizeof(*uh)) = 0xFF;
5005 uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(uh->uh_ulen) + IPPROTO_UDP));
5006 m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
5007 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
5008
5009 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
5010 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET6) {
5011 ROUTE_RELEASE(&sav->sah->sa_route);
5012 }
5013
5014 route_copyout((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5015 lck_mtx_unlock(sadb_mutex);
5016
5017 necp_mark_packet_as_keepalive(m, TRUE);
5018 error = ip6_output(m, NULL, &ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa);
5019
5020 lck_mtx_lock(sadb_mutex);
5021 route_copyin((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5022 } else {
5023 ipseclog((LOG_ERR, "nat keepalive: invalid address family %u\n", sav->sah->saidx.dst.ss_family));
5024 lck_mtx_unlock(sadb_mutex);
5025 m_freem(m);
5026 return FALSE;
5027 }
5028
5029 if (error == 0) {
5030 sav->natt_last_activity = natt_now;
5031 lck_mtx_unlock(sadb_mutex);
5032 return TRUE;
5033 }
5034
5035 lck_mtx_unlock(sadb_mutex);
5036 return FALSE;
5037 }
5038
/*
 * Build a NAT-T keepalive frame (IPv4 UDP datagram with a one-byte 0xFF
 * payload) into a keepalive-offload descriptor so the interface
 * hardware/firmware can transmit it periodically on our behalf.
 *
 * ifp               -- interface the frame is being offloaded to (may be
 *                      NULL; otherwise must match the SA's outgoing_if).
 * sav               -- the SA needing keepalives.
 * frame             -- descriptor to populate.
 * frame_data_offset -- offset within frame->data where the IP header starts
 *                      (room for link-layer headers before it).
 *
 * Returns TRUE when the frame was populated, FALSE when the SA is not
 * eligible for offload or the frame buffer is too small.  Only IPv4
 * outer headers are supported here.
 */
__private_extern__ bool
ipsec_fill_offload_frame(ifnet_t ifp,
    struct secasvar *sav,
    struct ifnet_keepalive_offload_frame *frame,
    size_t frame_data_offset)
{
	u_int8_t *data = NULL;
	struct ip *ip = NULL;
	struct udphdr *uh = NULL;

	if (sav == NULL || sav->sah == NULL || frame == NULL ||
	    (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
	    sav->sah->saidx.dst.ss_family != AF_INET ||
	    !(sav->flags & SADB_X_EXT_NATT) ||
	    !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
	    !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
	    sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
	    ((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) ||
	    sav->remote_ike_port == 0 ||
	    (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
		/* SA is not eligible for keepalive offload on this interface */
		return FALSE;
	}

	if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
	    IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
		/* Not enough room in this data frame */
		return FALSE;
	}

	data = frame->data;
	ip = (__typeof__(ip))(void *)(data + frame_data_offset);
	uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));

	frame->length = (u_int8_t)(frame_data_offset + sizeof(struct udpiphdr) + 1);
	frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
	frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;

	bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);

	/* Fill out the IPv4 header */
	ip->ip_v = IPVERSION;
	ip->ip_hl = sizeof(struct ip) >> 2;
	ip->ip_off &= htons(~IP_OFFMASK);
	ip->ip_off &= htons(~IP_MF);
	/* DF-bit policy follows the ip4_ipsec_dfbit sysctl */
	switch (ip4_ipsec_dfbit) {
	case 0:                 /* clear DF bit */
		ip->ip_off &= htons(~IP_DF);
		break;
	case 1:                 /* set DF bit */
		ip->ip_off |= htons(IP_DF);
		break;
	default:                /* copy DF bit */
		break;
	}
	ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
	/* RFC 6864: atomic datagrams may use IP ID 0 */
	if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) {
		ip->ip_id = 0;
	} else {
		/* the buffer address only seeds the random ID generator */
		ip->ip_id = ip_randomid((uint64_t)data);
	}
	ip->ip_ttl = (u_char)ip_defttl;
	ip->ip_p = IPPROTO_UDP;
	ip->ip_sum = 0;
	if (sav->sah->dir != IPSEC_DIR_INBOUND) {
		ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
		ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
	} else {
		/* inbound SA: swap src/dst so the frame heads to the peer */
		ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
		ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
	}
	ip->ip_sum = in_cksum_hdr_opt(ip);
	/* Fill out the UDP header */
	if (sav->natt_encapsulated_src_port != 0) {
		uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
	} else {
		uh->uh_sport = htons((u_short)esp_udp_encap_port);
	}
	uh->uh_dport = htons(sav->remote_ike_port);
	uh->uh_ulen = htons(1 + sizeof(*uh));
	uh->uh_sum = 0;
	/* single 0xFF payload byte per RFC 3948 section 2.2 */
	*(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;

	/* most specific interval wins: offload > per-SA > global sysctl */
	if (sav->natt_offload_interval != 0) {
		frame->interval = sav->natt_offload_interval;
	} else if (sav->natt_interval != 0) {
		frame->interval = sav->natt_interval;
	} else {
		frame->interval = (u_int16_t)natt_keepalive_interval;
	}
	return TRUE;
}
5130
/*
 * Read-only sysctl handler exporting the saved IPsec wake packet
 * (ipsec_wake_pkt).  Writes are rejected with EINVAL.  The caller must
 * be root or hold the PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET privilege.
 */
static int
sysctl_ipsec_wake_packet SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	/* this node is read-only; refuse any attempt to set a value */
	if (req->newptr != USER_ADDR_NULL) {
		ipseclog((LOG_ERR, "ipsec: invalid parameters"));
		return EINVAL;
	}

	struct proc *p = current_proc();
	if (p != NULL) {
		/* non-root callers need the dedicated entitlement */
		uid_t uid = kauth_cred_getuid(kauth_cred_get());
		if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET, 0) != 0) {
			ipseclog((LOG_ERR, "process does not hold necessary entitlement to get ipsec wake packet"));
			return EPERM;
		}

		/* copy the whole wake-packet record out to userspace */
		int result = sysctl_io_opaque(req, &ipsec_wake_pkt, sizeof(ipsec_wake_pkt), NULL);

		ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u result %d",
		    __func__,
		    ipsec_wake_pkt.wake_uuid,
		    ipsec_wake_pkt.wake_pkt_spi,
		    ipsec_wake_pkt.wake_pkt_seq,
		    ipsec_wake_pkt.wake_pkt_len,
		    result));

		return result;
	}

	return EINVAL;
}
5163
/* net.link.generic.system.ipsec_wake_pkt: read-only, privileged (see handler) */
SYSCTL_PROC(_net_link_generic_system, OID_AUTO, ipsec_wake_pkt, CTLTYPE_STRUCT | CTLFLAG_RD |
    CTLFLAG_LOCKED, 0, 0, &sysctl_ipsec_wake_packet, "S,ipsec wake packet", "");
5166
/*
 * Capture the first IPsec packet processed after a wake from sleep into
 * ipsec_wake_pkt (truncated to IPSEC_MAX_WAKE_PKT_LEN bytes) and post a
 * KEV_IPSEC_WAKE_PACKET kernel event.  Only the first packet after wake
 * is recorded: ipsec_save_wake_pkt is cleared once a packet is saved,
 * and is re-armed by the sleep/wake handler.
 */
void
ipsec_save_wake_packet(struct mbuf *wake_mbuf, u_int32_t spi, u_int32_t seq)
{
	if (wake_mbuf == NULL) {
		ipseclog((LOG_ERR, "ipsec: bad wake packet"));
		return;
	}

	lck_mtx_lock(sadb_mutex);
	/* not armed (no recent wake, or a packet was already captured) */
	if (__probable(!ipsec_save_wake_pkt)) {
		goto done;
	}

	/* copy at most IPSEC_MAX_WAKE_PKT_LEN bytes of the packet */
	u_int16_t max_len = (wake_mbuf->m_pkthdr.len > IPSEC_MAX_WAKE_PKT_LEN) ? IPSEC_MAX_WAKE_PKT_LEN : (u_int16_t)wake_mbuf->m_pkthdr.len;
	m_copydata(wake_mbuf, 0, max_len, (void *)ipsec_wake_pkt.wake_pkt);
	ipsec_wake_pkt.wake_pkt_len = max_len;

	ipsec_wake_pkt.wake_pkt_spi = spi;
	ipsec_wake_pkt.wake_pkt_seq = seq;

	ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u",
	    __func__,
	    ipsec_wake_pkt.wake_uuid,
	    ipsec_wake_pkt.wake_pkt_spi,
	    ipsec_wake_pkt.wake_pkt_seq,
	    ipsec_wake_pkt.wake_pkt_len));

	/* notify userspace that a wake packet is available via sysctl */
	struct kev_msg ev_msg;
	bzero(&ev_msg, sizeof(ev_msg));

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_IPSEC_SUBCLASS;
	ev_msg.event_code = KEV_IPSEC_WAKE_PACKET;

	struct ipsec_wake_pkt_event_data event_data;
	strlcpy(event_data.wake_uuid, ipsec_wake_pkt.wake_uuid, sizeof(event_data.wake_uuid));
	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		os_log_error(OS_LOG_DEFAULT, "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, ipsec_wake_pkt.wake_uuid);
	}

	/* disarm: only the first packet after wake is captured */
	ipsec_save_wake_pkt = false;
done:
	lck_mtx_unlock(sadb_mutex);
	return;
}
5218
/*
 * Walk every IPsec interface and collect its local TCP/UDP ports (IPv4
 * and IPv6) into port_bitmap, just before system sleep.
 *
 * NOTE(review): port_bitmap is function-static and never cleared, so it
 * accumulates bits across calls, and nothing here reads it afterwards --
 * presumably the ifnet_get_local_ports_extended() call's side effects
 * (and the accumulated bitmap surviving for later inspection) are the
 * point; confirm against callers before changing.
 */
static void
ipsec_get_local_ports(void)
{
	errno_t error;
	ifnet_t *ifp_list;
	uint32_t count, i;
	static uint8_t port_bitmap[bitstr_size(IP_PORTRANGE_SIZE)];

	error = ifnet_list_get_all(IFNET_FAMILY_IPSEC, &ifp_list, &count);
	if (error != 0) {
		os_log_error(OS_LOG_DEFAULT, "%s: ifnet_list_get_all() failed %d",
		    __func__, error);
		return;
	}
	for (i = 0; i < count; i++) {
		ifnet_t ifp = ifp_list[i];

		/*
		 * Get all the TCP and UDP ports for IPv4 and IPv6
		 */
		error = ifnet_get_local_ports_extended(ifp, PF_UNSPEC,
		    IFNET_GET_LOCAL_PORTS_WILDCARDOK |
		    IFNET_GET_LOCAL_PORTS_NOWAKEUPOK |
		    IFNET_GET_LOCAL_PORTS_ANYTCPSTATEOK,
		    port_bitmap);
		if (error != 0) {
			/* log and keep going; one bad interface shouldn't stop the scan */
			os_log_error(OS_LOG_DEFAULT, "%s: ifnet_get_local_ports_extended(%s) failed %d",
			    __func__, if_name(ifp), error);
		}
	}
	ifnet_list_free(ifp_list);
}
5251
/*
 * IOKit sleep/wake callback.  On sleep: snapshot local ports, reset the
 * saved wake-packet record and stamp it with the sleep/wake UUID.  On
 * power-on: if the wake reason suggests a network-triggered wake, arm
 * ipsec_save_wake_pkt so the next inbound IPsec packet is captured.
 */
static IOReturn
ipsec_sleep_wake_handler(void *target, void *refCon, UInt32 messageType,
    void *provider, void *messageArgument, vm_size_t argSize)
{
#pragma unused(target, refCon, provider, messageArgument, argSize)
	switch (messageType) {
	case kIOMessageSystemWillSleep:
	{
		ipsec_get_local_ports();
		ipsec_save_wake_pkt = false;
		memset(&ipsec_wake_pkt, 0, sizeof(ipsec_wake_pkt));
		IOPMCopySleepWakeUUIDKey(ipsec_wake_pkt.wake_uuid,
		    sizeof(ipsec_wake_pkt.wake_uuid));
		ipseclog((LOG_NOTICE,
		    "ipsec: system will sleep, uuid: %s", ipsec_wake_pkt.wake_uuid));
		break;
	}
	case kIOMessageSystemHasPoweredOn:
	{
		char wake_reason[128] = {0};
		size_t size = sizeof(wake_reason);
		if (kernel_sysctlbyname("kern.wakereason", wake_reason, &size, NULL, 0) == 0) {
			/*
			 * NOTE(review): strnstr() returns NULL (== 0) when the
			 * substring is NOT found, so this OR-chain is true
			 * whenever at least one of the listed reasons is
			 * absent -- i.e. for nearly every wake reason.  If the
			 * intent was "wake caused by one of these sources",
			 * the comparisons should be != 0; confirm intent
			 * before changing.
			 */
			if (strnstr(wake_reason, "wlan", size) == 0 ||
			    strnstr(wake_reason, "WL.OutboxNotEmpty", size) == 0 ||
			    strnstr(wake_reason, "baseband", size) == 0 ||
			    strnstr(wake_reason, "bluetooth", size) == 0 ||
			    strnstr(wake_reason, "BT.OutboxNotEmpty", size) == 0) {
				ipsec_save_wake_pkt = true;
				ipseclog((LOG_NOTICE,
				    "ipsec: system has powered on, uuid: %s reason %s", ipsec_wake_pkt.wake_uuid, wake_reason));
			}
		}
		break;
	}
	default:
		break;
	}

	return IOPMAckImplied;
}
5292
5293 void
ipsec_monitor_sleep_wake(void)5294 ipsec_monitor_sleep_wake(void)
5295 {
5296 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
5297
5298 if (sleep_wake_handle == NULL) {
5299 sleep_wake_handle = registerSleepWakeInterest(ipsec_sleep_wake_handler,
5300 NULL, NULL);
5301 if (sleep_wake_handle != NULL) {
5302 ipseclog((LOG_INFO,
5303 "ipsec: monitoring sleep wake"));
5304 }
5305 }
5306 }
5307