xref: /xnu-8020.121.3/bsd/netinet6/esp_output.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2008-2017 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$FreeBSD: src/sys/netinet6/esp_output.c,v 1.1.2.3 2002/04/28 05:40:26 suz Exp $	*/
30 /*	$KAME: esp_output.c,v 1.44 2001/07/26 06:53:15 jinmei Exp $	*/
31 
32 /*
33  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the project nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  */
60 
61 #define _IP_VHL
62 
63 /*
64  * RFC1827/2406 Encapsulated Security Payload.
65  */
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/domain.h>
72 #include <sys/protosw.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/errno.h>
76 #include <sys/time.h>
77 #include <sys/kernel.h>
78 #include <sys/syslog.h>
79 
80 #include <net/if.h>
81 #include <net/route.h>
82 #include <net/multi_layer_pkt_log.h>
83 
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/in_var.h>
88 #include <netinet/udp.h> /* for nat traversal */
89 #include <netinet/tcp.h>
90 #include <netinet/in_tclass.h>
91 
92 #include <netinet/ip6.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet/icmp6.h>
95 
96 #include <netinet6/ipsec.h>
97 #include <netinet6/ipsec6.h>
98 #include <netinet6/ah.h>
99 #include <netinet6/ah6.h>
100 #include <netinet6/esp.h>
101 #include <netinet6/esp6.h>
102 #include <netkey/key.h>
103 #include <netkey/keydb.h>
104 
105 #include <net/net_osdep.h>
106 
107 #include <sys/kdebug.h>
108 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIPSEC, 1)
109 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETIPSEC, 3)
110 #define DBG_FNC_ESPOUT          NETDBG_CODE(DBG_NETIPSEC, (4 << 8))
111 #define DBG_FNC_ENCRYPT         NETDBG_CODE(DBG_NETIPSEC, (5 << 8))
112 
113 static int esp_output(struct mbuf *, u_char *, struct mbuf *,
114     int, struct secasvar *sav);
115 
116 extern int      esp_udp_encap_port;
117 extern u_int64_t natt_now;
118 
119 /*
120  * compute ESP header size.
121  */
122 size_t
esp_hdrsiz(__unused struct ipsecrequest * isr)123 esp_hdrsiz(__unused struct ipsecrequest *isr)
124 {
125 #if 0
126 	/* sanity check */
127 	if (isr == NULL) {
128 		panic("esp_hdrsiz: NULL was passed.");
129 	}
130 
131 
132 	lck_mtx_lock(sadb_mutex);
133 	{
134 		struct secasvar *sav;
135 		const struct esp_algorithm *algo;
136 		const struct ah_algorithm *aalgo;
137 		size_t ivlen;
138 		size_t authlen;
139 		size_t hdrsiz;
140 		size_t maxpad;
141 
142 		/*%%%% this needs to change - no sav in ipsecrequest any more */
143 		sav = isr->sav;
144 
145 		if (isr->saidx.proto != IPPROTO_ESP) {
146 			panic("unsupported mode passed to esp_hdrsiz");
147 		}
148 
149 		if (sav == NULL) {
150 			goto estimate;
151 		}
152 		if (sav->state != SADB_SASTATE_MATURE
153 		    && sav->state != SADB_SASTATE_DYING) {
154 			goto estimate;
155 		}
156 
157 		/* we need transport mode ESP. */
158 		algo = esp_algorithm_lookup(sav->alg_enc);
159 		if (!algo) {
160 			goto estimate;
161 		}
162 		ivlen = sav->ivlen;
163 		if (ivlen < 0) {
164 			goto estimate;
165 		}
166 
167 		if (algo->padbound) {
168 			maxpad = algo->padbound;
169 		} else {
170 			maxpad = 4;
171 		}
172 		maxpad += 1; /* maximum 'extendsiz' is padbound + 1, see esp_output */
173 
174 		if (sav->flags & SADB_X_EXT_OLD) {
175 			/* RFC 1827 */
176 			hdrsiz = sizeof(struct esp) + ivlen + maxpad;
177 		} else {
178 			/* RFC 2406 */
179 			aalgo = ah_algorithm_lookup(sav->alg_auth);
180 			if (aalgo && sav->replay[0] != NULL && sav->key_auth) {
181 				authlen = (aalgo->sumsiz)(sav);
182 			} else {
183 				authlen = 0;
184 			}
185 			hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen;
186 		}
187 
188 		/*
189 		 * If the security association indicates that NATT is required,
190 		 * add the size of the NATT encapsulation header:
191 		 */
192 		if ((sav->flags & SADB_X_EXT_NATT) != 0) {
193 			hdrsiz += sizeof(struct udphdr) + 4;
194 		}
195 
196 		lck_mtx_unlock(sadb_mutex);
197 		return hdrsiz;
198 	}
199 estimate:
200 	lck_mtx_unlock(sadb_mutex);
201 #endif
202 	/*
203 	 * ASSUMING:
204 	 *	sizeof(struct newesp) > sizeof(struct esp). (8)
205 	 *	esp_max_ivlen() = max ivlen for CBC mode
206 	 *	17 = (maximum padding length without random padding length)
207 	 *	   + (Pad Length field) + (Next Header field).
208 	 *	64 = maximum ICV we support.
209 	 *  sizeof(struct udphdr) in case NAT traversal is used
210 	 */
211 	return sizeof(struct newesp) + esp_max_ivlen() + 17 + AH_MAXSUMSIZE + sizeof(struct udphdr);
212 }
213 
214 /*
215  * Modify the packet so that the payload is encrypted.
216  * The mbuf (m) must start with IPv4 or IPv6 header.
217  * On failure, free the given mbuf and return NULL.
218  *
219  * on invocation:
220  *	m   nexthdrp md
221  *	v   v        v
222  *	IP ......... payload
223  * during the encryption:
224  *	m   nexthdrp mprev md
225  *	v   v        v     v
226  *	IP ............... esp iv payload pad padlen nxthdr
227  *	                   <--><-><------><--------------->
228  *	                   esplen plen    extendsiz
229  *	                       ivlen
230  *	                   <-----> esphlen
231  *	<-> hlen
232  *	<-----------------> espoff
233  */
static int
esp_output(
	struct mbuf *m,
	u_char *nexthdrp,
	struct mbuf *md,
	int af,
	struct secasvar *sav)
{
	struct mbuf *n;
	struct mbuf *mprev;
	struct esp *esp;
	struct esptail *esptail;
	const struct esp_algorithm *algo;
	struct tcphdr th = {};
	u_int32_t spi;
	/*
	 * NOTE(review): seq is assigned only on the new-format (non
	 * SADB_X_EXT_OLD) path below; it is later read for MPKL TCP
	 * logging.  Confirm OLD-format SAs can never reach that log
	 * call, otherwise seq is used uninitialized.
	 */
	u_int32_t seq;
	size_t inner_payload_len = 0;
	u_int8_t inner_protocol = 0;
	u_int8_t nxt = 0;
	size_t plen;    /*payload length to be encrypted*/
	size_t espoff;
	size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */
	int ivlen;
	int afnumber;
	size_t extendsiz;
	int error = 0;
	struct ipsecstat *stat;
	struct udphdr *udp = NULL;
	/*
	 * UDP encapsulation is used when the SA carries SADB_X_EXT_NATT
	 * and either a global encap port or a per-SA translated source
	 * port is configured.
	 */
	int     udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) &&
	    ((esp_udp_encap_port & 0xFFFF) != 0 || sav->natt_encapsulated_src_port != 0));

	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0);
	switch (af) {
	case AF_INET:
		afnumber = 4;
		stat = &ipsecstat;
		break;
	case AF_INET6:
		afnumber = 6;
		stat = &ipsec6stat;
		break;
	default:
		ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, 0, 0, 0, 0);
		return 0;       /* no change at all */
	}

	/*
	 * When the SA keeps one sequence-number space per traffic class,
	 * derive the class from the packet's DSCP bits.
	 */
	mbuf_traffic_class_t traffic_class = 0;
	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
		u_int8_t dscp = 0;
		switch (af) {
		case AF_INET:
		{
			struct ip *ip = mtod(m, struct ip *);
			dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
			break;
		}
		case AF_INET6:
		{
			struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
			dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
			break;
		}
		default:
			panic("esp_output: should not reach here");
		}
		traffic_class = rfc4594_dscp_to_tc(dscp);
	}

	/* some sanity check */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] == NULL) {
		switch (af) {
		case AF_INET:
		{
			struct ip *ip;

			ip = mtod(m, struct ip *);
			ipseclog((LOG_DEBUG, "esp4_output: internal error: "
			    "sav->replay is null: %x->%x, SPI=%u\n",
			    (u_int32_t)ntohl(ip->ip_src.s_addr),
			    (u_int32_t)ntohl(ip->ip_dst.s_addr),
			    (u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			break;
		}
		case AF_INET6:
			ipseclog((LOG_DEBUG, "esp6_output: internal error: "
			    "sav->replay is null: SPI=%u\n",
			    (u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
			break;
		default:
			panic("esp_output: should not reach here");
		}
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2, 0, 0, 0, 0);
		return EINVAL;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
		    "SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3, 0, 0, 0, 0);
		return EINVAL;
	}
	spi = sav->spi;
	ivlen = sav->ivlen;
	/* should be okey */
	if (ivlen < 0) {
		panic("invalid ivlen");
	}

	{
		/*
		 * insert ESP header.
		 * XXX inserts ESP header right after IPv4 header.  should
		 * chase the header chain.
		 * XXX sequential number
		 */
		struct ip *ip = NULL;
		struct ip6_hdr *ip6 = NULL;
		size_t esplen; /* sizeof(struct esp/newesp) */
		size_t hlen = 0; /* ip header len */

		if (sav->flags & SADB_X_EXT_OLD) {
			/* RFC 1827 */
			esplen = sizeof(struct esp);
		} else {
			/* RFC 2406 */
			if (sav->flags & SADB_X_EXT_DERIV) {
				esplen = sizeof(struct esp);
			} else {
				esplen = sizeof(struct newesp);
			}
		}
		esphlen = esplen + ivlen;

		/* locate the mbuf immediately preceding the payload (md) */
		for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) {
			;
		}
		if (mprev == NULL || mprev->m_next != md) {
			ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
			    afnumber));
			m_freem(m);
			KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4, 0, 0, 0, 0);
			return EINVAL;
		}

		/* plen = total length of the to-be-encrypted payload chain */
		plen = 0;
		for (n = md; n; n = n->m_next) {
			plen += n->m_len;
		}

		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
#ifdef _IP_VHL
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
			hlen = ip->ip_hl << 2;
#endif
			break;
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			hlen = sizeof(*ip6);
			break;
		}

		/* grab info for packet logging */
		struct secashead *sah = sav->sah;
		if (net_mpklog_enabled &&
		    sah != NULL && sah->ipsec_if != NULL) {
			ifnet_t ifp = sah->ipsec_if;

			if ((ifp->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
				size_t iphlen = 0;

				if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
					/*
					 * Tunnel mode: the inner IP header sits at the
					 * start of md; peek at it to extract the inner
					 * TCP header for logging.
					 */
					struct ip *inner_ip = mtod(md, struct ip *);
					if (IP_VHL_V(inner_ip->ip_vhl) == IPVERSION) {
#ifdef _IP_VHL
						iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
#else
						iphlen = inner_ip->ip_hl << 2;
#endif
						inner_protocol = inner_ip->ip_p;
					} else if (IP_VHL_V(inner_ip->ip_vhl) == IPV6_VERSION) {
						struct ip6_hdr *inner_ip6 = mtod(md, struct ip6_hdr *);
						iphlen = sizeof(struct ip6_hdr);
						inner_protocol = inner_ip6->ip6_nxt;
					}

					if (inner_protocol == IPPROTO_TCP) {
						if ((int)(iphlen + sizeof(th)) <=
						    (m->m_pkthdr.len - m->m_len)) {
							m_copydata(md, (int)iphlen, sizeof(th), (u_int8_t *)&th);
						}

						inner_payload_len = m->m_pkthdr.len - m->m_len - iphlen - (th.th_off << 2);
					}
				} else {
					/* Transport mode: the outer header is the only IP header. */
					iphlen = hlen;
					if (af == AF_INET) {
						inner_protocol = ip->ip_p;
					} else if (af == AF_INET6) {
						inner_protocol = ip6->ip6_nxt;
					}

					if (inner_protocol == IPPROTO_TCP) {
						if ((int)(iphlen + sizeof(th)) <=
						    m->m_pkthdr.len) {
							m_copydata(m, (int)iphlen, sizeof(th), (u_int8_t *)&th);
						}

						inner_payload_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
					}
				}
			}
		}

		/* make the packet over-writable */
		mprev->m_next = NULL;
		if ((md = ipsec_copypkt(md)) == NULL) {
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		mprev->m_next = md;

		/*
		 * Translate UDP source port back to its original value.
		 * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transort mode.
		 */
		if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
			/* if not UDP - drop it */
			if (ip->ip_p != IPPROTO_UDP) {
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EINVAL;
				goto fail;
			}

			udp = mtod(md, struct udphdr *);

			/* if src port not set in sav - find it */
			if (sav->natt_encapsulated_src_port == 0) {
				if (key_natt_get_translated_port(sav) == 0) {
					m_freem(m);
					error = EINVAL;
					goto fail;
				}
			}
			if (sav->remote_ike_port == htons(udp->uh_dport)) {
				/* translate UDP port */
				udp->uh_dport = sav->natt_encapsulated_src_port;
				udp->uh_sum = 0; /* don't need checksum with ESP auth */
			} else {
				/* drop the packet - can't translate the port */
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EINVAL;
				goto fail;
			}
		}


		/* espoff = byte offset of the ESP header within the packet */
		espoff = m->m_pkthdr.len - plen;

		if (udp_encapsulate) {
			esphlen += sizeof(struct udphdr);
			espoff += sizeof(struct udphdr);
		}

		/*
		 * grow the mbuf to accomodate ESP header.
		 * before: IP ... payload
		 * after:  IP ... [UDP] ESP IV payload
		 */
		if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
			/* no safe leading space in md: splice in a fresh mbuf */
			MGET(n, M_DONTWAIT, MT_DATA);
			if (!n) {
				m_freem(m);
				error = ENOBUFS;
				goto fail;
			}
			VERIFY(esphlen <= INT32_MAX);
			n->m_len = (int)esphlen;
			mprev->m_next = n;
			n->m_next = md;
			m->m_pkthdr.len += esphlen;
			if (udp_encapsulate) {
				udp = mtod(n, struct udphdr *);
				esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
			} else {
				esp = mtod(n, struct esp *);
			}
		} else {
			/* reuse md's leading space for [UDP+]ESP+IV */
			md->m_len += esphlen;
			md->m_data -= esphlen;
			m->m_pkthdr.len += esphlen;
			esp = mtod(md, struct esp *);
			if (udp_encapsulate) {
				udp = mtod(md, struct udphdr *);
				esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
			} else {
				esp = mtod(md, struct esp *);
			}
		}

		switch (af) {
		case AF_INET:
			if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) {
				ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)esphlen);
			} else {
				ipseclog((LOG_ERR,
				    "IPv4 ESP output: size exceeds limit\n"));
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EMSGSIZE;
				goto fail;
			}
			break;
		case AF_INET6:
			/* total packet length will be computed in ip6_output() */
			break;
		}
	}

	/* initialize esp header. */
	esp->esp_spi = spi;
	if ((sav->flags & SADB_X_EXT_OLD) == 0) {
		struct newesp *nesp;
		nesp = (struct newesp *)esp;
		if (sav->replay[traffic_class]->seq == sav->replay[traffic_class]->lastseq) {
			if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
				/* XXX Is it noisy ? */
				ipseclog((LOG_WARNING,
				    "replay counter overflowed. %s\n",
				    ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(stat->out_inval);
				m_freem(m);
				KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5, 0, 0, 0, 0);
				return EINVAL;
			}
		}
		/* bump the per-class replay counters under the SADB lock */
		lck_mtx_lock(sadb_mutex);
		sav->replay[traffic_class]->count++;
		sav->replay[traffic_class]->seq++;
		lck_mtx_unlock(sadb_mutex);
		/*
		 * XXX sequence number must not be cycled, if the SA is
		 * installed by IKE daemon.
		 */
		nesp->esp_seq = htonl(sav->replay[traffic_class]->seq);
		seq = sav->replay[traffic_class]->seq;
	}

	{
		/*
		 * find the last mbuf. make some room for ESP trailer.
		 */
		struct ip *ip = NULL;
		size_t padbound;
		u_char *extend;
		int i;
		int randpadmax;

		if (algo->padbound) {
			padbound = algo->padbound;
		} else {
			padbound = 4;
		}
		/* ESP packet, including nxthdr field, must be length of 4n */
		if (padbound < 4) {
			padbound = 4;
		}

		/*
		 * extendsiz = pad bytes + Pad Length + Next Header;
		 * a result of 1 cannot hold the two trailer octets, so
		 * round up a full padbound.
		 */
		extendsiz = padbound - (plen % padbound);
		if (extendsiz == 1) {
			extendsiz = padbound + 1;
		}

		/* random padding */
		switch (af) {
		case AF_INET:
			randpadmax = ip4_esp_randpad;
			break;
		case AF_INET6:
			randpadmax = ip6_esp_randpad;
			break;
		default:
			randpadmax = -1;
			break;
		}
		if (randpadmax < 0 || plen + extendsiz >= randpadmax) {
			;
		} else {
			size_t pad;

			/* round */
			randpadmax = (int)((randpadmax / padbound) * padbound);
			/*
			 * NOTE(review): "+ extendsiz" here (vs. "-") looks
			 * suspicious but matches the long-standing KAME code;
			 * pad is only an upper bound for random() below, so
			 * the over-estimate is bounded by the MLEN/256 clamp.
			 */
			pad = (randpadmax - plen + extendsiz) / padbound;

			if (pad > 0) {
				pad = (random() % pad) * padbound;
			} else {
				pad = 0;
			}

			/*
			 * make sure we do not pad too much.
			 * MLEN limitation comes from the trailer attachment
			 * code below.
			 * 256 limitation comes from sequential padding.
			 * also, the 1-octet length field in ESP trailer imposes
			 * limitation (but is less strict than sequential padding
			 * as length field do not count the last 2 octets).
			 */
			if (extendsiz + pad <= MLEN && extendsiz + pad < 256) {
				extendsiz += pad;
			}
		}

		n = m;
		while (n->m_next) {
			n = n->m_next;
		}

		/*
		 * if M_EXT, the external mbuf data may be shared among
		 * two consequtive TCP packets, and it may be unsafe to use the
		 * trailing space.
		 */
		if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) {
			extend = mtod(n, u_char *) + n->m_len;
			n->m_len += (int)extendsiz;
			m->m_pkthdr.len += extendsiz;
		} else {
			struct mbuf *nn;

			MGET(nn, M_DONTWAIT, MT_DATA);
			if (!nn) {
				ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf",
				    afnumber));
				m_freem(m);
				error = ENOBUFS;
				goto fail;
			}
			extend = mtod(nn, u_char *);
			VERIFY(extendsiz <= INT_MAX);
			nn->m_len = (int)extendsiz;
			nn->m_next = NULL;
			n->m_next = nn;
			n = nn;
			m->m_pkthdr.len += extendsiz;
		}
		/* fill padding per the SA's padding policy */
		switch (sav->flags & SADB_X_EXT_PMASK) {
		case SADB_X_EXT_PRAND:
			key_randomfill(extend, extendsiz);
			break;
		case SADB_X_EXT_PZERO:
			bzero(extend, extendsiz);
			break;
		case SADB_X_EXT_PSEQ:
			for (i = 0; i < extendsiz; i++) {
				extend[i] = (i + 1) & 0xff;
			}
			break;
		}

		/* original next header moves into the ESP trailer */
		nxt = *nexthdrp;
		if (udp_encapsulate) {
			*nexthdrp = IPPROTO_UDP;

			/* Fill out the UDP header */
			if (sav->natt_encapsulated_src_port != 0) {
				udp->uh_sport = (u_short)sav->natt_encapsulated_src_port;
			} else {
				udp->uh_sport = htons((u_short)esp_udp_encap_port);
			}
			udp->uh_dport = htons(sav->remote_ike_port);
			// udp->uh_len set later, after all length tweaks are complete
			udp->uh_sum = 0;

			/* Update last sent so we know if we need to send keepalive */
			sav->natt_last_activity = natt_now;
		} else {
			*nexthdrp = IPPROTO_ESP;
		}

		/* initialize esp trailer. */
		esptail = (struct esptail *)
		    (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail));
		esptail->esp_nxt = nxt;
		VERIFY((extendsiz - 2) <= UINT8_MAX);
		esptail->esp_padlen = (u_int8_t)(extendsiz - 2);

		/* modify IP header (for ESP header part only) */
		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
				ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)extendsiz);
			} else {
				ipseclog((LOG_ERR,
				    "IPv4 ESP output: size exceeds limit\n"));
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EMSGSIZE;
				goto fail;
			}
			break;
		case AF_INET6:
			/* total packet length will be computed in ip6_output() */
			break;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	error = esp_schedule(algo, sav);
	if (error) {
		m_freem(m);
		IPSEC_STAT_INCREMENT(stat->out_inval);
		goto fail;
	}

	/*
	 * encrypt the packet, based on security association
	 * and the algorithm specified.
	 */
	if (!algo->encrypt) {
		panic("internal error: no encrypt function");
	}
	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
	if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) {
		/* m is already freed */
		ipseclog((LOG_ERR, "packet encryption failure\n"));
		IPSEC_STAT_INCREMENT(stat->out_inval);
		error = EINVAL;
		KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
		goto fail;
	}
	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);

	/*
	 * calculate ICV if required.
	 */
	size_t siz = 0;
	u_char authbuf[AH_MAXSUMSIZE] __attribute__((aligned(4)));

	if (algo->finalizeencrypt) {
		/* combined-mode cipher (e.g. AEAD): ICV comes from the cipher itself */
		siz = algo->icvlen;
		if ((*algo->finalizeencrypt)(sav, authbuf, siz)) {
			ipseclog((LOG_ERR, "packet encryption ICV failure\n"));
			IPSEC_STAT_INCREMENT(stat->out_inval);
			error = EINVAL;
			KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
			goto fail;
		}
		goto fill_icv;
	}

	/* separate-auth path: skip authentication if the SA has none configured */
	if (!sav->replay[traffic_class]) {
		goto noantireplay;
	}
	if (!sav->key_auth) {
		goto noantireplay;
	}
	/*
	 * NOTE(review): this compares the key_auth pointer against the
	 * SADB_AALG_NONE constant (0) and thus appears redundant with
	 * the NULL check just above — confirm key_auth semantics.
	 */
	if (sav->key_auth == SADB_AALG_NONE) {
		goto noantireplay;
	}

	{
		const struct ah_algorithm *aalgo;

		aalgo = ah_algorithm_lookup(sav->alg_auth);
		if (!aalgo) {
			goto noantireplay;
		}
		/* ICV size rounded up to a 4-byte boundary */
		siz = ((aalgo->sumsiz)(sav) + 3) & ~(4 - 1);
		if (AH_MAXSUMSIZE < siz) {
			panic("assertion failed for AH_MAXSUMSIZE");
		}

		if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) {
			ipseclog((LOG_ERR, "ESP checksum generation failure\n"));
			m_freem(m);
			error = EINVAL;
			IPSEC_STAT_INCREMENT(stat->out_inval);
			goto fail;
		}
	}

fill_icv:
	{
		/* append the ICV (authbuf, siz bytes) to the end of the chain */
		struct ip *ip;
		u_char *p;

		n = m;
		while (n->m_next) {
			n = n->m_next;
		}

		if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /* XXX */
			n->m_len += siz;
			m->m_pkthdr.len += siz;
			p = mtod(n, u_char *) + n->m_len - siz;
		} else {
			struct mbuf *nn;

			MGET(nn, M_DONTWAIT, MT_DATA);
			if (!nn) {
				ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output",
				    afnumber));
				m_freem(m);
				error = ENOBUFS;
				goto fail;
			}
			nn->m_len = (int)siz;
			nn->m_next = NULL;
			n->m_next = nn;
			n = nn;
			m->m_pkthdr.len += siz;
			p = mtod(nn, u_char *);
		}
		bcopy(authbuf, p, siz);

		/* modify IP header (for ESP header part only) */
		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
				ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)siz);
			} else {
				ipseclog((LOG_ERR,
				    "IPv4 ESP output: size exceeds limit\n"));
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EMSGSIZE;
				goto fail;
			}
			break;
		case AF_INET6:
			/* total packet length will be computed in ip6_output() */
			break;
		}
	}

	/* all lengths are final: fill in the UDP encap header's length/checksum */
	if (udp_encapsulate) {
		struct ip *ip;
		struct ip6_hdr *ip6;

		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			udp->uh_ulen = htons((u_int16_t)(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2)));
			break;
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			VERIFY((plen + siz + extendsiz + esphlen) <= UINT16_MAX);
			udp->uh_ulen = htons((u_int16_t)(plen + siz + extendsiz + esphlen));
			udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP));
			m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			break;
		}
	}

noantireplay:
	/* optional multi-layer packet logging for inner TCP flows */
	if (net_mpklog_enabled && sav->sah != NULL &&
	    sav->sah->ipsec_if != NULL &&
	    (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) &&
	    inner_protocol == IPPROTO_TCP) {
		MPKL_ESP_OUTPUT_TCP(esp_mpkl_log_object,
		    ntohl(spi), seq,
		    ntohs(th.th_sport), ntohs(th.th_dport),
		    ntohl(th.th_seq), ntohl(th.th_ack),
		    inner_payload_len, th.th_flags);
	}

	lck_mtx_lock(sadb_mutex);
	if (!m) {
		ipseclog((LOG_ERR,
		    "NULL mbuf after encryption in esp%d_output", afnumber));
	} else {
		stat->out_success++;
	}
	stat->out_esphist[sav->alg_enc]++;
	lck_mtx_unlock(sadb_mutex);
	key_sa_recordxfer(sav, m);
	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6, 0, 0, 0, 0);
	return 0;

fail:
	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, error, 0, 0, 0);
	return error;
}
936 
937 int
esp4_output(struct mbuf * m,struct secasvar * sav)938 esp4_output(
939 	struct mbuf *m,
940 	struct secasvar *sav)
941 {
942 	struct ip *ip;
943 	if (m->m_len < sizeof(struct ip)) {
944 		ipseclog((LOG_DEBUG, "esp4_output: first mbuf too short\n"));
945 		m_freem(m);
946 		return EINVAL;
947 	}
948 	ip = mtod(m, struct ip *);
949 	/* XXX assumes that m->m_next points to payload */
950 	return esp_output(m, &ip->ip_p, m->m_next, AF_INET, sav);
951 }
952 
953 int
esp6_output(struct mbuf * m,u_char * nexthdrp,struct mbuf * md,struct secasvar * sav)954 esp6_output(
955 	struct mbuf *m,
956 	u_char *nexthdrp,
957 	struct mbuf *md,
958 	struct secasvar *sav)
959 {
960 	if (m->m_len < sizeof(struct ip6_hdr)) {
961 		ipseclog((LOG_DEBUG, "esp6_output: first mbuf too short\n"));
962 		m_freem(m);
963 		return EINVAL;
964 	}
965 	return esp_output(m, nexthdrp, md, AF_INET6, sav);
966 }
967