xref: /xnu-12377.81.4/bsd/netinet6/esp_rijndael.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2008-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $	*/
30 /*	$KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $	*/
31 
32 /*
33  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the project nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  */
60 
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/socket.h>
64 #include <sys/queue.h>
65 #include <sys/syslog.h>
66 #include <sys/mbuf.h>
67 #include <sys/mcache.h>
68 
69 #include <kern/locks.h>
70 
71 #include <net/if.h>
72 #include <net/route.h>
73 
74 #include <netinet6/ipsec.h>
75 #include <netinet6/esp.h>
76 #include <netinet6/esp_rijndael.h>
77 
78 #include <libkern/crypto/aes.h>
79 
80 #include <netkey/key.h>
81 
82 #include <net/net_osdep.h>
83 
84 #define MAX_REALIGN_LEN 2000
85 #define AES_BLOCKLEN 16
86 #define ESP_GCM_SALT_LEN 4   // RFC 4106 Section 4
87 #define ESP_GCM_IVLEN 8
88 #define ESP_GCM_ALIGN 16
89 
90 typedef struct {
91 	ccgcm_ctx *decrypt;
92 	ccgcm_ctx *encrypt;
93 	ccgcm_ctx ctxt[0];
94 } aes_gcm_ctx;
95 
96 size_t
esp_aes_schedlen(__unused const struct esp_algorithm * algo)97 esp_aes_schedlen(
98 	__unused const struct esp_algorithm *algo)
99 {
100 	return sizeof(aes_ctx);
101 }
102 
103 int
esp_aes_schedule(__unused const struct esp_algorithm * algo,struct secasvar * sav)104 esp_aes_schedule(
105 	__unused const struct esp_algorithm *algo,
106 	struct secasvar *sav)
107 {
108 	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
109 	aes_ctx *ctx = (aes_ctx*)sav->sched_enc;
110 
111 	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
112 	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
113 
114 	return 0;
115 }
116 
117 
118 /* The following 2 functions decrypt or encrypt the contents of
119  * the mbuf chain passed in keeping the IP and ESP header's in place,
120  * along with the IV.
121  * The code attempts to call the crypto code with the largest chunk
122  * of data it can based on the amount of source data in
123  * the current source mbuf and the space remaining in the current
 * destination mbuf.  The crypto code requires data to be a multiple
 * of 16 bytes.  A separate buffer is used when a 16 byte block spans
126  * mbufs.
127  *
128  * m = mbuf chain
129  * off = offset to ESP header
130  *
131  * local vars for source:
132  * soff = offset from beginning of the chain to the head of the
133  *			current mbuf.
134  * scut = last mbuf that contains headers to be retained
135  * scutoff = offset to end of the headers in scut
136  * s = the current mbuf
137  * sn = current offset to data in s (next source data to process)
138  *
139  * local vars for dest:
140  * d0 = head of chain
141  * d = current mbuf
142  * dn = current offset in d (next location to store result)
143  */
144 
145 
/*
 * AES-CBC decrypt of an ESP payload held in an mbuf chain.
 *
 * m     = packet mbuf chain; consumed (freed) on every error path
 * off   = offset from the start of the chain to the ESP header
 * sav   = SA whose sched_enc holds the aes_ctx key schedules
 * algo  = algorithm descriptor (only algo->name is used, for logging)
 * ivlen = IV length; must equal AES_BLOCKLEN
 *
 * Plaintext is written into a newly allocated destination chain (d0)
 * which replaces everything after the retained headers (scut/scutoff).
 * Returns 0, or EINVAL/ENOBUFS/ENOMEM with m freed.
 */
int
esp_cbc_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned;
	u_int8_t *__bidi_indexable sp_aligned = NULL;
	struct mbuf *scut;
	int scutoff;
	int     i, len;


	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		/* RFC 2406 (new ESP) header layout */
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	/* CBC: ciphertext must be whole AES blocks */
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	VERIFY(ivoff <= INT_MAX);

	/* grab iv */
	m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	/* scut/scutoff: last mbuf (and offset) of headers to keep in place */
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	/* NOTE(review): assumes the chain really holds m_pkthdr.len bytes;
	 * s is dereferenced without a NULL check below — presumably callers
	 * guarantee a consistent chain; confirm against esp_input. */
	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;      // full blocks only
		} else {
			/* body is non-continuous: gather one block that spans mbufs */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;                     // 1 block only in sbuf
		}

		/* destination: allocate a new mbuf when the current one is full */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);      /* bytes still to decrypt */
			if (d && i > MLEN) {
				/* try a 2K cluster, then a 4K (big) cluster */
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			/* usable space, trimmed to whole blocks and to what's left */
			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					kfree_data(sp_aligned, MAX_REALIGN_LEN);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			/* bounce buffer is allocated lazily and reused across iterations */
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched_enc)->decrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		// next iv: CBC chains from the last ciphertext block just consumed
		memcpy(iv, sp + len - AES_BLOCKLEN, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		kfree_data(sp_aligned, MAX_REALIGN_LEN);
		sp_aligned = NULL;
	}

	/* just in case: scrub key-dependent material from the stack */
	cc_clear(sizeof(iv), iv);
	cc_clear(sizeof(sbuf), sbuf);

	return 0;
}
360 
/*
 * AES-CBC encrypt of an ESP payload held in an mbuf chain.
 *
 * Mirror image of esp_cbc_decrypt_aes: walks the source chain from
 * bodyoff, encrypts whole AES blocks into a freshly allocated
 * destination chain, and splices that chain in after the retained
 * headers.  The IV is taken from sav->iv, written into the packet at
 * ivoff, and stirred (key_sa_stir_iv) on success.
 *
 * Returns 0, or EINVAL/ENOBUFS/ENOMEM with m freed.
 */
int
esp_cbc_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *ivp, *dptr, *ivp_unaligned;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned;
	u_int8_t *__bidi_indexable sp_aligned = NULL;
	u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		/* RFC 2406 (new ESP) header layout */
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	VERIFY(ivoff <= INT_MAX);

	/* put iv into the packet */
	m_copyback(m, (int)ivoff, ivlen, sav->iv);
	ivp = (u_int8_t *) sav->iv;

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	/* caller must have padded the payload to whole AES blocks */
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	/* scut/scutoff: last mbuf (and offset) of headers to keep in place */
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;      // full blocks only
		} else {
			/* body is non-continuous: gather one block that spans mbufs */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;                     // 1 block only in sbuf
		}

		/* destination: allocate a new mbuf when the current one is full */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);      /* bytes still to encrypt */
			if (d && i > MLEN) {
				/* try a 2K cluster, then a 4K (big) cluster */
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			/* usable space, trimmed to whole blocks and to what's left */
			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					kfree_data(sp_aligned, MAX_REALIGN_LEN);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			/* bounce buffer is allocated lazily and reused across iterations */
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(ivp)) {
			ivp_unaligned = NULL;
		} else {
			ivp_unaligned = ivp;
			ivp = ivp_aligned_buf;
			memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
		}
		// no need to check output pointer alignment
		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
		    (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched_enc)->encrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}
		if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
			ivp = ivp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* next iv: CBC chains from the last ciphertext block just written */
		ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		kfree_data(sp_aligned, MAX_REALIGN_LEN);
		sp_aligned = NULL;
	}

	/* just in case: scrub plaintext copy, then advance the SA's IV */
	cc_clear(sizeof(sbuf), sbuf);
	key_sa_stir_iv(sav);

	return 0;
}
588 
589 int
esp_aes_cbc_encrypt_data(struct secasvar * sav,uint8_t * __sized_by (input_data_len)input_data,size_t input_data_len,struct newesp * esp_hdr,uint8_t * __sized_by (out_ivlen)out_iv,size_t out_ivlen,uint8_t * __sized_by (output_data_len)output_data,size_t output_data_len)590 esp_aes_cbc_encrypt_data(struct secasvar *sav,
591     uint8_t *__sized_by(input_data_len)input_data, size_t input_data_len,
592     struct newesp *esp_hdr,
593     uint8_t *__sized_by(out_ivlen)out_iv, size_t out_ivlen,
594     uint8_t *__sized_by(output_data_len)output_data, size_t output_data_len)
595 {
596 	aes_encrypt_ctx *ctx = NULL;
597 	uint8_t *ivp = NULL;
598 	aes_rval rc = 0;
599 
600 	ESP_CHECK_ARG(sav);
601 	ESP_CHECK_ARG(input_data);
602 	ESP_CHECK_ARG(esp_hdr);
603 	ESP_CHECK_ARG(out_iv);
604 	ESP_CHECK_ARG(output_data);
605 
606 	VERIFY(input_data_len > 0);
607 	VERIFY(output_data_len >= input_data_len);
608 
609 	VERIFY(out_ivlen == AES_BLOCKLEN);
610 	memcpy(out_iv, sav->iv, out_ivlen);
611 	ivp = (uint8_t *)sav->iv;
612 
613 	if (input_data_len % AES_BLOCKLEN) {
614 		esp_log_err("payload length %zu must be multiple of "
615 		    "AES_BLOCKLEN, SPI 0x%08x", input_data_len, ntohl(sav->spi));
616 		return EINVAL;
617 	}
618 
619 	ctx = (aes_encrypt_ctx *)(&(((aes_ctx *)sav->sched_enc)->encrypt));
620 
621 	VERIFY((input_data_len >> 4) <= UINT32_MAX);
622 	if (__improbable((rc = aes_encrypt_cbc(input_data, ivp,
623 	    (unsigned int)(input_data_len >> 4), output_data, ctx)) != 0)) {
624 		esp_log_err("encrypt failed %d, SPI 0x%08x", rc, ntohl(sav->spi));
625 		return rc;
626 	}
627 
628 	key_sa_stir_iv(sav);
629 	return 0;
630 }
631 
632 int
esp_aes_cbc_decrypt_data(struct secasvar * sav,uint8_t * __sized_by (input_data_len)input_data,size_t input_data_len,struct newesp * esp_hdr,uint8_t * __sized_by (ivlen)iv,size_t ivlen,uint8_t * __sized_by (output_data_len)output_data,size_t output_data_len)633 esp_aes_cbc_decrypt_data(struct secasvar *sav,
634     uint8_t *__sized_by(input_data_len)input_data, size_t input_data_len,
635     struct newesp *esp_hdr,
636     uint8_t *__sized_by(ivlen)iv, size_t ivlen,
637     uint8_t *__sized_by(output_data_len)output_data, size_t output_data_len)
638 {
639 	aes_decrypt_ctx *ctx = NULL;
640 	aes_rval rc = 0;
641 
642 	ESP_CHECK_ARG(sav);
643 	ESP_CHECK_ARG(input_data);
644 	ESP_CHECK_ARG(esp_hdr);
645 	ESP_CHECK_ARG(output_data);
646 
647 	VERIFY(input_data_len > 0);
648 	VERIFY(output_data_len >= input_data_len);
649 
650 	if (__improbable(ivlen != AES_BLOCKLEN)) {
651 		esp_log_err("ivlen(%zu) != AES_BLOCKLEN, SPI 0x%08x",
652 		    ivlen, ntohl(sav->spi));
653 		return EINVAL;
654 	}
655 
656 	if (__improbable(input_data_len % AES_BLOCKLEN)) {
657 		esp_packet_log_err("input data length(%zu) must be a multiple of "
658 		    "AES_BLOCKLEN", input_data_len);
659 		return EINVAL;
660 	}
661 
662 	ctx = (aes_decrypt_ctx *)(&(((aes_ctx *)sav->sched_enc)->decrypt));
663 
664 	VERIFY((input_data_len >> 4) <= UINT32_MAX);
665 	if (__improbable((rc = aes_decrypt_cbc(input_data, iv,
666 	    (unsigned int)(input_data_len >> 4), output_data, ctx)) != 0)) {
667 		esp_log_err("decrypt failed %d, SPI 0x%08x", rc, ntohl(sav->spi));
668 		return rc;
669 	}
670 
671 	return 0;
672 }
673 
674 size_t
esp_gcm_schedlen(__unused const struct esp_algorithm * algo)675 esp_gcm_schedlen(
676 	__unused const struct esp_algorithm *algo)
677 {
678 	return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN;
679 }
680 
/*
 * Round the sched_enc pointer p up to ESP_GCM_ALIGN and store the result
 * in ctx.  Wrapped in do { } while (0) so the temporaries don't leak into
 * the caller's scope (the old form declared aligned_p/diff at the call
 * site, so the macro could not be used twice in one scope and misbehaved
 * in unbraced control statements).  Computation is unchanged.
 */
#define P2ROUNDUP_GCM(p, ctx) do {                                            \
	intptr_t aligned_p = P2ROUNDUP(p, ESP_GCM_ALIGN);                     \
	intptr_t diff = (aligned_p - (intptr_t)p);                            \
	(ctx) = (aes_gcm_ctx *)(void *)((uint8_t *)p + diff);                 \
} while (0)
685 
/*
 * Set up the per-SA AES-GCM contexts inside sav->sched_enc.
 *
 * Layout: an ESP_GCM_ALIGN-aligned aes_gcm_ctx header, followed by the
 * corecrypto decrypt context, then the encrypt context (see esp_gcm_schedlen).
 * The last ESP_GCM_SALT_LEN bytes of key_enc are the RFC 4106 salt, not
 * key material, so the key length passed to corecrypto excludes them.
 *
 * Returns 0 or an error from corecrypto / EINVAL on bad parameters.
 */
int
esp_gcm_schedule( __unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_gcm_ctx *ctx = NULL;
	const u_int ivlen = sav->ivlen;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	/* NOTE(review): VLA is sized from sav->ivlen before the ivlen check
	 * below — presumably ivlen is already bounded by the SA setup path;
	 * confirm against key_setsaval. */
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];
	int rc;

	/* carve the two corecrypto contexts out of the aligned region */
	P2ROUNDUP_GCM(sav->sched_enc, ctx);
	ctx->decrypt = (ccgcm_ctx *)(void *)((uint8_t *)ctx + offsetof(aes_gcm_ctx, ctxt));
	ctx->encrypt = (ccgcm_ctx *)(void *)((uint8_t *)ctx + offsetof(aes_gcm_ctx, ctxt) + aes_decrypt_get_ctx_size_gcm());

	/* implicit IV (RFC 8750) carries no IV bytes in the packet */
	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		return EINVAL;
	}

	rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt);
	if (rc) {
		return rc;
	}

	if (!implicit_iv) {
		/* explicit IV: seed corecrypto's IV generator with salt || sav->iv */
		memset(nonce, 0, ESP_GCM_SALT_LEN + ivlen);
		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
		memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen);

		rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
		cc_clear(sizeof(nonce), nonce);
		if (rc) {
			return rc;
		}
	} else {
		/* implicit IV: nonce is derived per-packet from the ESP sequence number */
		rc = aes_encrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->encrypt);
		if (rc) {
			return rc;
		}
	}

	rc = aes_encrypt_reset_gcm(ctx->encrypt);
	if (rc) {
		return rc;
	}

	return rc;
}
741 
742 int
esp_gcm_ivlen(const struct esp_algorithm * algo,struct secasvar * sav)743 esp_gcm_ivlen(const struct esp_algorithm *algo,
744     struct secasvar *sav)
745 {
746 	if (!algo) {
747 		panic("esp_gcm_ivlen: unknown algorithm");
748 	}
749 
750 	if (sav != NULL && ((sav->flags & SADB_X_EXT_IIV) != 0)) {
751 		return 0;
752 	} else {
753 		return algo->ivlenval;
754 	}
755 }
756 
757 int
esp_gcm_encrypt_finalize(struct secasvar * sav,unsigned char * tag,size_t tag_bytes)758 esp_gcm_encrypt_finalize(struct secasvar *sav,
759     unsigned char *tag, size_t tag_bytes)
760 {
761 	aes_gcm_ctx *ctx = NULL;
762 
763 	P2ROUNDUP_GCM(sav->sched_enc, ctx);
764 	return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt);
765 }
766 
767 int
esp_gcm_decrypt_finalize(struct secasvar * sav,unsigned char * tag,size_t tag_bytes)768 esp_gcm_decrypt_finalize(struct secasvar *sav,
769     unsigned char *tag, size_t tag_bytes)
770 {
771 	aes_gcm_ctx *ctx = NULL;
772 
773 	P2ROUNDUP_GCM(sav->sched_enc, ctx);
774 	return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt);
775 }
776 
/*
 * AES-GCM (or GMAC-only) protection of an ESP payload, in place.
 *
 * m     = packet mbuf chain; consumed (freed) on every error path
 * off   = offset to the ESP header
 * sav   = SA; sched_enc holds the aes_gcm_ctx (see esp_gcm_schedule)
 * ivlen = 0 for implicit IV (RFC 8750), else ESP_GCM_IVLEN
 *
 * The ESP header (and the IV, in GMAC-only mode) is fed in as AAD; the
 * payload is encrypted in place (GCM) or only authenticated (GMAC).
 * The tag is produced later by esp_gcm_encrypt_finalize().
 */
int
esp_gcm_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s = m;
	uint32_t soff = 0;       /* offset from the head of chain, to head of this mbuf */
	uint32_t sn = 0;     /* offset from the head of the mbuf, to meat */
	uint8_t *sp = NULL;
	aes_gcm_ctx *ctx = NULL;
	uint32_t len;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	struct newesp esp;
	/* nonce = 4-byte salt || 8-byte IV */
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];

	VERIFY(off <= INT_MAX);
	const size_t ivoff = off + sizeof(struct newesp);
	VERIFY(ivoff <= INT_MAX);
	const size_t bodyoff = ivoff + ivlen;
	VERIFY(bodyoff <= INT_MAX);

	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	P2ROUNDUP_GCM(sav->sched_enc, ctx);

	/* fresh GCM state for this packet */
	if (aes_encrypt_reset_gcm(ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	/* Copy the ESP header */
	m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);

	/* Construct the IV */
	memset(nonce, 0, sizeof(nonce));
	if (!implicit_iv) {
		/* generate new iv */
		if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}

		/*
		 * The IV is now generated within corecrypto and
		 * is provided to ESP using aes_encrypt_inc_iv_gcm().
		 * This makes the sav->iv redundant and is no longer
		 * used in GCM operations. But we still copy the IV
		 * back to sav->iv to ensure that any future code reading
		 * this value will get the latest IV.
		 */
		memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
		m_copyback(m, (int)ivoff, ivlen, sav->iv);
	} else {
		/* Use the ESP sequence number in the header to form the
		 * nonce according to RFC 8750. The first 4 bytes are the
		 * salt value, the next 4 bytes are zeroes, and the final
		 * 4 bytes are the ESP sequence number.
		 */
		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
		memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
		if (aes_encrypt_set_iv_gcm((const unsigned char *)nonce, sizeof(nonce), ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: iv set failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}

	/* Add ESP header to Additional Authentication Data */
	if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: packet encryption ESP header AAD failure\n", __FUNCTION__));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}
	/* Add IV to Additional Authentication Data for GMAC-only mode */
	if (gmac_only) {
		if (aes_encrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: packet encryption IV AAD failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	/* Clear nonce */
	cc_clear(sizeof(nonce), nonce);

	/* skip headers/IV */
	while (s != NULL && soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (uint32_t)bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}

	/* Encrypt (or add to AAD) payload — GCM is a stream mode, so each
	 * mbuf's data can be processed in place, no block-spanning buffer needed */
	while (s != NULL && soff < m->m_pkthdr.len) {
		/* skip empty mbufs */
		if ((len = s->m_len - sn) != 0) {
			sp = mtod(s, uint8_t *) + sn;

			if (!gmac_only) {
				if (aes_encrypt_gcm(sp, len, sp, ctx->encrypt)) {
					ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			} else {
				if (aes_encrypt_aad_gcm(sp, len, ctx->encrypt)) {
					ipseclog((LOG_ERR, "%s: failed to add data to AAD\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			}
		}

		sn = 0;
		soff += s->m_len;
		s = s->m_next;
	}

	/* chain ended before m_pkthdr.len bytes were seen: malformed packet */
	if (s == NULL && soff != m->m_pkthdr.len) {
		ipseclog((LOG_ERR, "%s: not enough mbufs %d %d, SPI 0x%08x",
		    __FUNCTION__, soff, m->m_pkthdr.len, ntohl(sav->spi)));
		m_freem(m);
		return EFBIG;
	}

	return 0;
}
936 
/*
 * esp_gcm_decrypt_aes
 *
 * Decrypt (AES-GCM) or authenticate-only (AES-GMAC) an ESP payload in
 * place, walking the packet's mbuf chain segment by segment.
 *
 * m     - mbuf chain holding the whole packet; freed (m_freem) on every
 *         error path, so the caller must not touch it after a failure.
 * off   - offset from the head of the chain to the ESP header.
 * sav   - SA state: key material (salt is the trailing ESP_GCM_SALT_LEN
 *         bytes of key_enc), flags (SADB_X_EXT_IIV), and the cipher
 *         schedule holding the GCM context.
 * algo  - unused.
 * ivlen - on-the-wire IV length: ESP_GCM_IVLEN normally, 0 when the
 *         RFC 8750 implicit-IV flag is set.
 *
 * Returns 0 on success, EINVAL on parameter or corecrypto failure, and
 * EFBIG if the chain ends before m_pkthdr.len bytes were consumed.
 * NOTE(review): this routine only feeds data through the GCM context; it
 * does not finalize or verify the ICV/tag — that is done elsewhere.
 */
int
esp_gcm_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s = m;
	uint32_t soff = 0;       /* offset from the head of chain, to head of this mbuf */
	uint32_t sn = 0;     /* offset from the head of the mbuf, to meat */
	uint8_t *sp = NULL;
	aes_gcm_ctx *ctx = NULL;
	uint32_t len;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	struct newesp esp;
	/* nonce layout: 4-byte salt followed by the 8-byte per-packet IV */
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];

	VERIFY(off <= INT_MAX);
	const size_t ivoff = off + sizeof(struct newesp);
	VERIFY(ivoff <= INT_MAX);
	const size_t bodyoff = ivoff + ivlen;
	VERIFY(bodyoff <= INT_MAX);

	/* With implicit IV (RFC 8750) no IV travels on the wire. */
	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	/* Packet must at least contain the ESP header plus the IV. */
	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

	/* Copy the ESP header */
	m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);

	/* Construct IV starting with salt */
	memset(nonce, 0, sizeof(nonce));
	/* Salt is the last ESP_GCM_SALT_LEN bytes of the encryption key. */
	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	if (!implicit_iv) {
		/* grab IV from packet */
		u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4)));
		m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);
		memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen);
		/* just in case */
		cc_clear(sizeof(iv), iv);
	} else {
		/* Use the ESP sequence number in the header to form the
		 * rest of the nonce according to RFC 8750.
		 */
		memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
	}

	/* Locate the (aligned) GCM context within the SA's cipher schedule.
	 * NOTE(review): P2ROUNDUP_GCM is defined elsewhere — presumably an
	 * alignment round-up; confirm against its definition. */
	P2ROUNDUP_GCM(sav->sched_enc, ctx);
	if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
		ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}

	/* Add ESP header to Additional Authentication Data */
	if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
		ipseclog((LOG_ERR, "%s: packet decryption ESP header AAD failure\n", __FUNCTION__));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}

	/* Add IV to Additional Authentication Data for GMAC-only mode */
	if (gmac_only) {
		if (aes_decrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: packet decryption IV AAD failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	/* Clear nonce */
	cc_clear(sizeof(nonce), nonce);

	/* skip headers/IV: advance to the mbuf (and offset within it) where
	 * the ciphertext body begins; sn is the intra-mbuf offset. */
	while (s != NULL && soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (uint32_t)bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}

	/* Decrypt (or just authenticate) payload, one mbuf segment at a
	 * time, in place (source and destination are the same buffer). */
	while (s != NULL && soff < m->m_pkthdr.len) {
		/* skip empty mbufs */
		if ((len = s->m_len - sn) != 0) {
			sp = mtod(s, uint8_t *) + sn;

			if (!gmac_only) {
				if (aes_decrypt_gcm(sp, len, sp, ctx->decrypt)) {
					ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			} else {
				if (aes_decrypt_aad_gcm(sp, len, ctx->decrypt)) {
					ipseclog((LOG_ERR, "%s: failed to add data to AAD\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			}
		}

		sn = 0;
		soff += s->m_len;
		s = s->m_next;
	}

	/* Chain ran out before covering the advertised packet length. */
	if (s == NULL && soff != m->m_pkthdr.len) {
		ipseclog((LOG_ERR, "%s: not enough mbufs %d %d, SPI 0x%08x",
		    __FUNCTION__, soff, m->m_pkthdr.len, ntohl(sav->spi)));
		m_freem(m);
		return EFBIG;
	}

	return 0;
}
1076 
1077 int
esp_aes_gcm_encrypt_data(struct secasvar * sav,uint8_t * __sized_by (input_data_len)input_data,size_t input_data_len,struct newesp * esp_hdr,uint8_t * __sized_by (ivlen)out_iv,size_t ivlen,uint8_t * __sized_by (output_data_len)output_data,size_t output_data_len)1078 esp_aes_gcm_encrypt_data(struct secasvar *sav,
1079     uint8_t *__sized_by(input_data_len)input_data, size_t input_data_len,
1080     struct newesp *esp_hdr,
1081     uint8_t *__sized_by(ivlen)out_iv, size_t ivlen,
1082     uint8_t *__sized_by(output_data_len)output_data, size_t output_data_len)
1083 {
1084 	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN] = {};
1085 	aes_gcm_ctx *ctx = NULL;
1086 	int rc = 0; // return code of corecrypto operations
1087 
1088 	ESP_CHECK_ARG(sav);
1089 	ESP_CHECK_ARG(input_data);
1090 	ESP_CHECK_ARG(esp_hdr);
1091 	ESP_CHECK_ARG(output_data);
1092 
1093 	VERIFY(input_data_len > 0);
1094 	VERIFY(output_data_len >= input_data_len);
1095 
1096 	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) == SADB_X_EXT_IIV);
1097 	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
1098 
1099 	if (__improbable(implicit_iv && gmac_only)) {
1100 		esp_log_err("IIV and GMAC-only not supported together, SPI  0x%08x\n",
1101 		    ntohl(sav->spi));
1102 		return EINVAL;
1103 	}
1104 
1105 	P2ROUNDUP_GCM(sav->sched_enc, ctx);
1106 
1107 	if (__improbable((rc = aes_encrypt_reset_gcm(ctx->encrypt)) != 0)) {
1108 		esp_log_err("Context reset failure %d, SPI 0x%08x\n",
1109 		    rc, ntohl(sav->spi));
1110 		return rc;
1111 	}
1112 
1113 	if (implicit_iv) {
1114 		VERIFY(out_iv == NULL);
1115 		VERIFY(ivlen == 0);
1116 
1117 		/* Use the ESP sequence number in the header to form the
1118 		 * nonce according to RFC 8750. The first 4 bytes are the
1119 		 * salt value, the next 4 bytes are zeroes, and the final
1120 		 * 4 bytes are the ESP sequence number.
1121 		 */
1122 		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) -
1123 		    ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
1124 		memcpy(nonce + sizeof(nonce) - sizeof(esp_hdr->esp_seq),
1125 		    &esp_hdr->esp_seq, sizeof(esp_hdr->esp_seq));
1126 		if (__improbable((rc = aes_encrypt_set_iv_gcm((const unsigned char *)nonce,
1127 		    sizeof(nonce), ctx->encrypt)) != 0)) {
1128 			esp_log_err("Set IV failure %d, SPI 0x%08x\n",
1129 			    rc, ntohl(sav->spi));
1130 			cc_clear(sizeof(nonce), nonce);
1131 			return rc;
1132 		}
1133 	} else {
1134 		ESP_CHECK_ARG(out_iv);
1135 		VERIFY(ivlen == ESP_GCM_IVLEN);
1136 
1137 		/* generate new iv */
1138 		if (__improbable((rc = aes_encrypt_inc_iv_gcm((unsigned char *)nonce,
1139 		    ctx->encrypt)) != 0)) {
1140 			esp_log_err("IV generation failure %d, SPI 0x%08x\n",
1141 			    rc, ntohl(sav->spi));
1142 			cc_clear(sizeof(nonce), nonce);
1143 			return rc;
1144 		}
1145 
1146 		memcpy(out_iv, (nonce + ESP_GCM_SALT_LEN), ESP_GCM_IVLEN);
1147 	}
1148 
1149 	/* Set Additional Authentication Data */
1150 	if (__improbable((rc = aes_encrypt_aad_gcm((unsigned char*)esp_hdr,
1151 	    sizeof(*esp_hdr), ctx->encrypt)) != 0)) {
1152 		esp_log_err("Set AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1153 		cc_clear(sizeof(nonce), nonce);
1154 		return rc;
1155 	}
1156 
1157 	/* Add IV to Additional Authentication Data for GMAC-only mode */
1158 	if (gmac_only) {
1159 		if (__improbable((rc = aes_encrypt_aad_gcm(nonce +
1160 		    ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->encrypt)) != 0)) {
1161 			esp_log_err("Packet encryption IV AAD failure %d, SPI 0x%08x\n",
1162 			    rc, ntohl(sav->spi));
1163 			cc_clear(sizeof(nonce), nonce);
1164 			return rc;
1165 		}
1166 	}
1167 
1168 	cc_clear(sizeof(nonce), nonce);
1169 
1170 	if (gmac_only) {
1171 		if (__improbable((rc = aes_encrypt_aad_gcm(input_data, (unsigned int)input_data_len,
1172 		    ctx->encrypt)) != 0)) {
1173 			esp_log_err("set aad failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1174 			return rc;
1175 		}
1176 		memcpy(output_data, input_data, input_data_len);
1177 	} else {
1178 		if (__improbable((rc = aes_encrypt_gcm(input_data, (unsigned int)input_data_len,
1179 		    output_data, ctx->encrypt)) != 0)) {
1180 			esp_log_err("encrypt failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1181 			return rc;
1182 		}
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 int
/*
 * esp_aes_gcm_decrypt_data
 *
 * One-shot AES-GCM decryption (or AES-GMAC authentication-only pass, in
 * which case the payload is copied through unmodified) over a contiguous
 * ESP payload buffer.
 *
 * sav       - SA state; key/salt, IIV flag, and the GCM cipher context.
 * input_data/input_data_len   - ciphertext payload (len > 0).
 * esp_hdr   - received ESP header, always authenticated as AAD.
 * iv/ivlen  - IV taken from the packet (ESP_GCM_IVLEN bytes); must be
 *             NULL/0 when the SA uses RFC 8750 implicit IVs.
 * output_data/output_data_len - destination buffer, at least input_data_len.
 *
 * Returns 0 on success, EINVAL for an unsupported mode combination, or the
 * corecrypto error code from the failing operation. The nonce is wiped on
 * every exit path. NOTE(review): tag finalization/verification is not done
 * here.
 */
int
esp_aes_gcm_decrypt_data(struct secasvar *sav,
    uint8_t *__sized_by(input_data_len)input_data, size_t input_data_len,
    struct newesp *esp_hdr,
    uint8_t *__sized_by(ivlen)iv, size_t ivlen,
    uint8_t *__sized_by(output_data_len)output_data, size_t output_data_len)
{
	aes_gcm_ctx *ctx = NULL;
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN] = {};
	int err = 0;    /* corecrypto return code */

	ESP_CHECK_ARG(sav);
	ESP_CHECK_ARG(input_data);
	ESP_CHECK_ARG(esp_hdr);
	ESP_CHECK_ARG(output_data);

	VERIFY(input_data_len > 0);
	VERIFY(output_data_len >= input_data_len);

	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) == SADB_X_EXT_IIV);

	if (__improbable(implicit_iv && gmac_only)) {
		esp_log_err("IIV and GMAC-only not supported together, SPI  0x%08x\n",
		    ntohl(sav->spi));
		return EINVAL;
	}

	/* Nonce = salt || IV; salt lives in the tail of the encryption key. */
	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) -
	    ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);

	if (!implicit_iv) {
		ESP_CHECK_ARG(iv);
		VERIFY(ivlen == ESP_GCM_IVLEN);

		/* Explicit IV: taken straight from the received packet. */
		memcpy(nonce + ESP_GCM_SALT_LEN, iv, ESP_GCM_IVLEN);
	} else {
		VERIFY(iv == NULL);
		VERIFY(ivlen == 0);

		/* RFC 8750 implicit IV: rebuild the nonce tail from the ESP
		 * sequence number in the received header. */
		memcpy(nonce + sizeof(nonce) - sizeof(esp_hdr->esp_seq), &esp_hdr->esp_seq, sizeof(esp_hdr->esp_seq));
	}

	/* Locate the aligned GCM context inside the SA's cipher schedule. */
	P2ROUNDUP_GCM(sav->sched_enc, ctx);

	err = aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt);
	if (__improbable(err != 0)) {
		esp_log_err("set iv failure %d, SPI 0x%08x\n", err, ntohl(sav->spi));
		goto fail_clear;
	}

	/* The ESP header is always authenticated as AAD. */
	err = aes_decrypt_aad_gcm((unsigned char *)esp_hdr, sizeof(*esp_hdr),
	    ctx->decrypt);
	if (__improbable(err != 0)) {
		esp_log_err("AAD failure %d, SPI 0x%08x\n", err, ntohl(sav->spi));
		goto fail_clear;
	}

	/* GMAC-only additionally authenticates the IV itself. */
	if (gmac_only) {
		err = aes_decrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN,
		    ESP_GCM_IVLEN, ctx->decrypt);
		if (__improbable(err != 0)) {
			esp_log_err("AAD failure %d, SPI 0x%08x\n", err, ntohl(sav->spi));
			goto fail_clear;
		}
	}

	/* Nonce is no longer needed; wipe it before touching the payload. */
	cc_clear(sizeof(nonce), nonce);

	if (!gmac_only) {
		err = aes_decrypt_gcm(input_data, (unsigned int)input_data_len,
		    output_data, ctx->decrypt);
		if (__improbable(err != 0)) {
			esp_log_err("decrypt failure %d, SPI 0x%08x\n", err, ntohl(sav->spi));
			return err;
		}
	} else {
		/* GMAC mode: payload is authenticated only, then copied as-is. */
		err = aes_decrypt_aad_gcm(input_data, (unsigned int)input_data_len,
		    ctx->decrypt);
		if (__improbable(err != 0)) {
			esp_log_err("AAD failure %d, SPI 0x%08x\n", err, ntohl(sav->spi));
			return err;
		}
		memcpy(output_data, input_data, input_data_len);
	}

	return 0;

fail_clear:
	cc_clear(sizeof(nonce), nonce);
	return err;
}
1280