xref: /xnu-10002.61.3/bsd/netinet6/esp_rijndael.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2008-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $	*/
30 /*	$KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $	*/
31 
32 /*
33  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the project nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  */
60 
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/socket.h>
64 #include <sys/queue.h>
65 #include <sys/syslog.h>
66 #include <sys/mbuf.h>
67 #include <sys/mcache.h>
68 
69 #include <kern/locks.h>
70 
71 #include <net/if.h>
72 #include <net/route.h>
73 
74 #include <netinet6/ipsec.h>
75 #include <netinet6/esp.h>
76 #include <netinet6/esp_rijndael.h>
77 
78 #include <libkern/crypto/aes.h>
79 
80 #include <netkey/key.h>
81 
82 #include <net/net_osdep.h>
83 
84 #define MAX_REALIGN_LEN 2000
85 #define AES_BLOCKLEN 16
86 #define ESP_GCM_SALT_LEN 4   // RFC 4106 Section 4
87 #define ESP_GCM_IVLEN 8
88 #define ESP_GCM_ALIGN 16
89 
90 typedef struct {
91 	ccgcm_ctx *decrypt;
92 	ccgcm_ctx *encrypt;
93 	ccgcm_ctx ctxt[0];
94 } aes_gcm_ctx;
95 
96 size_t
esp_aes_schedlen(__unused const struct esp_algorithm * algo)97 esp_aes_schedlen(
98 	__unused const struct esp_algorithm *algo)
99 {
100 	return sizeof(aes_ctx);
101 }
102 
103 int
esp_aes_schedule(__unused const struct esp_algorithm * algo,struct secasvar * sav)104 esp_aes_schedule(
105 	__unused const struct esp_algorithm *algo,
106 	struct secasvar *sav)
107 {
108 	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
109 	aes_ctx *ctx = (aes_ctx*)sav->sched;
110 
111 	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
112 	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
113 
114 	return 0;
115 }
116 
117 
118 /* The following 2 functions decrypt or encrypt the contents of
119  * the mbuf chain passed in keeping the IP and ESP header's in place,
120  * along with the IV.
121  * The code attempts to call the crypto code with the largest chunk
122  * of data it can based on the amount of source data in
123  * the current source mbuf and the space remaining in the current
124  * destination mbuf.  The crypto code requires data to be a multiple
125  * of 16 bytes.  A separate buffer is used when a 16 byte block spans
126  * mbufs.
127  *
128  * m = mbuf chain
129  * off = offset to ESP header
130  *
131  * local vars for source:
132  * soff = offset from beginning of the chain to the head of the
133  *			current mbuf.
134  * scut = last mbuf that contains headers to be retained
135  * scutoff = offset to end of the headers in scut
136  * s = the current mbuf
137  * sn = current offset to data in s (next source data to process)
138  *
139  * local vars for dest:
140  * d0 = head of chain
141  * d = current mbuf
142  * dn = current offset in d (next location to store result)
143  */
144 
145 
/*
 * AES-CBC decrypt the ESP payload of mbuf chain m in place of the old
 * payload: headers and IV (everything before bodyoff) stay in the original
 * mbufs; decrypted data is written into a freshly allocated chain that is
 * spliced in after `scut`.  See the block comment above for the roles of
 * soff/sn/dn/scut/d0.
 *
 * m     = mbuf chain containing the packet
 * off   = offset from the start of the chain to the ESP header
 * sav   = SA; sav->sched holds the expanded AES key schedules
 * ivlen = IV length; must be AES_BLOCKLEN (16) for AES-CBC
 *
 * Returns 0 on success, or EINVAL/ENOBUFS/ENOMEM on failure; on every
 * error path both m and any partially built output chain are freed.
 */
int
esp_cbc_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	struct mbuf *scut;
	int scutoff;
	int     i, len;


	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	/* Old-style (RFC 1827) ESP has a shorter fixed header than RFC 2406. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	/* CBC ciphertext must be whole blocks. */
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	VERIFY(ivoff <= INT_MAX);

	/* grab iv */
	m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	/* scut/scutoff mark where the retained headers end; output is spliced here. */
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;      // full blocks only
		} else {
			/* body is non-continuous: gather one block spanning mbufs */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;                     // 1 block only in sbuf
		}

		/* destination: allocate a new output mbuf when the current one is full */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);      /* remaining payload bytes */
			if (d && i > MLEN) {
				/* need cluster storage; fall back to a big cluster */
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			/* usable length: trailing space trimmed to whole blocks, capped at i */
			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					kfree_data(sp_aligned, MAX_REALIGN_LEN);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			/* bounce buffer allocated lazily, reused across iterations */
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		// next iv: last ciphertext block just consumed (CBC chaining)
		memcpy(iv, sp + len - AES_BLOCKLEN, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		kfree_data(sp_aligned, MAX_REALIGN_LEN);
		sp_aligned = NULL;
	}

	/* just in case: scrub key-dependent scratch state */
	cc_clear(sizeof(iv), iv);
	cc_clear(sizeof(sbuf), sbuf);

	return 0;
}
359 
/*
 * AES-CBC encrypt the ESP payload of mbuf chain m, mirroring
 * esp_cbc_decrypt_aes above: headers and IV stay in the original mbufs,
 * ciphertext is built in a new chain spliced in after `scut`.  The IV for
 * the first block is sav->iv (also written into the packet); thereafter
 * ivp chases the last ciphertext block in the output chain (CBC chaining).
 *
 * m     = mbuf chain containing the packet
 * off   = offset from the start of the chain to the ESP header
 * plen  = unused
 * sav   = SA; sav->sched holds the expanded AES key schedules
 * ivlen = IV length; must be AES_BLOCKLEN (16) for AES-CBC
 *
 * Returns 0 on success, or EINVAL/ENOBUFS/ENOMEM on failure; on every
 * error path both m and any partially built output chain are freed.
 */
int
esp_cbc_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *ivp, *dptr, *ivp_unaligned;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	/* Old-style (RFC 1827) ESP has a shorter fixed header than RFC 2406. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	VERIFY(ivoff <= INT_MAX);

	/* put iv into the packet */
	m_copyback(m, (int)ivoff, ivlen, sav->iv);
	ivp = (u_int8_t *) sav->iv;

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	/* caller must have padded the payload to whole blocks */
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	/* scut/scutoff mark where the retained headers end; output is spliced here. */
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;      // full blocks only
		} else {
			/* body is non-continuous: gather one block spanning mbufs */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;                     // 1 block only in sbuf
		}

		/* destination: allocate a new output mbuf when the current one is full */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);      /* remaining payload bytes */
			if (d && i > MLEN) {
				/* need cluster storage; fall back to a big cluster */
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			/* usable length: trailing space trimmed to whole blocks, capped at i */
			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					kfree_data(sp_aligned, MAX_REALIGN_LEN);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			/* bounce buffer allocated lazily, reused across iterations */
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(ivp)) {
			ivp_unaligned = NULL;
		} else {
			ivp_unaligned = ivp;
			ivp = ivp_aligned_buf;
			memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
		}
		// no need to check output pointer alignment
		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
		    (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}
		if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
			ivp = ivp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* next iv */
		ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		kfree_data(sp_aligned, MAX_REALIGN_LEN);
		sp_aligned = NULL;
	}

	/* just in case: scrub scratch state, then randomize IV for next packet */
	cc_clear(sizeof(sbuf), sbuf);
	key_sa_stir_iv(sav);

	return 0;
}
586 
587 int
esp_aes_cbc_encrypt_data(struct secasvar * sav,uint8_t * input_data,size_t input_data_len,struct newesp * esp_hdr,uint8_t * out_iv,size_t out_ivlen,uint8_t * output_data,size_t output_data_len)588 esp_aes_cbc_encrypt_data(struct secasvar *sav, uint8_t *input_data,
589     size_t input_data_len, struct newesp *esp_hdr, uint8_t *out_iv,
590     size_t out_ivlen, uint8_t *output_data, size_t output_data_len)
591 {
592 	aes_encrypt_ctx *ctx = NULL;
593 	uint8_t *ivp = NULL;
594 	aes_rval rc = 0;
595 
596 	ESP_CHECK_ARG(sav);
597 	ESP_CHECK_ARG(input_data);
598 	ESP_CHECK_ARG(esp_hdr);
599 	ESP_CHECK_ARG(out_iv);
600 	ESP_CHECK_ARG(output_data);
601 
602 	VERIFY(input_data_len > 0);
603 	VERIFY(output_data_len >= input_data_len);
604 
605 	VERIFY(out_ivlen == AES_BLOCKLEN);
606 	memcpy(out_iv, sav->iv, out_ivlen);
607 	ivp = (uint8_t *)sav->iv;
608 
609 	if (input_data_len % AES_BLOCKLEN) {
610 		esp_log_err("payload length %zu must be multiple of "
611 		    "AES_BLOCKLEN, SPI 0x%08x", input_data_len, ntohl(sav->spi));
612 		return EINVAL;
613 	}
614 
615 	ctx = (aes_encrypt_ctx *)(&(((aes_ctx *)sav->sched)->encrypt));
616 
617 	VERIFY((input_data_len >> 4) <= UINT32_MAX);
618 	if (__improbable((rc = aes_encrypt_cbc(input_data, ivp,
619 	    (unsigned int)(input_data_len >> 4), output_data, ctx)) != 0)) {
620 		esp_log_err("encrypt failed %d, SPI 0x%08x", rc, ntohl(sav->spi));
621 		return rc;
622 	}
623 
624 	key_sa_stir_iv(sav);
625 	return 0;
626 }
627 
628 int
esp_aes_cbc_decrypt_data(struct secasvar * sav,uint8_t * input_data,size_t input_data_len,struct newesp * esp_hdr,uint8_t * iv,size_t ivlen,uint8_t * output_data,size_t output_data_len)629 esp_aes_cbc_decrypt_data(struct secasvar *sav, uint8_t *input_data,
630     size_t input_data_len, struct newesp *esp_hdr, uint8_t *iv,
631     size_t ivlen, uint8_t *output_data, size_t output_data_len)
632 {
633 	aes_decrypt_ctx *ctx = NULL;
634 	aes_rval rc = 0;
635 
636 	ESP_CHECK_ARG(sav);
637 	ESP_CHECK_ARG(input_data);
638 	ESP_CHECK_ARG(esp_hdr);
639 	ESP_CHECK_ARG(output_data);
640 
641 	VERIFY(input_data_len > 0);
642 	VERIFY(output_data_len >= input_data_len);
643 
644 	if (__improbable(ivlen != AES_BLOCKLEN)) {
645 		esp_log_err("ivlen(%zu) != AES_BLOCKLEN, SPI 0x%08x",
646 		    ivlen, ntohl(sav->spi));
647 		return EINVAL;
648 	}
649 
650 	if (__improbable(input_data_len % AES_BLOCKLEN)) {
651 		esp_packet_log_err("input data length(%zu) must be a multiple of "
652 		    "AES_BLOCKLEN", input_data_len);
653 		return EINVAL;
654 	}
655 
656 	ctx = (aes_decrypt_ctx *)(&(((aes_ctx *)sav->sched)->decrypt));
657 
658 	VERIFY((input_data_len >> 4) <= UINT32_MAX);
659 	if (__improbable((rc = aes_decrypt_cbc(input_data, iv,
660 	    (unsigned int)(input_data_len >> 4), output_data, ctx)) != 0)) {
661 		esp_log_err("decrypt failed %d, SPI 0x%08x", rc, ntohl(sav->spi));
662 		return rc;
663 	}
664 
665 	return 0;
666 }
667 
668 size_t
esp_gcm_schedlen(__unused const struct esp_algorithm * algo)669 esp_gcm_schedlen(
670 	__unused const struct esp_algorithm *algo)
671 {
672 	return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN;
673 }
674 
/*
 * Set up the per-SA AES-GCM state in sav->sched: lay out the decrypt and
 * encrypt corecrypto contexts inside the over-allocated buffer, expand the
 * key (minus the trailing 4-byte RFC 4106 salt) into both, and seed the
 * encrypt side's IV generator unless the SA uses implicit IVs (RFC 8750).
 * Caller must hold sadb_mutex.  Returns 0 or an errno/corecrypto error.
 */
int
esp_gcm_schedule( __unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	/* sched was over-allocated by ESP_GCM_ALIGN; align the ctx header up. */
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	const u_int ivlen = sav->ivlen;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];
	int rc;

	/* decrypt context first, encrypt context immediately after it */
	ctx->decrypt = &ctx->ctxt[0];
	ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];

	/* implicit-IV SAs carry no IV on the wire; otherwise it is 8 bytes */
	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		return EINVAL;
	}

	/* last ESP_GCM_SALT_LEN bytes of key_enc are the salt, not key material */
	rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt);
	if (rc) {
		return rc;
	}

	if (!implicit_iv) {
		/* seed corecrypto's IV generator with salt || initial IV */
		memset(nonce, 0, ESP_GCM_SALT_LEN + ivlen);
		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
		memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen);

		rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
		cc_clear(sizeof(nonce), nonce);
		if (rc) {
			return rc;
		}
	} else {
		/* implicit IV: nonce is built per-packet from the ESP sequence number */
		rc = aes_encrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->encrypt);
		if (rc) {
			return rc;
		}
	}

	rc = aes_encrypt_reset_gcm(ctx->encrypt);
	if (rc) {
		return rc;
	}

	return rc;
}
729 
730 int
esp_gcm_ivlen(const struct esp_algorithm * algo,struct secasvar * sav)731 esp_gcm_ivlen(const struct esp_algorithm *algo,
732     struct secasvar *sav)
733 {
734 	if (!algo) {
735 		panic("esp_gcm_ivlen: unknown algorithm");
736 	}
737 
738 	if (sav != NULL && ((sav->flags & SADB_X_EXT_IIV) != 0)) {
739 		return 0;
740 	} else {
741 		return algo->ivlenval;
742 	}
743 }
744 
745 int
esp_gcm_encrypt_finalize(struct secasvar * sav,unsigned char * tag,size_t tag_bytes)746 esp_gcm_encrypt_finalize(struct secasvar *sav,
747     unsigned char *tag, size_t tag_bytes)
748 {
749 	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
750 	return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt);
751 }
752 
753 int
esp_gcm_decrypt_finalize(struct secasvar * sav,unsigned char * tag,size_t tag_bytes)754 esp_gcm_decrypt_finalize(struct secasvar *sav,
755     unsigned char *tag, size_t tag_bytes)
756 {
757 	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
758 	return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt);
759 }
760 
/*
 * AES-GCM encrypt (or, for GMAC-only SAs, just authenticate) the ESP
 * payload of mbuf chain m in place.  The required corecrypto sequence is
 * strict: reset → set/generate IV → ESP header as AAD → (GMAC-only: IV as
 * AAD) → payload.  The tag is produced later by esp_gcm_encrypt_finalize.
 *
 * m     = mbuf chain containing the packet
 * off   = offset from the start of the chain to the ESP header
 * plen  = unused
 * sav   = SA; GCM state lives in sav->sched
 * ivlen = on-the-wire IV length: ESP_GCM_IVLEN (8), or 0 for implicit IV
 *
 * Returns 0 on success, EINVAL/EFBIG on failure; m is freed on error.
 */
int
esp_gcm_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s = m;
	uint32_t soff = 0;       /* offset from the head of chain, to head of this mbuf */
	uint32_t sn = 0;     /* offset from the head of the mbuf, to meat */
	uint8_t *sp = NULL;
	aes_gcm_ctx *ctx;
	uint32_t len;
	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
	struct newesp esp;
	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];

	VERIFY(off <= INT_MAX);
	const size_t ivoff = off + sizeof(struct newesp);
	VERIFY(ivoff <= INT_MAX);
	const size_t bodyoff = ivoff + ivlen;
	VERIFY(bodyoff <= INT_MAX);

	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (implicit_iv && gmac_only) {
		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	/* sched was over-allocated; recover the aligned context header */
	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);

	if (aes_encrypt_reset_gcm(ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	/* Copy the ESP header (needed as AAD, and for the implicit-IV nonce) */
	m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);

	/* Construct the IV */
	memset(nonce, 0, sizeof(nonce));
	if (!implicit_iv) {
		/* generate new iv */
		if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}

		/*
		 * The IV is now generated within corecrypto and
		 * is provided to ESP using aes_encrypt_inc_iv_gcm().
		 * This makes the sav->iv redundant and is no longer
		 * used in GCM operations. But we still copy the IV
		 * back to sav->iv to ensure that any future code reading
		 * this value will get the latest IV.
		 */
		memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
		m_copyback(m, (int)ivoff, ivlen, sav->iv);
	} else {
		/* Use the ESP sequence number in the header to form the
		 * nonce according to RFC 8750. The first 4 bytes are the
		 * salt value, the next 4 bytes are zeroes, and the final
		 * 4 bytes are the ESP sequence number.
		 */
		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
		memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
		if (aes_encrypt_set_iv_gcm((const unsigned char *)nonce, sizeof(nonce), ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: iv set failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}

	/* Add ESP header to Additional Authentication Data */
	if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: packet encryption ESP header AAD failure\n", __FUNCTION__));
		cc_clear(sizeof(nonce), nonce);
		m_freem(m);
		return EINVAL;
	}
	/* Add IV to Additional Authentication Data for GMAC-only mode */
	if (gmac_only) {
		if (aes_encrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: packet encryption IV AAD failure\n", __FUNCTION__));
			cc_clear(sizeof(nonce), nonce);
			m_freem(m);
			return EINVAL;
		}
	}

	/* Clear nonce */
	cc_clear(sizeof(nonce), nonce);

	/* skip headers/IV */
	while (s != NULL && soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (uint32_t)bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}

	/* Encrypt (or add to AAD) payload, one mbuf's worth at a time, in place */
	while (s != NULL && soff < m->m_pkthdr.len) {
		/* skip empty mbufs */
		if ((len = s->m_len - sn) != 0) {
			sp = mtod(s, uint8_t *) + sn;

			if (!gmac_only) {
				if (aes_encrypt_gcm(sp, len, sp, ctx->encrypt)) {
					ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			} else {
				/* GMAC-only: payload is authenticated but left in cleartext */
				if (aes_encrypt_aad_gcm(sp, len, ctx->encrypt)) {
					ipseclog((LOG_ERR, "%s: failed to add data to AAD\n", __FUNCTION__));
					m_freem(m);
					return EINVAL;
				}
			}
		}

		sn = 0;
		soff += s->m_len;
		s = s->m_next;
	}

	/* chain ended before pkthdr.len bytes were seen — malformed chain */
	if (s == NULL && soff != m->m_pkthdr.len) {
		ipseclog((LOG_ERR, "%s: not enough mbufs %d %d, SPI 0x%08x",
		    __FUNCTION__, soff, m->m_pkthdr.len, ntohl(sav->spi)));
		m_freem(m);
		return EFBIG;
	}

	return 0;
}
920 
921 int
esp_gcm_decrypt_aes(struct mbuf * m,size_t off,struct secasvar * sav,const struct esp_algorithm * algo __unused,int ivlen)922 esp_gcm_decrypt_aes(
923 	struct mbuf *m,
924 	size_t off,
925 	struct secasvar *sav,
926 	const struct esp_algorithm *algo __unused,
927 	int ivlen)
928 {
929 	struct mbuf *s = m;
930 	uint32_t soff = 0;       /* offset from the head of chain, to head of this mbuf */
931 	uint32_t sn = 0;     /* offset from the head of the mbuf, to meat */
932 	uint8_t *sp = NULL;
933 	aes_gcm_ctx *ctx;
934 	uint32_t len;
935 	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
936 	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
937 	struct newesp esp;
938 	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];
939 
940 	VERIFY(off <= INT_MAX);
941 	const size_t ivoff = off + sizeof(struct newesp);
942 	VERIFY(ivoff <= INT_MAX);
943 	const size_t bodyoff = ivoff + ivlen;
944 	VERIFY(bodyoff <= INT_MAX);
945 
946 	if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
947 		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
948 		m_freem(m);
949 		return EINVAL;
950 	}
951 
952 	if (implicit_iv && gmac_only) {
953 		ipseclog((LOG_ERR, "%s: IIV and GMAC-only not supported together\n", __FUNCTION__));
954 		m_freem(m);
955 		return EINVAL;
956 	}
957 
958 	if (m->m_pkthdr.len < bodyoff) {
959 		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
960 		    m->m_pkthdr.len, (u_int32_t)bodyoff));
961 		m_freem(m);
962 		return EINVAL;
963 	}
964 
965 	/* Copy the ESP header */
966 	m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);
967 
968 	/* Construct IV starting with salt */
969 	memset(nonce, 0, sizeof(nonce));
970 	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
971 	if (!implicit_iv) {
972 		/* grab IV from packet */
973 		u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4)));
974 		m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);
975 		memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen);
976 		/* just in case */
977 		cc_clear(sizeof(iv), iv);
978 	} else {
979 		/* Use the ESP sequence number in the header to form the
980 		 * rest of the nonce according to RFC 8750.
981 		 */
982 		memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
983 	}
984 
985 	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
986 	if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
987 		ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
988 		cc_clear(sizeof(nonce), nonce);
989 		m_freem(m);
990 		return EINVAL;
991 	}
992 
993 	/* Add ESP header to Additional Authentication Data */
994 	if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
995 		ipseclog((LOG_ERR, "%s: packet decryption ESP header AAD failure\n", __FUNCTION__));
996 		cc_clear(sizeof(nonce), nonce);
997 		m_freem(m);
998 		return EINVAL;
999 	}
1000 
1001 	/* Add IV to Additional Authentication Data for GMAC-only mode */
1002 	if (gmac_only) {
1003 		if (aes_decrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->decrypt)) {
1004 			ipseclog((LOG_ERR, "%s: packet decryption IV AAD failure\n", __FUNCTION__));
1005 			cc_clear(sizeof(nonce), nonce);
1006 			m_freem(m);
1007 			return EINVAL;
1008 		}
1009 	}
1010 
1011 	/* Clear nonce */
1012 	cc_clear(sizeof(nonce), nonce);
1013 
1014 	/* skip headers/IV */
1015 	while (s != NULL && soff < bodyoff) {
1016 		if (soff + s->m_len > bodyoff) {
1017 			sn = (uint32_t)bodyoff - soff;
1018 			break;
1019 		}
1020 
1021 		soff += s->m_len;
1022 		s = s->m_next;
1023 	}
1024 
1025 	/* Decrypt (or just authenticate) payload */
1026 	while (s != NULL && soff < m->m_pkthdr.len) {
1027 		/* skip empty mbufs */
1028 		if ((len = s->m_len - sn) != 0) {
1029 			sp = mtod(s, uint8_t *) + sn;
1030 
1031 			if (!gmac_only) {
1032 				if (aes_decrypt_gcm(sp, len, sp, ctx->decrypt)) {
1033 					ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
1034 					m_freem(m);
1035 					return EINVAL;
1036 				}
1037 			} else {
1038 				if (aes_decrypt_aad_gcm(sp, len, ctx->decrypt)) {
1039 					ipseclog((LOG_ERR, "%s: failed to add data to AAD\n", __FUNCTION__));
1040 					m_freem(m);
1041 					return EINVAL;
1042 				}
1043 			}
1044 		}
1045 
1046 		sn = 0;
1047 		soff += s->m_len;
1048 		s = s->m_next;
1049 	}
1050 
1051 	if (s == NULL && soff != m->m_pkthdr.len) {
1052 		ipseclog((LOG_ERR, "%s: not enough mbufs %d %d, SPI 0x%08x",
1053 		    __FUNCTION__, soff, m->m_pkthdr.len, ntohl(sav->spi)));
1054 		m_freem(m);
1055 		return EFBIG;
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 int
esp_aes_gcm_encrypt_data(struct secasvar * sav,uint8_t * input_data,size_t input_data_len,struct newesp * esp_hdr,uint8_t * out_iv,size_t ivlen,uint8_t * output_data,size_t output_data_len)1062 esp_aes_gcm_encrypt_data(struct secasvar *sav, uint8_t *input_data,
1063     size_t input_data_len, struct newesp *esp_hdr, uint8_t *out_iv,
1064     size_t ivlen, uint8_t *output_data, size_t output_data_len)
1065 {
1066 	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN] = {};
1067 	int rc = 0; // return code of corecrypto operations
1068 
1069 	ESP_CHECK_ARG(sav);
1070 	ESP_CHECK_ARG(input_data);
1071 	ESP_CHECK_ARG(esp_hdr);
1072 	ESP_CHECK_ARG(output_data);
1073 
1074 	VERIFY(input_data_len > 0);
1075 	VERIFY(output_data_len >= input_data_len);
1076 
1077 	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) == SADB_X_EXT_IIV);
1078 	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
1079 
1080 	if (__improbable(implicit_iv && gmac_only)) {
1081 		esp_log_err("IIV and GMAC-only not supported together, SPI  0x%08x\n",
1082 		    ntohl(sav->spi));
1083 		return EINVAL;
1084 	}
1085 
1086 	aes_gcm_ctx *ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
1087 
1088 	if (__improbable((rc = aes_encrypt_reset_gcm(ctx->encrypt)) != 0)) {
1089 		esp_log_err("Context reset failure %d, SPI 0x%08x\n",
1090 		    rc, ntohl(sav->spi));
1091 		return rc;
1092 	}
1093 
1094 	if (implicit_iv) {
1095 		VERIFY(out_iv == NULL);
1096 		VERIFY(ivlen == 0);
1097 
1098 		/* Use the ESP sequence number in the header to form the
1099 		 * nonce according to RFC 8750. The first 4 bytes are the
1100 		 * salt value, the next 4 bytes are zeroes, and the final
1101 		 * 4 bytes are the ESP sequence number.
1102 		 */
1103 		memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) -
1104 		    ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
1105 		memcpy(nonce + sizeof(nonce) - sizeof(esp_hdr->esp_seq),
1106 		    &esp_hdr->esp_seq, sizeof(esp_hdr->esp_seq));
1107 		if (__improbable((rc = aes_encrypt_set_iv_gcm((const unsigned char *)nonce,
1108 		    sizeof(nonce), ctx->encrypt)) != 0)) {
1109 			esp_log_err("Set IV failure %d, SPI 0x%08x\n",
1110 			    rc, ntohl(sav->spi));
1111 			cc_clear(sizeof(nonce), nonce);
1112 			return rc;
1113 		}
1114 	} else {
1115 		ESP_CHECK_ARG(out_iv);
1116 		VERIFY(ivlen == ESP_GCM_IVLEN);
1117 
1118 		/* generate new iv */
1119 		if (__improbable((rc = aes_encrypt_inc_iv_gcm((unsigned char *)nonce,
1120 		    ctx->encrypt)) != 0)) {
1121 			esp_log_err("IV generation failure %d, SPI 0x%08x\n",
1122 			    rc, ntohl(sav->spi));
1123 			cc_clear(sizeof(nonce), nonce);
1124 			return rc;
1125 		}
1126 
1127 		memcpy(out_iv, (nonce + ESP_GCM_SALT_LEN), ESP_GCM_IVLEN);
1128 	}
1129 
1130 	/* Set Additional Authentication Data */
1131 	if (__improbable((rc = aes_encrypt_aad_gcm((unsigned char*)esp_hdr,
1132 	    sizeof(*esp_hdr), ctx->encrypt)) != 0)) {
1133 		esp_log_err("Set AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1134 		cc_clear(sizeof(nonce), nonce);
1135 		return rc;
1136 	}
1137 
1138 	/* Add IV to Additional Authentication Data for GMAC-only mode */
1139 	if (gmac_only) {
1140 		if (__improbable((rc = aes_encrypt_aad_gcm(nonce +
1141 		    ESP_GCM_SALT_LEN, ESP_GCM_IVLEN, ctx->encrypt)) != 0)) {
1142 			esp_log_err("Packet encryption IV AAD failure %d, SPI 0x%08x\n",
1143 			    rc, ntohl(sav->spi));
1144 			cc_clear(sizeof(nonce), nonce);
1145 			return rc;
1146 		}
1147 	}
1148 
1149 	cc_clear(sizeof(nonce), nonce);
1150 
1151 	if (gmac_only) {
1152 		if (__improbable((rc = aes_encrypt_aad_gcm(input_data, (unsigned int)input_data_len,
1153 		    ctx->encrypt)) != 0)) {
1154 			esp_log_err("set aad failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1155 			return rc;
1156 		}
1157 		memcpy(output_data, input_data, input_data_len);
1158 	} else {
1159 		if (__improbable((rc = aes_encrypt_gcm(input_data, (unsigned int)input_data_len,
1160 		    output_data, ctx->encrypt)) != 0)) {
1161 			esp_log_err("encrypt failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1162 			return rc;
1163 		}
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 int
esp_aes_gcm_decrypt_data(struct secasvar * sav,uint8_t * input_data,size_t input_data_len,struct newesp * esp_hdr,uint8_t * iv,size_t ivlen,uint8_t * output_data,size_t output_data_len)1170 esp_aes_gcm_decrypt_data(struct secasvar *sav, uint8_t *input_data,
1171     size_t input_data_len, struct newesp *esp_hdr, uint8_t *iv, size_t ivlen,
1172     uint8_t *output_data, size_t output_data_len)
1173 {
1174 	unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN] = {};
1175 	aes_gcm_ctx *ctx = NULL;
1176 	int rc = 0;
1177 
1178 	ESP_CHECK_ARG(sav);
1179 	ESP_CHECK_ARG(input_data);
1180 	ESP_CHECK_ARG(esp_hdr);
1181 	ESP_CHECK_ARG(output_data);
1182 
1183 	VERIFY(input_data_len > 0);
1184 	VERIFY(output_data_len >= input_data_len);
1185 
1186 	const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) == SADB_X_EXT_IIV);
1187 	const bool gmac_only = (sav->alg_enc == SADB_X_EALG_AES_GMAC);
1188 
1189 	if (__improbable(implicit_iv && gmac_only)) {
1190 		esp_log_err("IIV and GMAC-only not supported together, SPI  0x%08x\n",
1191 		    ntohl(sav->spi));
1192 		return EINVAL;
1193 	}
1194 
1195 	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) -
1196 	    ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
1197 
1198 	if (implicit_iv) {
1199 		VERIFY(iv == NULL);
1200 		VERIFY(ivlen == 0);
1201 
1202 		/* Use the ESP sequence number in the header to form the
1203 		 * rest of the nonce according to RFC 8750.
1204 		 */
1205 		memcpy(nonce + sizeof(nonce) - sizeof(esp_hdr->esp_seq), &esp_hdr->esp_seq, sizeof(esp_hdr->esp_seq));
1206 	} else {
1207 		ESP_CHECK_ARG(iv);
1208 		VERIFY(ivlen == ESP_GCM_IVLEN);
1209 
1210 		memcpy(nonce + ESP_GCM_SALT_LEN, iv, ESP_GCM_IVLEN);
1211 	}
1212 
1213 	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
1214 
1215 	if (__improbable((rc = aes_decrypt_set_iv_gcm(nonce, sizeof(nonce),
1216 	    ctx->decrypt)) != 0)) {
1217 		esp_log_err("set iv failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1218 		cc_clear(sizeof(nonce), nonce);
1219 		return rc;
1220 	}
1221 
1222 	/* Set Additional Authentication Data */
1223 	if (__improbable((rc = aes_decrypt_aad_gcm((unsigned char *)esp_hdr, sizeof(*esp_hdr),
1224 	    ctx->decrypt)) != 0)) {
1225 		esp_log_err("AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1226 		cc_clear(sizeof(nonce), nonce);
1227 		return rc;
1228 	}
1229 
1230 	/* Add IV to Additional Authentication Data for GMAC-only mode */
1231 	if (gmac_only) {
1232 		if (__improbable((rc = aes_decrypt_aad_gcm(nonce + ESP_GCM_SALT_LEN,
1233 		    ESP_GCM_IVLEN, ctx->decrypt)) != 0)) {
1234 			esp_log_err("AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1235 			cc_clear(sizeof(nonce), nonce);
1236 			return rc;
1237 		}
1238 	}
1239 
1240 	cc_clear(sizeof(nonce), nonce);
1241 
1242 	if (gmac_only) {
1243 		if (__improbable((rc = aes_decrypt_aad_gcm(input_data, (unsigned int)input_data_len,
1244 		    ctx->decrypt)) != 0)) {
1245 			esp_log_err("AAD failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1246 			return rc;
1247 		}
1248 		memcpy(output_data, input_data, input_data_len);
1249 	} else {
1250 		if (__improbable((rc = aes_decrypt_gcm(input_data, (unsigned int)input_data_len,
1251 		    output_data, ctx->decrypt)) != 0)) {
1252 			esp_log_err("decrypt failure %d, SPI 0x%08x\n", rc, ntohl(sav->spi));
1253 			return rc;
1254 		}
1255 	}
1256 
1257 	return 0;
1258 }
1259