1 /*
2 * Copyright (c) 2008-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/socket.h>
64 #include <sys/queue.h>
65 #include <sys/syslog.h>
66 #include <sys/mbuf.h>
67 #include <sys/mcache.h>
68
69 #include <kern/locks.h>
70
71 #include <net/if.h>
72 #include <net/route.h>
73
74 #include <netinet6/ipsec.h>
75 #include <netinet6/esp.h>
76 #include <netinet6/esp_rijndael.h>
77
78 #include <libkern/crypto/aes.h>
79
80 #include <netkey/key.h>
81
82 #include <net/net_osdep.h>
83
84 #define MAX_REALIGN_LEN 2000
85 #define AES_BLOCKLEN 16
86 #define ESP_GCM_SALT_LEN 4 // RFC 4106 Section 4
87 #define ESP_GCM_IVLEN 8
88 #define ESP_GCM_ALIGN 16
89
/*
 * Per-SA AES-GCM state.  The two pointers are set up by esp_gcm_schedule()
 * and point into the trailing `ctxt` storage, which holds the corecrypto
 * decrypt context followed by the encrypt context (sizes come from
 * aes_decrypt_get_ctx_size_gcm()/aes_encrypt_get_ctx_size_gcm()).
 * `ctxt[0]` is a zero-length trailing array marking where that storage
 * begins; esp_gcm_schedlen() reserves the space for it.
 */
typedef struct {
	ccgcm_ctx *decrypt;
	ccgcm_ctx *encrypt;
	ccgcm_ctx ctxt[0];
} aes_gcm_ctx;
95
96 size_t
esp_aes_schedlen(__unused const struct esp_algorithm * algo)97 esp_aes_schedlen(
98 __unused const struct esp_algorithm *algo)
99 {
100 return sizeof(aes_ctx);
101 }
102
103 int
esp_aes_schedule(__unused const struct esp_algorithm * algo,struct secasvar * sav)104 esp_aes_schedule(
105 __unused const struct esp_algorithm *algo,
106 struct secasvar *sav)
107 {
108 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
109 aes_ctx *ctx = (aes_ctx*)sav->sched;
110
111 aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
112 aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
113
114 return 0;
115 }
116
117
118 /* The following 2 functions decrypt or encrypt the contents of
 * the mbuf chain passed in keeping the IP and ESP headers in place,
120 * along with the IV.
121 * The code attempts to call the crypto code with the largest chunk
122 * of data it can based on the amount of source data in
123 * the current source mbuf and the space remaining in the current
124 * destination mbuf. The crypto code requires data to be a multiples
125 * of 16 bytes. A separate buffer is used when a 16 byte block spans
126 * mbufs.
127 *
128 * m = mbuf chain
129 * off = offset to ESP header
130 *
131 * local vars for source:
132 * soff = offset from beginning of the chain to the head of the
133 * current mbuf.
134 * scut = last mbuf that contains headers to be retained
135 * scutoff = offset to end of the headers in scut
136 * s = the current mbuf
137 * sn = current offset to data in s (next source data to process)
138 *
139 * local vars for dest:
140 * d0 = head of chain
141 * d = current mbuf
142 * dn = current offset in d (next location to store result)
143 */
144
145
146 int
esp_cbc_decrypt_aes(struct mbuf * m,size_t off,struct secasvar * sav,const struct esp_algorithm * algo,int ivlen)147 esp_cbc_decrypt_aes(
148 struct mbuf *m,
149 size_t off,
150 struct secasvar *sav,
151 const struct esp_algorithm *algo,
152 int ivlen)
153 {
154 struct mbuf *s;
155 struct mbuf *d, *d0, *dp;
156 int soff; /* offset from the head of chain, to head of this mbuf */
157 int sn, dn; /* offset from the head of the mbuf, to meat */
158 size_t ivoff, bodyoff;
159 u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
160 u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
161 struct mbuf *scut;
162 int scutoff;
163 int i, len;
164
165
166 if (ivlen != AES_BLOCKLEN) {
167 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
168 "unsupported ivlen %d\n", algo->name, ivlen));
169 m_freem(m);
170 return EINVAL;
171 }
172
173 if (sav->flags & SADB_X_EXT_OLD) {
174 /* RFC 1827 */
175 ivoff = off + sizeof(struct esp);
176 bodyoff = off + sizeof(struct esp) + ivlen;
177 } else {
178 ivoff = off + sizeof(struct newesp);
179 bodyoff = off + sizeof(struct newesp) + ivlen;
180 }
181
182 if (m->m_pkthdr.len < bodyoff) {
183 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
184 algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
185 m_freem(m);
186 return EINVAL;
187 }
188 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
189 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
190 "payload length must be multiple of %d\n",
191 algo->name, AES_BLOCKLEN));
192 m_freem(m);
193 return EINVAL;
194 }
195
196 VERIFY(ivoff <= INT_MAX);
197
198 /* grab iv */
199 m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);
200
201 s = m;
202 soff = sn = dn = 0;
203 d = d0 = dp = NULL;
204 sp = dptr = NULL;
205
206 /* skip header/IV offset */
207 while (soff < bodyoff) {
208 if (soff + s->m_len > bodyoff) {
209 sn = (int)(bodyoff - soff);
210 break;
211 }
212
213 soff += s->m_len;
214 s = s->m_next;
215 }
216 scut = s;
217 scutoff = sn;
218
219 /* skip over empty mbuf */
220 while (s && s->m_len == 0) {
221 s = s->m_next;
222 }
223
224 while (soff < m->m_pkthdr.len) {
225 /* source */
226 if (sn + AES_BLOCKLEN <= s->m_len) {
227 /* body is continuous */
228 sp = mtod(s, u_int8_t *) + sn;
229 len = s->m_len - sn;
230 len -= len % AES_BLOCKLEN; // full blocks only
231 } else {
232 /* body is non-continuous */
233 m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
234 sp = sbuf;
235 len = AES_BLOCKLEN; // 1 block only in sbuf
236 }
237
238 /* destination */
239 if (!d || dn + AES_BLOCKLEN > d->m_len) {
240 if (d) {
241 dp = d;
242 }
243 MGET(d, M_DONTWAIT, MT_DATA);
244 i = m->m_pkthdr.len - (soff + sn);
245 if (d && i > MLEN) {
246 MCLGET(d, M_DONTWAIT);
247 if ((d->m_flags & M_EXT) == 0) {
248 d = m_mbigget(d, M_DONTWAIT);
249 if ((d->m_flags & M_EXT) == 0) {
250 m_free(d);
251 d = NULL;
252 }
253 }
254 }
255 if (!d) {
256 m_freem(m);
257 if (d0) {
258 m_freem(d0);
259 }
260 return ENOBUFS;
261 }
262 if (!d0) {
263 d0 = d;
264 }
265 if (dp) {
266 dp->m_next = d;
267 }
268
269 // try to make mbuf data aligned
270 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
271 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
272 }
273
274 d->m_len = (int)M_TRAILINGSPACE(d);
275 d->m_len -= d->m_len % AES_BLOCKLEN;
276 if (d->m_len > i) {
277 d->m_len = i;
278 }
279 dptr = mtod(d, u_int8_t *);
280 dn = 0;
281 }
282
283 /* adjust len if greater than space available in dest */
284 if (len > d->m_len - dn) {
285 len = d->m_len - dn;
286 }
287
288 /* decrypt */
289 // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
290 if (IPSEC_IS_P2ALIGNED(sp)) {
291 sp_unaligned = NULL;
292 } else {
293 sp_unaligned = sp;
294 if (len > MAX_REALIGN_LEN) {
295 m_freem(m);
296 if (d0 != NULL) {
297 m_freem(d0);
298 }
299 if (sp_aligned != NULL) {
300 kfree_data(sp_aligned, MAX_REALIGN_LEN);
301 sp_aligned = NULL;
302 }
303 return ENOBUFS;
304 }
305 if (sp_aligned == NULL) {
306 sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
307 if (sp_aligned == NULL) {
308 m_freem(m);
309 if (d0 != NULL) {
310 m_freem(d0);
311 }
312 return ENOMEM;
313 }
314 }
315 sp = sp_aligned;
316 memcpy(sp, sp_unaligned, len);
317 }
318 // no need to check output pointer alignment
319 aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
320 (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));
321
322 // update unaligned pointers
323 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
324 sp = sp_unaligned;
325 }
326
327 /* udpate offsets */
328 sn += len;
329 dn += len;
330
331 // next iv
332 bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);
333
334 /* find the next source block */
335 while (s && sn >= s->m_len) {
336 sn -= s->m_len;
337 soff += s->m_len;
338 s = s->m_next;
339 }
340 }
341
342 /* free un-needed source mbufs and add dest mbufs to chain */
343 m_freem(scut->m_next);
344 scut->m_len = scutoff;
345 scut->m_next = d0;
346
347 // free memory
348 if (sp_aligned != NULL) {
349 kfree_data(sp_aligned, MAX_REALIGN_LEN);
350 sp_aligned = NULL;
351 }
352
353 /* just in case */
354 cc_clear(sizeof(iv), iv);
355 cc_clear(sizeof(sbuf), sbuf);
356
357 return 0;
358 }
359
360 int
esp_cbc_encrypt_aes(struct mbuf * m,size_t off,__unused size_t plen,struct secasvar * sav,const struct esp_algorithm * algo,int ivlen)361 esp_cbc_encrypt_aes(
362 struct mbuf *m,
363 size_t off,
364 __unused size_t plen,
365 struct secasvar *sav,
366 const struct esp_algorithm *algo,
367 int ivlen)
368 {
369 struct mbuf *s;
370 struct mbuf *d, *d0, *dp;
371 int soff; /* offset from the head of chain, to head of this mbuf */
372 int sn, dn; /* offset from the head of the mbuf, to meat */
373 size_t ivoff, bodyoff;
374 u_int8_t *ivp, *dptr, *ivp_unaligned;
375 u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
376 u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
377 struct mbuf *scut;
378 int scutoff;
379 int i, len;
380
381 if (ivlen != AES_BLOCKLEN) {
382 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
383 "unsupported ivlen %d\n", algo->name, ivlen));
384 m_freem(m);
385 return EINVAL;
386 }
387
388 if (sav->flags & SADB_X_EXT_OLD) {
389 /* RFC 1827 */
390 ivoff = off + sizeof(struct esp);
391 bodyoff = off + sizeof(struct esp) + ivlen;
392 } else {
393 ivoff = off + sizeof(struct newesp);
394 bodyoff = off + sizeof(struct newesp) + ivlen;
395 }
396
397 VERIFY(ivoff <= INT_MAX);
398
399 /* put iv into the packet */
400 m_copyback(m, (int)ivoff, ivlen, sav->iv);
401 ivp = (u_int8_t *) sav->iv;
402
403 if (m->m_pkthdr.len < bodyoff) {
404 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
405 algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
406 m_freem(m);
407 return EINVAL;
408 }
409 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
410 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
411 "payload length must be multiple of %d\n",
412 algo->name, AES_BLOCKLEN));
413 m_freem(m);
414 return EINVAL;
415 }
416
417 s = m;
418 soff = sn = dn = 0;
419 d = d0 = dp = NULL;
420 sp = dptr = NULL;
421
422 /* skip headers/IV */
423 while (soff < bodyoff) {
424 if (soff + s->m_len > bodyoff) {
425 sn = (int)(bodyoff - soff);
426 break;
427 }
428
429 soff += s->m_len;
430 s = s->m_next;
431 }
432 scut = s;
433 scutoff = sn;
434
435 /* skip over empty mbuf */
436 while (s && s->m_len == 0) {
437 s = s->m_next;
438 }
439
440 while (soff < m->m_pkthdr.len) {
441 /* source */
442 if (sn + AES_BLOCKLEN <= s->m_len) {
443 /* body is continuous */
444 sp = mtod(s, u_int8_t *) + sn;
445 len = s->m_len - sn;
446 len -= len % AES_BLOCKLEN; // full blocks only
447 } else {
448 /* body is non-continuous */
449 m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
450 sp = sbuf;
451 len = AES_BLOCKLEN; // 1 block only in sbuf
452 }
453
454 /* destination */
455 if (!d || dn + AES_BLOCKLEN > d->m_len) {
456 if (d) {
457 dp = d;
458 }
459 MGET(d, M_DONTWAIT, MT_DATA);
460 i = m->m_pkthdr.len - (soff + sn);
461 if (d && i > MLEN) {
462 MCLGET(d, M_DONTWAIT);
463 if ((d->m_flags & M_EXT) == 0) {
464 d = m_mbigget(d, M_DONTWAIT);
465 if ((d->m_flags & M_EXT) == 0) {
466 m_free(d);
467 d = NULL;
468 }
469 }
470 }
471 if (!d) {
472 m_freem(m);
473 if (d0) {
474 m_freem(d0);
475 }
476 return ENOBUFS;
477 }
478 if (!d0) {
479 d0 = d;
480 }
481 if (dp) {
482 dp->m_next = d;
483 }
484
485 // try to make mbuf data aligned
486 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
487 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
488 }
489
490 d->m_len = (int)M_TRAILINGSPACE(d);
491 d->m_len -= d->m_len % AES_BLOCKLEN;
492 if (d->m_len > i) {
493 d->m_len = i;
494 }
495 dptr = mtod(d, u_int8_t *);
496 dn = 0;
497 }
498
499 /* adjust len if greater than space available */
500 if (len > d->m_len - dn) {
501 len = d->m_len - dn;
502 }
503
504 /* encrypt */
505 // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
506 if (IPSEC_IS_P2ALIGNED(sp)) {
507 sp_unaligned = NULL;
508 } else {
509 sp_unaligned = sp;
510 if (len > MAX_REALIGN_LEN) {
511 m_freem(m);
512 if (d0) {
513 m_freem(d0);
514 }
515 if (sp_aligned != NULL) {
516 kfree_data(sp_aligned, MAX_REALIGN_LEN);
517 sp_aligned = NULL;
518 }
519 return ENOBUFS;
520 }
521 if (sp_aligned == NULL) {
522 sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
523 if (sp_aligned == NULL) {
524 m_freem(m);
525 if (d0) {
526 m_freem(d0);
527 }
528 return ENOMEM;
529 }
530 }
531 sp = sp_aligned;
532 memcpy(sp, sp_unaligned, len);
533 }
534 // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
535 if (IPSEC_IS_P2ALIGNED(ivp)) {
536 ivp_unaligned = NULL;
537 } else {
538 ivp_unaligned = ivp;
539 ivp = ivp_aligned_buf;
540 memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
541 }
542 // no need to check output pointer alignment
543 aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
544 (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
545
546 // update unaligned pointers
547 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
548 sp = sp_unaligned;
549 }
550 if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
551 ivp = ivp_unaligned;
552 }
553
554 /* update offsets */
555 sn += len;
556 dn += len;
557
558 /* next iv */
559 ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted
560
561 /* find the next source block and skip empty mbufs */
562 while (s && sn >= s->m_len) {
563 sn -= s->m_len;
564 soff += s->m_len;
565 s = s->m_next;
566 }
567 }
568
569 /* free un-needed source mbufs and add dest mbufs to chain */
570 m_freem(scut->m_next);
571 scut->m_len = scutoff;
572 scut->m_next = d0;
573
574 // free memory
575 if (sp_aligned != NULL) {
576 kfree_data(sp_aligned, MAX_REALIGN_LEN);
577 sp_aligned = NULL;
578 }
579
580 /* just in case */
581 cc_clear(sizeof(sbuf), sbuf);
582 key_sa_stir_iv(sav);
583
584 return 0;
585 }
586
587 size_t
esp_gcm_schedlen(__unused const struct esp_algorithm * algo)588 esp_gcm_schedlen(
589 __unused const struct esp_algorithm *algo)
590 {
591 return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN;
592 }
593
594 int
esp_gcm_schedule(__unused const struct esp_algorithm * algo,struct secasvar * sav)595 esp_gcm_schedule( __unused const struct esp_algorithm *algo,
596 struct secasvar *sav)
597 {
598 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
599 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
600 const u_int ivlen = sav->ivlen;
601 const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
602 unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];
603 int rc;
604
605 ctx->decrypt = &ctx->ctxt[0];
606 ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];
607
608 if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
609 ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
610 return EINVAL;
611 }
612
613 rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt);
614 if (rc) {
615 return rc;
616 }
617
618 if (!implicit_iv) {
619 memset(nonce, 0, ESP_GCM_SALT_LEN + ivlen);
620 memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
621 memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen);
622
623 rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
624 cc_clear(sizeof(nonce), nonce);
625 if (rc) {
626 return rc;
627 }
628 } else {
629 rc = aes_encrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->encrypt);
630 if (rc) {
631 return rc;
632 }
633 }
634
635 rc = aes_encrypt_reset_gcm(ctx->encrypt);
636 if (rc) {
637 return rc;
638 }
639
640 return rc;
641 }
642
643 int
esp_gcm_ivlen(const struct esp_algorithm * algo,struct secasvar * sav)644 esp_gcm_ivlen(const struct esp_algorithm *algo,
645 struct secasvar *sav)
646 {
647 if (!algo) {
648 panic("esp_gcm_ivlen: unknown algorithm");
649 }
650
651 if (sav != NULL && ((sav->flags & SADB_X_EXT_IIV) != 0)) {
652 return 0;
653 } else {
654 return algo->ivlenval;
655 }
656 }
657
658 int
esp_gcm_encrypt_finalize(struct secasvar * sav,unsigned char * tag,size_t tag_bytes)659 esp_gcm_encrypt_finalize(struct secasvar *sav,
660 unsigned char *tag, size_t tag_bytes)
661 {
662 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
663 return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt);
664 }
665
666 int
esp_gcm_decrypt_finalize(struct secasvar * sav,unsigned char * tag,size_t tag_bytes)667 esp_gcm_decrypt_finalize(struct secasvar *sav,
668 unsigned char *tag, size_t tag_bytes)
669 {
670 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
671 return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt);
672 }
673
674 int
esp_gcm_encrypt_aes(struct mbuf * m,size_t off,__unused size_t plen,struct secasvar * sav,const struct esp_algorithm * algo __unused,int ivlen)675 esp_gcm_encrypt_aes(
676 struct mbuf *m,
677 size_t off,
678 __unused size_t plen,
679 struct secasvar *sav,
680 const struct esp_algorithm *algo __unused,
681 int ivlen)
682 {
683 struct mbuf *s;
684 struct mbuf *d, *d0, *dp;
685 int soff; /* offset from the head of chain, to head of this mbuf */
686 int sn, dn; /* offset from the head of the mbuf, to meat */
687 const size_t ivoff = off + sizeof(struct newesp);
688 const size_t bodyoff = ivoff + ivlen;
689 u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL;
690 aes_gcm_ctx *ctx;
691 struct mbuf *scut;
692 int scutoff;
693 int i, len;
694 const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
695 struct newesp esp;
696 unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];
697
698 VERIFY(off <= INT_MAX);
699 VERIFY(ivoff <= INT_MAX);
700 VERIFY(bodyoff <= INT_MAX);
701
702 if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
703 ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
704 m_freem(m);
705 return EINVAL;
706 }
707
708 ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
709
710 if (aes_encrypt_reset_gcm(ctx->encrypt)) {
711 ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
712 m_freem(m);
713 return EINVAL;
714 }
715
716 /* Copy the ESP header */
717 m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);
718
719 /* Construct the IV */
720 memset(nonce, 0, sizeof(nonce));
721 if (!implicit_iv) {
722 /* generate new iv */
723 if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
724 ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
725 m_freem(m);
726 return EINVAL;
727 }
728
729 /*
730 * The IV is now generated within corecrypto and
731 * is provided to ESP using aes_encrypt_inc_iv_gcm().
732 * This makes the sav->iv redundant and is no longer
733 * used in GCM operations. But we still copy the IV
734 * back to sav->iv to ensure that any future code reading
735 * this value will get the latest IV.
736 */
737 memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
738 m_copyback(m, (int)ivoff, ivlen, sav->iv);
739 cc_clear(sizeof(nonce), nonce);
740 } else {
741 /* Use the ESP sequence number in the header to form the
742 * nonce according to RFC 8750. The first 4 bytes are the
743 * salt value, the next 4 bytes are zeroes, and the final
744 * 4 bytes are the ESP sequence number.
745 */
746 memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
747 memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
748 int rc = aes_encrypt_set_iv_gcm((const unsigned char *)nonce, sizeof(nonce), ctx->encrypt);
749 cc_clear(sizeof(nonce), nonce);
750 if (rc) {
751 ipseclog((LOG_ERR, "%s: iv set failure\n", __FUNCTION__));
752 m_freem(m);
753 return EINVAL;
754 }
755 }
756
757 if (m->m_pkthdr.len < bodyoff) {
758 ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
759 m->m_pkthdr.len, (u_int32_t)bodyoff));
760 m_freem(m);
761 return EINVAL;
762 }
763
764
765 /* Set Additional Authentication Data */
766 if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
767 ipseclog((LOG_ERR, "%s: packet encryption AAD failure\n", __FUNCTION__));
768 m_freem(m);
769 return EINVAL;
770 }
771
772 s = m;
773 soff = sn = dn = 0;
774 d = d0 = dp = NULL;
775 sp = dptr = NULL;
776
777 /* skip headers/IV */
778 while (soff < bodyoff) {
779 if (soff + s->m_len > bodyoff) {
780 sn = (int)(bodyoff - soff);
781 break;
782 }
783
784 soff += s->m_len;
785 s = s->m_next;
786 }
787 scut = s;
788 scutoff = sn;
789
790 /* skip over empty mbuf */
791 while (s && s->m_len == 0) {
792 s = s->m_next;
793 }
794
795 while (soff < m->m_pkthdr.len) {
796 /* source */
797 sp = mtod(s, u_int8_t *) + sn;
798 len = s->m_len - sn;
799
800 /* destination */
801 if (!d || (dn + len > d->m_len)) {
802 if (d) {
803 dp = d;
804 }
805 MGET(d, M_DONTWAIT, MT_DATA);
806 i = m->m_pkthdr.len - (soff + sn);
807 if (d && i > MLEN) {
808 MCLGET(d, M_DONTWAIT);
809 if ((d->m_flags & M_EXT) == 0) {
810 d = m_mbigget(d, M_DONTWAIT);
811 if ((d->m_flags & M_EXT) == 0) {
812 m_free(d);
813 d = NULL;
814 }
815 }
816 }
817 if (!d) {
818 m_freem(m);
819 if (d0) {
820 m_freem(d0);
821 }
822 return ENOBUFS;
823 }
824 if (!d0) {
825 d0 = d;
826 }
827 if (dp) {
828 dp->m_next = d;
829 }
830
831 // try to make mbuf data aligned
832 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
833 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
834 }
835
836 d->m_len = (int)M_TRAILINGSPACE(d);
837
838 if (d->m_len > i) {
839 d->m_len = i;
840 }
841
842 dptr = mtod(d, u_int8_t *);
843 dn = 0;
844 }
845
846 /* adjust len if greater than space available */
847 if (len > d->m_len - dn) {
848 len = d->m_len - dn;
849 }
850
851 /* encrypt */
852 // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
853 if (IPSEC_IS_P2ALIGNED(sp)) {
854 sp_unaligned = NULL;
855 } else {
856 sp_unaligned = sp;
857 if (len > MAX_REALIGN_LEN) {
858 m_freem(m);
859 if (d0) {
860 m_freem(d0);
861 }
862 if (sp_aligned != NULL) {
863 kfree_data(sp_aligned, MAX_REALIGN_LEN);
864 sp_aligned = NULL;
865 }
866 return ENOBUFS;
867 }
868 if (sp_aligned == NULL) {
869 sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
870 if (sp_aligned == NULL) {
871 m_freem(m);
872 if (d0) {
873 m_freem(d0);
874 }
875 return ENOMEM;
876 }
877 }
878 sp = sp_aligned;
879 memcpy(sp, sp_unaligned, len);
880 }
881
882 if (aes_encrypt_gcm(sp, len, dptr + dn, ctx->encrypt)) {
883 ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
884 m_freem(m);
885 return EINVAL;
886 }
887
888 // update unaligned pointers
889 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
890 sp = sp_unaligned;
891 }
892
893 /* update offsets */
894 sn += len;
895 dn += len;
896
897 /* find the next source block and skip empty mbufs */
898 while (s && sn >= s->m_len) {
899 sn -= s->m_len;
900 soff += s->m_len;
901 s = s->m_next;
902 }
903 }
904
905 /* free un-needed source mbufs and add dest mbufs to chain */
906 m_freem(scut->m_next);
907 scut->m_len = scutoff;
908 scut->m_next = d0;
909
910 // free memory
911 if (sp_aligned != NULL) {
912 kfree_data(sp_aligned, MAX_REALIGN_LEN);
913 sp_aligned = NULL;
914 }
915
916 return 0;
917 }
918
919 int
esp_gcm_decrypt_aes(struct mbuf * m,size_t off,struct secasvar * sav,const struct esp_algorithm * algo __unused,int ivlen)920 esp_gcm_decrypt_aes(
921 struct mbuf *m,
922 size_t off,
923 struct secasvar *sav,
924 const struct esp_algorithm *algo __unused,
925 int ivlen)
926 {
927 struct mbuf *s;
928 struct mbuf *d, *d0, *dp;
929 int soff; /* offset from the head of chain, to head of this mbuf */
930 int sn, dn; /* offset from the head of the mbuf, to meat */
931 const size_t ivoff = off + sizeof(struct newesp);
932 const size_t bodyoff = ivoff + ivlen;
933 u_int8_t *dptr;
934 u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL;
935 aes_gcm_ctx *ctx;
936 struct mbuf *scut;
937 int scutoff;
938 int i, len;
939 const bool implicit_iv = ((sav->flags & SADB_X_EXT_IIV) != 0);
940 struct newesp esp;
941 unsigned char nonce[ESP_GCM_SALT_LEN + ESP_GCM_IVLEN];
942
943 VERIFY(off <= INT_MAX);
944 VERIFY(ivoff <= INT_MAX);
945 VERIFY(bodyoff <= INT_MAX);
946
947 if (ivlen != (implicit_iv ? 0 : ESP_GCM_IVLEN)) {
948 ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
949 m_freem(m);
950 return EINVAL;
951 }
952
953 if (m->m_pkthdr.len < bodyoff) {
954 ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
955 m->m_pkthdr.len, (u_int32_t)bodyoff));
956 m_freem(m);
957 return EINVAL;
958 }
959
960 /* Copy the ESP header */
961 m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);
962
963 /* Construct IV starting with salt */
964 memset(nonce, 0, sizeof(nonce));
965 memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
966 if (!implicit_iv) {
967 /* grab IV from packet */
968 u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4)));
969 m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);
970 memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen);
971 /* just in case */
972 cc_clear(sizeof(iv), iv);
973 } else {
974 /* Use the ESP sequence number in the header to form the
975 * rest of the nonce according to RFC 8750.
976 */
977 memcpy(nonce + sizeof(nonce) - sizeof(esp.esp_seq), &esp.esp_seq, sizeof(esp.esp_seq));
978 }
979
980 ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
981 int rc = aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt);
982 cc_clear(sizeof(nonce), nonce);
983 if (rc) {
984 ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
985 m_freem(m);
986 return EINVAL;
987 }
988
989 /* Set Additional Authentication Data */
990 if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
991 ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
992 return EINVAL;
993 }
994
995 s = m;
996 soff = sn = dn = 0;
997 d = d0 = dp = NULL;
998 sp = dptr = NULL;
999
1000 /* skip header/IV offset */
1001 while (soff < bodyoff) {
1002 if (soff + s->m_len > bodyoff) {
1003 sn = (int)(bodyoff - soff);
1004 break;
1005 }
1006
1007 soff += s->m_len;
1008 s = s->m_next;
1009 }
1010 scut = s;
1011 scutoff = sn;
1012
1013 /* skip over empty mbuf */
1014 while (s && s->m_len == 0) {
1015 s = s->m_next;
1016 }
1017
1018 while (soff < m->m_pkthdr.len) {
1019 /* source */
1020 sp = mtod(s, u_int8_t *) + sn;
1021 len = s->m_len - sn;
1022
1023 /* destination */
1024 if (!d || (dn + len > d->m_len)) {
1025 if (d) {
1026 dp = d;
1027 }
1028 MGET(d, M_DONTWAIT, MT_DATA);
1029 i = m->m_pkthdr.len - (soff + sn);
1030 if (d && i > MLEN) {
1031 MCLGET(d, M_DONTWAIT);
1032 if ((d->m_flags & M_EXT) == 0) {
1033 d = m_mbigget(d, M_DONTWAIT);
1034 if ((d->m_flags & M_EXT) == 0) {
1035 m_free(d);
1036 d = NULL;
1037 }
1038 }
1039 }
1040 if (!d) {
1041 m_freem(m);
1042 if (d0) {
1043 m_freem(d0);
1044 }
1045 return ENOBUFS;
1046 }
1047 if (!d0) {
1048 d0 = d;
1049 }
1050 if (dp) {
1051 dp->m_next = d;
1052 }
1053
1054 // try to make mbuf data aligned
1055 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
1056 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
1057 }
1058
1059 d->m_len = (int)M_TRAILINGSPACE(d);
1060
1061 if (d->m_len > i) {
1062 d->m_len = i;
1063 }
1064
1065 dptr = mtod(d, u_int8_t *);
1066 dn = 0;
1067 }
1068
1069 /* adjust len if greater than space available in dest */
1070 if (len > d->m_len - dn) {
1071 len = d->m_len - dn;
1072 }
1073
1074 /* Decrypt */
1075 // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
1076 if (IPSEC_IS_P2ALIGNED(sp)) {
1077 sp_unaligned = NULL;
1078 } else {
1079 sp_unaligned = sp;
1080 if (len > MAX_REALIGN_LEN) {
1081 m_freem(m);
1082 if (d0) {
1083 m_freem(d0);
1084 }
1085 if (sp_aligned != NULL) {
1086 kfree_data(sp_aligned, MAX_REALIGN_LEN);
1087 sp_aligned = NULL;
1088 }
1089 return ENOBUFS;
1090 }
1091 if (sp_aligned == NULL) {
1092 sp_aligned = (u_int8_t *)kalloc_data(MAX_REALIGN_LEN, Z_NOWAIT);
1093 if (sp_aligned == NULL) {
1094 m_freem(m);
1095 if (d0) {
1096 m_freem(d0);
1097 }
1098 return ENOMEM;
1099 }
1100 }
1101 sp = sp_aligned;
1102 memcpy(sp, sp_unaligned, len);
1103 }
1104 // no need to check output pointer alignment
1105
1106 if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) {
1107 ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
1108 m_freem(m);
1109 return EINVAL;
1110 }
1111
1112 // update unaligned pointers
1113 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
1114 sp = sp_unaligned;
1115 }
1116
1117 /* udpate offsets */
1118 sn += len;
1119 dn += len;
1120
1121 /* find the next source block */
1122 while (s && sn >= s->m_len) {
1123 sn -= s->m_len;
1124 soff += s->m_len;
1125 s = s->m_next;
1126 }
1127 }
1128
1129 /* free un-needed source mbufs and add dest mbufs to chain */
1130 m_freem(scut->m_next);
1131 scut->m_len = scutoff;
1132 scut->m_next = d0;
1133
1134 // free memory
1135 if (sp_aligned != NULL) {
1136 kfree_data(sp_aligned, MAX_REALIGN_LEN);
1137 sp_aligned = NULL;
1138 }
1139
1140 return 0;
1141 }
1142