xref: /xnu-8796.101.5/bsd/vfs/vfs_cprotect.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2015-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/cprotect.h>
30 #include <sys/malloc.h>
31 #include <sys/mount_internal.h>
32 #include <sys/filio.h>
33 #include <sys/content_protection.h>
34 #include <libkern/crypto/sha1.h>
35 #include <libkern/libkern.h>
36 //for write protection
37 #include <vm/vm_kern.h>
38 #include <vm/vm_map.h>
39 
40 #define PTR_ADD(type, base, offset)             (type)((uintptr_t)(base) + (offset))
41 
42 // -- struct cpx --
43 
44 /*
45  * This structure contains the unwrapped key and is passed to the lower layers.
46  * It is private so users must use the accessors declared in sys/cprotect.h
47  * to read/write it.
48  */
49 
// cpx_flags defined in cprotect.h
enum {
	CPX_SEP_WRAPPEDKEY                      = 0x01, // key is SEP-wrapped; see cpx_is_sep_wrapped_key()
	CPX_IV_AES_CTX_INITIALIZED      = 0x02, // cpx_iv_aes_ctx_ptr holds a usable AES context
	CPX_USE_OFFSET_FOR_IV           = 0x04, // derive IVs from the file offset; see cpx_use_offset_for_iv()

	// Using AES IV context generated from key
	CPX_IV_AES_CTX_VFS                      = 0x08, // IV key was derived by cpx_iv_aes_ctx() (SHA-1 of cached key)
	CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10, // see cpx_synthetic_offset_for_iv()
	CPX_COMPOSITEKEY            = 0x20, // see cpx_is_composite_key()

	//write page protection
	CPX_WRITE_PROTECTABLE           = 0x40  // backing page may be write-protected (CONFIG_KEYPAGE_WP only)
};
64 
/*
 * variable-length CPX structure. See fixed-length variant in cprotect.h
 *
 * Holds the unwrapped key material handed to the lower layers. It is
 * private to this file; all access goes through the accessors declared
 * in sys/cprotect.h.
 */
struct cpx {
#if DEBUG
	uint32_t                cpx_magic1;     // leading guard word (see cpx_init()/cpx_free())
#endif
	aes_encrypt_ctx         *cpx_iv_aes_ctx_ptr;// Pointer to context used for generating the IV
	cpx_flags_t             cpx_flags;      // CPX_* bits from the enum above
	uint16_t                cpx_max_key_len;// capacity of cpx_cached_key[], set at init
	uint16_t                cpx_key_len;    // bytes currently valid in cpx_cached_key[]
	//fixed length up to here.  cpx_cached_key is variable-length
	uint8_t                 cpx_cached_key[];
};
79 
/* Allows us to switch between CPX types */
typedef union cpxunion {
	struct cpx cpx_var;     // variable-length variant (this file)
	fcpx_t cpx_fixed;       // fixed-length variant (sys/cprotect.h)
} cpxunion_t;

// Zone for cpx allocations on the non-CONFIG_KEYPAGE_WP path; sized for
// the fixed-length variant. ZC_ZFREE_CLEARMEM: elements are cleared on
// free so key material does not linger in the zone.
ZONE_DEFINE(cpx_zone, "cpx",
    sizeof(struct fcpx), ZC_ZFREE_CLEARMEM);
// Zone for the lazily-allocated AES IV contexts.
// NOTE(review): "ctz" looks like a typo for "ctx"; identifier kept as-is.
ZONE_DEFINE(aes_ctz_zone, "AES ctx",
    sizeof(aes_encrypt_ctx), ZC_ZFREE_CLEARMEM);

// Note: see struct fcpx defined in sys/cprotect.h
92 
93 // -- cpx_t accessors --
94 
95 size_t
cpx_size(size_t key_len)96 cpx_size(size_t key_len)
97 {
98 	// This should pick up the 'magic' word in DEBUG for free.
99 	size_t size = sizeof(struct cpx) + key_len;
100 
101 	return size;
102 }
103 
104 size_t
cpx_sizex(const struct cpx * cpx)105 cpx_sizex(const struct cpx *cpx)
106 {
107 	return cpx_size(cpx->cpx_max_key_len);
108 }
109 
110 cpx_t
cpx_alloc(size_t key_len,bool needs_ctx)111 cpx_alloc(size_t key_len, bool needs_ctx)
112 {
113 	cpx_t cpx = NULL;
114 
115 #if CONFIG_KEYPAGE_WP
116 #pragma unused(key_len, needs_ctx)
117 
118 	/*
119 	 * Macs only use 1 key per volume, so force it into its own page.
120 	 * This way, we can write-protect as needed.
121 	 */
122 
123 	assert(cpx_size(key_len) <= PAGE_SIZE);
124 	kmem_alloc(kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE,
125 	    KMA_DATA | KMA_NOFAIL, VM_KERN_MEMORY_FILE);
126 	//mark the page as protectable, since kmem_alloc succeeded.
127 	cpx->cpx_flags |= CPX_WRITE_PROTECTABLE;
128 #else
129 	/* If key page write protection disabled, just switch to zalloc */
130 
131 	// error out if you try to request a key that's too big
132 	if (key_len > VFS_CP_MAX_CACHEBUFLEN) {
133 		return NULL;
134 	}
135 
136 	// the actual key array is fixed-length, but the amount of usable content can vary, via 'key_len'
137 	cpx = zalloc_flags(cpx_zone, Z_WAITOK | Z_ZERO);
138 
139 	// if our encryption type needs it, alloc the context
140 	if (needs_ctx) {
141 		cpx_alloc_ctx(cpx);
142 	}
143 
144 #endif
145 	cpx_init(cpx, key_len);
146 
147 	return cpx;
148 }
149 
/*
 * Ensure this cpx has an AES IV context allocated.
 * No-op under CONFIG_KEYPAGE_WP and when a context already exists.
 * Always returns 0 (Z_WAITOK allocations do not fail).
 */
int
cpx_alloc_ctx(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
	(void) cpx;
#else
	if (cpx->cpx_iv_aes_ctx_ptr) {
		// already allocated?
		return 0;
	}

	cpx->cpx_iv_aes_ctx_ptr = zalloc_flags(aes_ctz_zone, Z_WAITOK | Z_ZERO);
#endif // CONFIG_KEYPAGE_WP

	return 0;
}
166 
/*
 * Release the AES IV context, if one was allocated.
 * No-op under CONFIG_KEYPAGE_WP.
 * NOTE(review): safety against double-free relies on XNU's zfree()
 * macro clearing its pointer argument — confirm against the zalloc API
 * in use.
 */
void
cpx_free_ctx(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
	(void) cpx;
# else
	if (cpx->cpx_iv_aes_ctx_ptr) {
		zfree(aes_ctz_zone, cpx->cpx_iv_aes_ctx_ptr);
	}
#endif // CONFIG_KEYPAGE_WP
}
178 
/*
 * Write-protect the page backing this cpx (CONFIG_KEYPAGE_WP builds
 * only, and only when the cpx was page-allocated with
 * CPX_WRITE_PROTECTABLE set). Leaves the page readable; cpx_free()
 * restores write access before zeroing and freeing.
 */
void
cpx_writeprotect(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
	void *cpxstart = (void*)cpx;
	void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE);
	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
		// FALSE: adjust current protection only, not the maximum
		vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_READ), FALSE);
	}
#else
	(void) cpx;
#endif
	return;
}
193 
#if DEBUG
// Guard words written by cpx_init() and verified by cpx_free():
// cpx_magic1 lives at the head of the struct, cpx_magic2 in the last
// 4 bytes of the allocation (see the PTR_ADD uses in those functions).
static const uint32_t cpx_magic1 = 0x7b787063;          // cpx{
static const uint32_t cpx_magic2 = 0x7870637d;          // }cpx
#endif
198 
/*
 * Tear down a cpx allocated by cpx_alloc().
 * DEBUG builds first verify both guard words are intact.
 * CONFIG_KEYPAGE_WP: restore write access, zero the key material, and
 * free the backing page. NOTE(review): if CPX_WRITE_PROTECTABLE is not
 * set this path returns without freeing anything — it relies on
 * cpx_alloc() having set the bit on every WP allocation (and on the
 * bit surviving cpx_init()'s cpx_flags = 0 reset); confirm.
 * Otherwise: free the AES context (if any) and the zone element
 * (zone is ZC_ZFREE_CLEARMEM, so contents are cleared on free).
 */
void
cpx_free(cpx_t cpx)
{
#if DEBUG
	assert(cpx->cpx_magic1 == cpx_magic1);
	assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);
#endif

#if CONFIG_KEYPAGE_WP
	/* unprotect the page before bzeroing */
	void *cpxstart = (void*)cpx;
	void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE);
	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
		vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_DEFAULT), FALSE);

		//now zero the memory after un-protecting it
		bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);

		//If we are here, then we used kmem_alloc to get the page. Must use kmem_free to drop it.
		kmem_free(kernel_map, (vm_offset_t)cpx, PAGE_SIZE);
		return;
	}
#else
	// free the context if it wasn't already freed
	cpx_free_ctx(cpx);
	zfree(cpx_zone, cpx);
	return;
#endif
}
228 
/*
 * (Re)initialize a cpx for a key of capacity 'key_len' bytes: clears
 * all flags, zeroes the current key length, and records the capacity.
 * DEBUG builds also plant the guard words; the trailing one occupies
 * the last 4 bytes of the key buffer (cpx_size(key_len) - 4).
 */
void
cpx_init(cpx_t cpx, size_t key_len)
{
#if DEBUG
	cpx->cpx_magic1 = cpx_magic1;
	*PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;
#endif
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
	// capacity is stored in a uint16_t; callers must not exceed it
	assert(key_len <= UINT16_MAX);
	cpx->cpx_max_key_len = (uint16_t)key_len;
}
241 
242 bool
cpx_is_sep_wrapped_key(const struct cpx * cpx)243 cpx_is_sep_wrapped_key(const struct cpx *cpx)
244 {
245 	return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
246 }
247 
248 void
cpx_set_is_sep_wrapped_key(struct cpx * cpx,bool v)249 cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
250 {
251 	if (v) {
252 		SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
253 	} else {
254 		CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
255 	}
256 }
257 
258 bool
cpx_is_composite_key(const struct cpx * cpx)259 cpx_is_composite_key(const struct cpx *cpx)
260 {
261 	return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY);
262 }
263 
264 void
cpx_set_is_composite_key(struct cpx * cpx,bool v)265 cpx_set_is_composite_key(struct cpx *cpx, bool v)
266 {
267 	if (v) {
268 		SET(cpx->cpx_flags, CPX_COMPOSITEKEY);
269 	} else {
270 		CLR(cpx->cpx_flags, CPX_COMPOSITEKEY);
271 	}
272 }
273 
274 bool
cpx_use_offset_for_iv(const struct cpx * cpx)275 cpx_use_offset_for_iv(const struct cpx *cpx)
276 {
277 	return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
278 }
279 
280 void
cpx_set_use_offset_for_iv(struct cpx * cpx,bool v)281 cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
282 {
283 	if (v) {
284 		SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
285 	} else {
286 		CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
287 	}
288 }
289 
290 bool
cpx_synthetic_offset_for_iv(const struct cpx * cpx)291 cpx_synthetic_offset_for_iv(const struct cpx *cpx)
292 {
293 	return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
294 }
295 
296 void
cpx_set_synthetic_offset_for_iv(struct cpx * cpx,bool v)297 cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v)
298 {
299 	if (v) {
300 		SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
301 	} else {
302 		CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
303 	}
304 }
305 
306 uint16_t
cpx_max_key_len(const struct cpx * cpx)307 cpx_max_key_len(const struct cpx *cpx)
308 {
309 	return cpx->cpx_max_key_len;
310 }
311 
312 uint16_t
cpx_key_len(const struct cpx * cpx)313 cpx_key_len(const struct cpx *cpx)
314 {
315 	return cpx->cpx_key_len;
316 }
317 
318 void
cpx_set_key_len(struct cpx * cpx,uint16_t key_len)319 cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
320 {
321 	cpx->cpx_key_len = key_len;
322 
323 	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS)) {
324 		/*
325 		 * We assume that if the key length is being modified, the key
326 		 * has changed.  As a result, un-set any bits related to the
327 		 * AES context, if needed. They should be re-generated
328 		 * on-demand.
329 		 */
330 		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS);
331 	}
332 }
333 
334 bool
cpx_has_key(const struct cpx * cpx)335 cpx_has_key(const struct cpx *cpx)
336 {
337 	return cpx->cpx_key_len > 0;
338 }
339 
340 #pragma clang diagnostic push
341 #pragma clang diagnostic ignored "-Wcast-qual"
342 void *
cpx_key(const struct cpx * cpx)343 cpx_key(const struct cpx *cpx)
344 {
345 	return (void *)cpx->cpx_cached_key;
346 }
347 #pragma clang diagnostic pop
348 
/*
 * Install 'iv_key' as the AES-128 IV-generation key in this cpx's AES
 * context. Silently does nothing when no context has been allocated.
 * Marks the context initialized and enables offset-based IVs; clears
 * CPX_IV_AES_CTX_VFS (cpx_iv_aes_ctx() re-sets it when it derives the
 * IV key itself).
 */
void
cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
{
	if (cpx->cpx_iv_aes_ctx_ptr) {
		aes_encrypt_key128(iv_key, cpx->cpx_iv_aes_ctx_ptr);
		SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);
	}
}
358 
/*
 * Return the AES context used for IV generation, deriving it lazily on
 * first use: the SHA-1 digest of the cached key becomes the IV key
 * (via cpx_set_aes_iv_key()), and the context is tagged
 * CPX_IV_AES_CTX_VFS so cpx_set_key_len() knows to invalidate it.
 */
aes_encrypt_ctx *
cpx_iv_aes_ctx(struct cpx *cpx)
{
	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
		return cpx->cpx_iv_aes_ctx_ptr;
	}

	SHA1_CTX sha1ctxt;
	uint8_t digest[SHA_DIGEST_LENGTH]; /* Kiv */

	/* First init the cp_cache_iv_key[] */
	SHA1Init(&sha1ctxt);

	/*
	 * We can only use this when the keys are generated in the AP; As a result
	 * we only use the first 32 bytes of key length in the cache key
	 * NOTE(review): the code actually hashes cpx_key_len bytes, not a
	 * fixed 32 — confirm whether this comment or the code is stale.
	 */
	SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
	SHA1Final(digest, &sha1ctxt);

	// installs the digest as the IV key and sets CPX_IV_AES_CTX_INITIALIZED
	cpx_set_aes_iv_key(cpx, digest);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);

	return cpx->cpx_iv_aes_ctx_ptr;
}
384 
/*
 * Zero all key material held by this cpx: the cached key bytes, the
 * AES IV context contents (if allocated), the flags, and the current
 * key length. The buffer capacity and the context allocation itself
 * are preserved, so the cpx can be reused.
 * NOTE(review): relies on the kernel's bzero() not being elided by the
 * optimizer for this secret-wiping to be effective — confirm.
 */
void
cpx_flush(cpx_t cpx)
{
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	if (cpx->cpx_iv_aes_ctx_ptr) {
		bzero(cpx->cpx_iv_aes_ctx_ptr, sizeof(aes_encrypt_ctx));
	}
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
}
395 
396 bool
cpx_can_copy(const struct cpx * src,const struct cpx * dst)397 cpx_can_copy(const struct cpx *src, const struct cpx *dst)
398 {
399 	return src->cpx_key_len <= dst->cpx_max_key_len;
400 }
401 
/*
 * Copy key material and flags from src into dst. Callers must first
 * check cpx_can_copy(src, dst) — the key length is not re-validated
 * here.
 * NOTE(review): when the copied flags include
 * CPX_IV_AES_CTX_INITIALIZED, dst->cpx_iv_aes_ctx_ptr is dereferenced
 * and must already be allocated — confirm callers guarantee this.
 */
void
cpx_copy(const struct cpx *src, cpx_t dst)
{
	uint16_t key_len = cpx_key_len(src);
	// cpx_set_key_len also invalidates any VFS-derived IV context in dst
	cpx_set_key_len(dst, key_len);
	memcpy(cpx_key(dst), cpx_key(src), key_len);
	dst->cpx_flags = src->cpx_flags;
	if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
		*(dst->cpx_iv_aes_ctx_ptr) = *(src->cpx_iv_aes_ctx_ptr); // deep copy
	}
}
413 
// Discriminator for the union inside cp_vfs_callback_arg
typedef unsigned char cp_vfs_callback_arg_type_t;
enum {
	CP_TYPE_LOCK_STATE   = 0,       // union carries lock_state
	CP_TYPE_EP_STATE     = 1,       // union carries ep_state
};

// Argument bundle threaded through vfs_iterate() to cp_vfs_callback()
typedef struct {
	cp_vfs_callback_arg_type_t type;        // selects the valid union member
	union {
		cp_lock_state_t lock_state;
		cp_ep_state_t   ep_state;
	};
	int             valid_uuid;     // non-zero: apply only to volume_uuid
	uuid_t          volume_uuid;    // target volume when valid_uuid is set
} cp_vfs_callback_arg;
429 
/*
 * vfs_iterate() callback: forward a device-lock or EP state change to
 * one mount via VFS_IOCTL. When the argument carries a volume UUID,
 * mounts that cannot report an f_uuid, or whose f_uuid differs, are
 * skipped. Always returns 0 so iteration continues over every mount;
 * ioctl results are deliberately ignored.
 */
static int
cp_vfs_callback(mount_t mp, void *arg)
{
	cp_vfs_callback_arg *callback_arg = (cp_vfs_callback_arg *)arg;

	if (callback_arg->valid_uuid) {
		struct vfs_attr va;
		VFSATTR_INIT(&va);
		VFSATTR_WANTED(&va, f_uuid);

		// skip mounts whose UUID cannot be read...
		if (vfs_getattr(mp, &va, vfs_context_current())) {
			return 0;
		}

		// ...or that do not support f_uuid at all...
		if (!VFSATTR_IS_SUPPORTED(&va, f_uuid)) {
			return 0;
		}

		// ...or that are not the requested volume
		if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t))) {
			return 0;
		}
	}

	switch (callback_arg->type) {
	case(CP_TYPE_LOCK_STATE):
		VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->lock_state, 0, vfs_context_kernel());
		break;
	case(CP_TYPE_EP_STATE):
		VFS_IOCTL(mp, FIODEVICEEPSTATE, (void *)(uintptr_t)callback_arg->ep_state, 0, vfs_context_kernel());
		break;
	default:
		break;
	}
	return 0;
}
465 
466 int
cp_key_store_action(cp_key_store_action_t action)467 cp_key_store_action(cp_key_store_action_t action)
468 {
469 	cp_vfs_callback_arg callback_arg;
470 
471 	switch (action) {
472 	case CP_ACTION_LOCKED:
473 	case CP_ACTION_UNLOCKED:
474 		callback_arg.type = CP_TYPE_LOCK_STATE;
475 		callback_arg.lock_state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
476 		memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
477 		callback_arg.valid_uuid = 0;
478 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
479 	case CP_ACTION_EP_INVALIDATED:
480 		callback_arg.type = CP_TYPE_EP_STATE;
481 		callback_arg.ep_state = CP_EP_INVALIDATED;
482 		memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
483 		callback_arg.valid_uuid = 0;
484 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
485 	default:
486 		return -1;
487 	}
488 }
489 
490 int
cp_key_store_action_for_volume(uuid_t volume_uuid,cp_key_store_action_t action)491 cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action)
492 {
493 	cp_vfs_callback_arg callback_arg;
494 
495 	switch (action) {
496 	case CP_ACTION_LOCKED:
497 	case CP_ACTION_UNLOCKED:
498 		callback_arg.type = CP_TYPE_LOCK_STATE;
499 		callback_arg.lock_state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
500 		memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
501 		callback_arg.valid_uuid = 1;
502 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
503 	case CP_ACTION_EP_INVALIDATED:
504 		callback_arg.type = CP_TYPE_EP_STATE;
505 		callback_arg.ep_state = CP_EP_INVALIDATED;
506 		memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
507 		callback_arg.valid_uuid = 1;
508 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
509 	default:
510 		return -1;
511 	}
512 }
513 
514 int
cp_is_valid_class(int isdir,int32_t protectionclass)515 cp_is_valid_class(int isdir, int32_t protectionclass)
516 {
517 	/*
518 	 * The valid protection classes are from 0 -> N
519 	 * We use a signed argument to detect unassigned values from
520 	 * directory entry creation time in HFS.
521 	 */
522 	if (isdir) {
523 		/* Directories are not allowed to have F, but they can have "NONE" */
524 		return (protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
525 		       (protectionclass <= PROTECTION_CLASS_D);
526 	} else {
527 		return (protectionclass >= PROTECTION_CLASS_A) &&
528 		       (protectionclass <= PROTECTION_CLASS_F);
529 	}
530 }
531 
532 /*
533  * Parses versions of the form 12A316, i.e. <major><minor><revision> and
534  * returns a uint32_t in the form 0xaabbcccc where aa = <major>,
535  * bb = <ASCII char>, cccc = <revision>.
536  */
537 static cp_key_os_version_t
parse_os_version(const char * vers)538 parse_os_version(const char *vers)
539 {
540 	const char *p = vers;
541 
542 	int a = 0;
543 	while (*p >= '0' && *p <= '9') {
544 		a = a * 10 + *p - '0';
545 		++p;
546 	}
547 
548 	if (!a) {
549 		return 0;
550 	}
551 
552 	int b = *p++;
553 	if (!b) {
554 		return 0;
555 	}
556 
557 	int c = 0;
558 	while (*p >= '0' && *p <= '9') {
559 		c = c * 10 + *p - '0';
560 		++p;
561 	}
562 
563 	if (!c) {
564 		return 0;
565 	}
566 
567 	return (a & 0xff) << 24 | b << 16 | (c & 0xffff);
568 }
569 
/*
 * Return the packed OS version (see parse_os_version()), cached after
 * the first successful computation. Returns 0 while the global
 * osversion[] string is still empty; when osversion[] exists but does
 * not parse, caches the sentinel value 1 so the failure is logged only
 * once.
 * NOTE(review): the static cache is written without synchronization;
 * concurrent first callers would store identical values — confirm
 * that is acceptable in all calling contexts.
 */
cp_key_os_version_t
cp_os_version(void)
{
	static cp_key_os_version_t cp_os_version;

	if (cp_os_version) {
		return cp_os_version;
	}

	if (!osversion[0]) {
		return 0;
	}

	cp_os_version = parse_os_version(osversion);
	if (!cp_os_version) {
		printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
		cp_os_version = 1;
	}

	return cp_os_version;
}
591