xref: /xnu-10002.61.3/bsd/kern/ubc_subr.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *	File:	ubc_subr.c
30  *	Author:	Umesh Vaishampayan [[email protected]]
31  *		05-Aug-1999	umeshv	Created.
32  *
33  *	Functions related to Unified Buffer cache.
34  *
35  * Caller of UBC functions MUST have a valid reference on the vnode.
36  *
37  */
38 
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/lock.h>
43 #include <sys/mman.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56 #include <sys/reboot.h>
57 #include <sys/code_signing.h>
58 
59 #include <mach/mach_types.h>
60 #include <mach/memory_object_types.h>
61 #include <mach/memory_object_control.h>
62 #include <mach/vm_map.h>
63 #include <mach/mach_vm.h>
64 #include <mach/upl.h>
65 
66 #include <kern/kern_types.h>
67 #include <kern/kalloc.h>
68 #include <kern/zalloc.h>
69 #include <kern/thread.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_protos.h> /* last */
73 
74 #include <libkern/crypto/sha1.h>
75 #include <libkern/crypto/sha2.h>
76 #include <libkern/libkern.h>
77 
78 #include <security/mac_framework.h>
79 #include <stdbool.h>
80 #include <stdatomic.h>
81 #include <libkern/amfi/amfi.h>
82 
83 /* XXX These should be in a BSD accessible Mach header, but aren't. */
84 extern kern_return_t memory_object_pages_resident(memory_object_control_t,
85     boolean_t *);
86 extern kern_return_t    memory_object_signed(memory_object_control_t control,
87     boolean_t is_signed);
88 extern boolean_t        memory_object_is_signed(memory_object_control_t);
89 extern void             memory_object_mark_trusted(
90 	memory_object_control_t         control);
91 
92 extern void Debugger(const char *message);
93 
94 #if DIAGNOSTIC
95 #if defined(assert)
96 #undef assert
97 #endif
98 #define assert(cond)    \
99     ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
100 #else
101 #include <kern/assert.h>
102 #endif /* DIAGNOSTIC */
103 
104 static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
105 static int ubc_umcallback(vnode_t, void *);
106 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
107 static void ubc_cs_free(struct ubc_info *uip);
108 
109 static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
110 static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
111 
112 ZONE_DEFINE_TYPE(ubc_info_zone, "ubc_info zone", struct ubc_info,
113     ZC_ZFREE_CLEARMEM);
114 static uint32_t cs_blob_generation_count = 1;
115 
116 /*
117  * CODESIGNING
118  * Routines to navigate code signing data structures in the kernel...
119  */
120 
121 ZONE_DEFINE_ID(ZONE_ID_CS_BLOB, "cs_blob zone", struct cs_blob,
122     ZC_READONLY | ZC_ZFREE_CLEARMEM);
123 
124 extern int cs_debug;
125 
126 #define PAGE_SHIFT_4K           (12)
127 
128 static boolean_t
129 cs_valid_range(
130 	const void *start,
131 	const void *end,
132 	const void *lower_bound,
133 	const void *upper_bound)
134 {
135 	if (upper_bound < lower_bound ||
136 	    end < start) {
137 		return FALSE;
138 	}
139 
140 	if (start < lower_bound ||
141 	    end > upper_bound) {
142 		return FALSE;
143 	}
144 
145 	return TRUE;
146 }
147 
148 typedef void (*cs_md_init)(void *ctx);
149 typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
150 typedef void (*cs_md_final)(void *hash, void *ctx);
151 
152 struct cs_hash {
153 	uint8_t             cs_type;    /* type code as per code signing */
154 	size_t              cs_size;    /* size of effective hash (may be truncated) */
155 	size_t              cs_digest_size;/* size of native hash */
156 	cs_md_init          cs_init;
157 	cs_md_update        cs_update;
158 	cs_md_final         cs_final;
159 };
160 
161 uint8_t
162 cs_hash_type(
163 	struct cs_hash const * const cs_hash)
164 {
165 	return cs_hash->cs_type;
166 }
167 
168 static const struct cs_hash cs_hash_sha1 = {
169 	.cs_type = CS_HASHTYPE_SHA1,
170 	.cs_size = CS_SHA1_LEN,
171 	.cs_digest_size = SHA_DIGEST_LENGTH,
172 	.cs_init = (cs_md_init)SHA1Init,
173 	.cs_update = (cs_md_update)SHA1Update,
174 	.cs_final = (cs_md_final)SHA1Final,
175 };
176 #if CRYPTO_SHA2
177 static const struct cs_hash cs_hash_sha256 = {
178 	.cs_type = CS_HASHTYPE_SHA256,
179 	.cs_size = SHA256_DIGEST_LENGTH,
180 	.cs_digest_size = SHA256_DIGEST_LENGTH,
181 	.cs_init = (cs_md_init)SHA256_Init,
182 	.cs_update = (cs_md_update)SHA256_Update,
183 	.cs_final = (cs_md_final)SHA256_Final,
184 };
185 static const struct cs_hash cs_hash_sha256_truncate = {
186 	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
187 	.cs_size = CS_SHA256_TRUNCATED_LEN,
188 	.cs_digest_size = SHA256_DIGEST_LENGTH,
189 	.cs_init = (cs_md_init)SHA256_Init,
190 	.cs_update = (cs_md_update)SHA256_Update,
191 	.cs_final = (cs_md_final)SHA256_Final,
192 };
193 static const struct cs_hash cs_hash_sha384 = {
194 	.cs_type = CS_HASHTYPE_SHA384,
195 	.cs_size = SHA384_DIGEST_LENGTH,
196 	.cs_digest_size = SHA384_DIGEST_LENGTH,
197 	.cs_init = (cs_md_init)SHA384_Init,
198 	.cs_update = (cs_md_update)SHA384_Update,
199 	.cs_final = (cs_md_final)SHA384_Final,
200 };
201 #endif
202 
203 static struct cs_hash const *
204 cs_find_md(uint8_t type)
205 {
206 	if (type == CS_HASHTYPE_SHA1) {
207 		return &cs_hash_sha1;
208 #if CRYPTO_SHA2
209 	} else if (type == CS_HASHTYPE_SHA256) {
210 		return &cs_hash_sha256;
211 	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
212 		return &cs_hash_sha256_truncate;
213 	} else if (type == CS_HASHTYPE_SHA384) {
214 		return &cs_hash_sha384;
215 #endif
216 	}
217 	return NULL;
218 }
219 
220 union cs_hash_union {
221 	SHA1_CTX                sha1ctxt;
222 	SHA256_CTX              sha256ctx;
223 	SHA384_CTX              sha384ctx;
224 };
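
/*
 * Illustrative sketch (editor-added, hypothetical): how the cs_hash ops
 * table above is driven.  This mirrors the cs_init/cs_update/cs_final
 * sequence used by csblob_find_special_slot_blob() later in this file;
 * the function name and parameters are assumptions, not xnu API.
 */
#if 0
static void
example_hash_buffer(const struct cs_hash *ops, const void *data, size_t size,
    uint8_t digest[CS_HASH_MAX_SIZE])
{
	union cs_hash_union ctx;

	assert(ops->cs_digest_size <= CS_HASH_MAX_SIZE);
	ops->cs_init(&ctx);
	ops->cs_update(&ctx, data, size);
	ops->cs_final(digest, &ctx);
	/* callers compare only the first ops->cs_size bytes */
}
#endif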
225 
226 
227 /*
228  * Choose among different hash algorithms.
229  * Higher is better, 0 => don't use at all.
230  */
231 static const uint32_t hashPriorities[] = {
232 	CS_HASHTYPE_SHA1,
233 	CS_HASHTYPE_SHA256_TRUNCATED,
234 	CS_HASHTYPE_SHA256,
235 	CS_HASHTYPE_SHA384,
236 };
237 
238 static unsigned int
239 hash_rank(const CS_CodeDirectory *cd)
240 {
241 	uint32_t type = cd->hashType;
242 	unsigned int n;
243 
244 	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
245 		if (hashPriorities[n] == type) {
246 			return n + 1;
247 		}
248 	}
249 	return 0;       /* not supported */
250 }
251 
252 
253 /*
254  * Locating a page hash
255  */
256 static const unsigned char *
257 hashes(
258 	const CS_CodeDirectory *cd,
259 	uint32_t page,
260 	size_t hash_len,
261 	const char *lower_bound,
262 	const char *upper_bound)
263 {
264 	const unsigned char *base, *top, *hash;
265 	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
266 
267 	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
268 
269 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
270 		/* Get first scatter struct */
271 		const SC_Scatter *scatter = (const SC_Scatter*)
272 		    ((const char*)cd + ntohl(cd->scatterOffset));
273 		uint32_t hashindex = 0, scount, sbase = 0;
274 		/* iterate all scatter structs */
275 		do {
276 			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
277 				if (cs_debug) {
278 					printf("CODE SIGNING: Scatter extends past Code Directory\n");
279 				}
280 				return NULL;
281 			}
282 
283 			scount = ntohl(scatter->count);
284 			uint32_t new_base = ntohl(scatter->base);
285 
286 			/* last scatter? */
287 			if (scount == 0) {
288 				return NULL;
289 			}
290 
291 			if ((hashindex > 0) && (new_base <= sbase)) {
292 				if (cs_debug) {
293 					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
294 					    sbase, new_base);
295 				}
296 				return NULL;    /* unordered scatter array */
297 			}
298 			sbase = new_base;
299 
300 			/* this scatter beyond page we're looking for? */
301 			if (sbase > page) {
302 				return NULL;
303 			}
304 
305 			if (sbase + scount >= page) {
306 				/* Found the scatter struct that is
307 				 * referencing our page */
308 
309 				/* base = address of first hash covered by scatter */
310 				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
311 				    hashindex * hash_len;
312 				/* top = address of first hash after this scatter */
313 				top = base + scount * hash_len;
314 				if (!cs_valid_range(base, top, lower_bound,
315 				    upper_bound) ||
316 				    hashindex > nCodeSlots) {
317 					return NULL;
318 				}
319 
320 				break;
321 			}
322 
323 			/* this scatter struct is before the page we're looking
324 			 * for. Iterate. */
325 			hashindex += scount;
326 			scatter++;
327 		} while (1);
328 
329 		hash = base + (page - sbase) * hash_len;
330 	} else {
331 		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
332 		top = base + nCodeSlots * hash_len;
333 		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
334 		    page > nCodeSlots) {
335 			return NULL;
336 		}
337 		assert(page < nCodeSlots);
338 
339 		hash = base + page * hash_len;
340 	}
341 
342 	if (!cs_valid_range(hash, hash + hash_len,
343 	    lower_bound, upper_bound)) {
344 		hash = NULL;
345 	}
346 
347 	return hash;
348 }
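
/*
 * Layout note (editor-added, illustrative): in the non-scatter case the
 * code-slot hash for page N lives at a fixed offset from the code
 * directory:
 *
 *     hash(N) = (const uint8_t *)cd + ntohl(cd->hashOffset) + N * hash_len
 *
 * Special slots reuse the same array with negative indices; that is the
 * computation find_special_slot() performs further down.
 */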
349 
350 /*
351  * cs_validate_codedirectory
352  *
353  * Validate the pointers inside the code directory to make sure that
354  * all offsets and lengths are constrained within the buffer.
355  *
356  * Parameters:	cd			Pointer to code directory buffer
357  *		length			Length of buffer
358  *
359  * Returns:	0			Success
360  *		EBADEXEC		Invalid code signature
361  */
362 
363 static int
364 cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
365 {
366 	struct cs_hash const *hashtype;
367 
368 	if (length < sizeof(*cd)) {
369 		return EBADEXEC;
370 	}
371 	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
372 		return EBADEXEC;
373 	}
374 	if ((cd->pageSize != PAGE_SHIFT_4K) && (cd->pageSize != PAGE_SHIFT)) {
375 		printf("disallowing unsupported code signature page shift: %u\n", cd->pageSize);
376 		return EBADEXEC;
377 	}
378 	hashtype = cs_find_md(cd->hashType);
379 	if (hashtype == NULL) {
380 		return EBADEXEC;
381 	}
382 
383 	if (cd->hashSize != hashtype->cs_size) {
384 		return EBADEXEC;
385 	}
386 
387 	if (length < ntohl(cd->hashOffset)) {
388 		return EBADEXEC;
389 	}
390 
391 	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
392 	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
393 		return EBADEXEC;
394 	}
395 
396 	/* check that codeslots fits in the buffer */
397 	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
398 		return EBADEXEC;
399 	}
400 
401 	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
402 		if (length < ntohl(cd->scatterOffset)) {
403 			return EBADEXEC;
404 		}
405 
406 		const SC_Scatter *scatter = (const SC_Scatter *)
407 		    (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
408 		uint32_t nPages = 0;
409 
410 		/*
411 		 * Check each scatter buffer, since we don't know the
412 		 * length of the scatter buffer array, we have to
413 		 * check each entry.
414 		 */
415 		while (1) {
416 			/* check that the end of each scatter buffer is within the length */
417 			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
418 				return EBADEXEC;
419 			}
420 			uint32_t scount = ntohl(scatter->count);
421 			if (scount == 0) {
422 				break;
423 			}
424 			if (nPages + scount < nPages) {
425 				return EBADEXEC;
426 			}
427 			nPages += scount;
428 			scatter++;
429 
430 			/* XXX check that bases don't overlap */
431 			/* XXX check that targetOffset doesn't overlap */
432 		}
433 #if 0 /* rdar://12579439 */
434 		if (nPages != ntohl(cd->nCodeSlots)) {
435 			return EBADEXEC;
436 		}
437 #endif
438 	}
439 
440 	if (length < ntohl(cd->identOffset)) {
441 		return EBADEXEC;
442 	}
443 
444 	/* identifier is NUL terminated string */
445 	if (cd->identOffset) {
446 		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
447 		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
448 			return EBADEXEC;
449 		}
450 	}
451 
452 	/* team identifier is NUL terminated string */
453 	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
454 		if (length < ntohl(cd->teamOffset)) {
455 			return EBADEXEC;
456 		}
457 
458 		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
459 		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
460 			return EBADEXEC;
461 		}
462 	}
463 
464 	/* linkage is variable length binary data */
465 	if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
466 		const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
467 		const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);
468 
469 		if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
470 			return EBADEXEC;
471 		}
472 	}
473 
474 
475 	return 0;
476 }
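
/*
 * Editor-added note on the overflow-safe checks above: a naive bound such
 * as "ntohl(cd->hashOffset) + ntohl(cd->nCodeSlots) * cs_size <= length"
 * can wrap with attacker-controlled counts.  Rewriting it in division
 * form,
 *
 *     (length - ntohl(cd->hashOffset)) / cs_size < ntohl(cd->nCodeSlots)
 *
 * keeps every intermediate value bounded by "length", so it cannot
 * overflow, and rejects exactly the same inputs.
 */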
477 
478 /*
479  * Validate that a generic blob's declared length fits within the buffer.
480  */
481 
482 static int
483 cs_validate_blob(const CS_GenericBlob *blob, size_t length)
484 {
485 	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
486 		return EBADEXEC;
487 	}
488 	return 0;
489 }
490 
491 /*
492  * cs_validate_csblob
493  *
494  * Validate the superblob/embedded code directory to make sure that
495  * all internal pointers are valid.
496  *
497  * Will validate both a superblob csblob and a "raw" code directory.
498  *
499  *
500  * Parameters:	buffer			Pointer to code signature
501  *		length			Length of buffer
502  *		rcd			returns pointer to code directory
503  *
504  * Returns:	0			Success
505  *		EBADEXEC		Invalid code signature
506  */
507 
508 static int
509 cs_validate_csblob(
510 	const uint8_t *addr,
511 	const size_t blob_size,
512 	const CS_CodeDirectory **rcd,
513 	const CS_GenericBlob **rentitlements,
514 	const CS_GenericBlob **rder_entitlements)
515 {
516 	const CS_GenericBlob *blob;
517 	int error;
518 	size_t length;
519 	const CS_GenericBlob *self_constraint = NULL;
520 	const CS_GenericBlob *parent_constraint = NULL;
521 	const CS_GenericBlob *responsible_proc_constraint = NULL;
522 	const CS_GenericBlob *library_constraint = NULL;
523 
524 	*rcd = NULL;
525 	*rentitlements = NULL;
526 	*rder_entitlements = NULL;
527 
528 	blob = (const CS_GenericBlob *)(const void *)addr;
529 
530 	length = blob_size;
531 	error = cs_validate_blob(blob, length);
532 	if (error) {
533 		return error;
534 	}
535 	length = ntohl(blob->length);
536 
537 	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
538 		const CS_SuperBlob *sb;
539 		uint32_t n, count;
540 		const CS_CodeDirectory *best_cd = NULL;
541 		unsigned int best_rank = 0;
542 #if XNU_PLATFORM_WatchOS
543 		const CS_CodeDirectory *sha1_cd = NULL;
544 #endif
545 
546 		if (length < sizeof(CS_SuperBlob)) {
547 			return EBADEXEC;
548 		}
549 
550 		sb = (const CS_SuperBlob *)blob;
551 		count = ntohl(sb->count);
552 
553 		/* check that the array of BlobIndex fits in the rest of the data */
554 		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
555 			return EBADEXEC;
556 		}
557 
558 		/* now check each BlobIndex */
559 		for (n = 0; n < count; n++) {
560 			const CS_BlobIndex *blobIndex = &sb->index[n];
561 			uint32_t type = ntohl(blobIndex->type);
562 			uint32_t offset = ntohl(blobIndex->offset);
563 			if (length < offset) {
564 				return EBADEXEC;
565 			}
566 
567 			const CS_GenericBlob *subBlob =
568 			    (const CS_GenericBlob *)(const void *)(addr + offset);
569 
570 			size_t subLength = length - offset;
571 
572 			if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
573 				return error;
574 			}
575 			subLength = ntohl(subBlob->length);
576 
577 			/* extra validation for CDs, that is also returned */
578 			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
579 				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
580 				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
581 					return error;
582 				}
583 				unsigned int rank = hash_rank(candidate);
584 				if (cs_debug > 3) {
585 					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
586 				}
587 				if (best_cd == NULL || rank > best_rank) {
588 					best_cd = candidate;
589 					best_rank = rank;
590 
591 					if (cs_debug > 2) {
592 						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
593 					}
594 					*rcd = best_cd;
595 				} else if (best_cd != NULL && rank == best_rank) {
596 					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
597 					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
598 					return EBADEXEC;
599 				}
600 #if XNU_PLATFORM_WatchOS
601 				if (candidate->hashType == CS_HASHTYPE_SHA1) {
602 					if (sha1_cd != NULL) {
603 						printf("multiple sha1 CodeDirectories in signature; rejecting\n");
604 						return EBADEXEC;
605 					}
606 					sha1_cd = candidate;
607 				}
608 #endif
609 			} else if (type == CSSLOT_ENTITLEMENTS) {
610 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
611 					return EBADEXEC;
612 				}
613 				if (*rentitlements != NULL) {
614 					printf("multiple entitlements blobs\n");
615 					return EBADEXEC;
616 				}
617 				*rentitlements = subBlob;
618 			} else if (type == CSSLOT_DER_ENTITLEMENTS) {
619 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_DER_ENTITLEMENTS) {
620 					return EBADEXEC;
621 				}
622 				if (*rder_entitlements != NULL) {
623 					printf("multiple der entitlements blobs\n");
624 					return EBADEXEC;
625 				}
626 				*rder_entitlements = subBlob;
627 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_SELF) {
628 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
629 					return EBADEXEC;
630 				}
631 				if (self_constraint != NULL) {
632 					printf("multiple self constraint blobs\n");
633 					return EBADEXEC;
634 				}
635 				self_constraint = subBlob;
636 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_PARENT) {
637 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
638 					return EBADEXEC;
639 				}
640 				if (parent_constraint != NULL) {
641 					printf("multiple parent constraint blobs\n");
642 					return EBADEXEC;
643 				}
644 				parent_constraint = subBlob;
645 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE) {
646 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
647 					return EBADEXEC;
648 				}
649 				if (responsible_proc_constraint != NULL) {
650 					printf("multiple responsible process constraint blobs\n");
651 					return EBADEXEC;
652 				}
653 				responsible_proc_constraint = subBlob;
654 			} else if (type == CSSLOT_LIBRARY_CONSTRAINT) {
655 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
656 					return EBADEXEC;
657 				}
658 				if (library_constraint != NULL) {
659 					printf("multiple library constraint blobs\n");
660 					return EBADEXEC;
661 				}
662 				library_constraint = subBlob;
663 			}
664 		}
665 
666 #if XNU_PLATFORM_WatchOS
667 		/* To keep watchOS fast enough, we have to resort to sha1 for
668 		 * some code.
669 		 *
670 		 * At the time of writing this comment, known sha1 attacks are
671 		 * collision attacks (not preimage or second preimage
672 		 * attacks), which do not apply to platform binaries since
673 		 * they have a fixed hash in the trust cache.  Given this
674 		 * property, we only prefer sha1 code directories for adhoc
675 		 * signatures, which always have to be in a trust cache to be
676 		 * valid (can-load-cdhash does not exist for watchOS). Those
677 		 * are, incidentally, also the platform binaries, for which we
678 		 * care about the performance hit that sha256 would bring us.
679 		 *
680 		 * Platform binaries may still contain a (not chosen) sha256
681 		 * code directory, which keeps software updates that switch to
682 		 * sha256-only small.
683 		 */
684 
685 		if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
686 			if (sha1_cd->flags != (*rcd)->flags) {
687 				printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
688 				    (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
689 				*rcd = NULL;
690 				return EBADEXEC;
691 			}
692 
693 			*rcd = sha1_cd;
694 		}
695 #endif
696 	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
697 		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
698 			return error;
699 		}
700 		*rcd = (const CS_CodeDirectory *)blob;
701 	} else {
702 		return EBADEXEC;
703 	}
704 
705 	if (*rcd == NULL) {
706 		return EBADEXEC;
707 	}
708 
709 	return 0;
710 }
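
#if 0
/*
 * Hypothetical caller sketch (editor-added, not xnu code): validating a
 * signature buffer and picking up the chosen code directory.  "addr" and
 * "size" are assumed to describe a fully mapped signature blob.
 */
const CS_CodeDirectory *cd = NULL;
const CS_GenericBlob *entitlements = NULL;
const CS_GenericBlob *der_entitlements = NULL;

if (cs_validate_csblob(addr, size, &cd, &entitlements, &der_entitlements) != 0) {
	return EBADEXEC;        /* reject the whole signature */
}
/* cd now points at the highest-ranked, bounds-checked CodeDirectory */
#endif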
711 
712 /*
713  * cs_find_blob_bytes
714  *
715  * Find a blob in the superblob/code directory. The blob must have
716  * been validated by cs_validate_csblob() before calling
717  * this. Use csblob_find_blob() instead.
718  *
719  * Will also find a "raw" code directory if it's stored, as well as
720  * searching the superblob.
721  *
722  * Parameters:	buffer			Pointer to code signature
723  *		length			Length of buffer
724  *		type			type of blob to find
725  *		magic			the magic number for that blob
726  *
727  * Returns:	pointer			Success
728  *		NULL			Buffer not found
729  */
730 
731 const CS_GenericBlob *
732 csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
733 {
734 	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
735 
736 	if ((addr + length) < addr) {
737 		panic("CODE SIGNING: CS Blob length overflow for addr: %p", addr);
738 	}
739 
740 	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
741 		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
742 		size_t n, count = ntohl(sb->count);
743 
744 		for (n = 0; n < count; n++) {
745 			if (ntohl(sb->index[n].type) != type) {
746 				continue;
747 			}
748 			uint32_t offset = ntohl(sb->index[n].offset);
749 			if (length - sizeof(const CS_GenericBlob) < offset) {
750 				return NULL;
751 			}
752 			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
753 			if (ntohl(blob->magic) != magic) {
754 				continue;
755 			}
756 			if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
757 				panic("CODE SIGNING: CS Blob length overflow for blob at: %p", blob);
758 			} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
759 				continue;
760 			}
761 			return blob;
762 		}
763 	} else if (type == CSSLOT_CODEDIRECTORY && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
764 	    && magic == CSMAGIC_CODEDIRECTORY) {
765 		if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
766 			panic("CODE SIGNING: CS Blob length overflow for code directory blob at: %p", blob);
767 		} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
768 			return NULL;
769 		}
770 		return blob;
771 	}
772 	return NULL;
773 }
774 
775 
776 const CS_GenericBlob *
777 csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
778 {
779 	if ((csblob->csb_flags & CS_VALID) == 0) {
780 		return NULL;
781 	}
782 	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
783 }
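
#if 0
/*
 * Usage sketch (editor-added, hypothetical): locating the XML
 * entitlements blob inside a validated cs_blob.  NULL means the slot is
 * absent or the blob has lost CS_VALID.
 */
const CS_GenericBlob *ents = csblob_find_blob(csblob,
    CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
#endif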
784 
785 static const uint8_t *
786 find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
787 {
788 	/* there is no zero special slot since that is the first code slot */
789 	if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
790 		return NULL;
791 	}
792 
793 	return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
794 }
795 
796 static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
797 
798 static int
799 csblob_find_special_slot_blob(struct cs_blob* csblob, uint32_t slot, uint32_t magic, const CS_GenericBlob **out_start, size_t *out_length)
800 {
801 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
802 	const CS_GenericBlob *blob;
803 	const CS_CodeDirectory *code_dir;
804 	const uint8_t *embedded_hash;
805 	union cs_hash_union context;
806 
807 	if (out_start) {
808 		*out_start = NULL;
809 	}
810 	if (out_length) {
811 		*out_length = 0;
812 	}
813 
814 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
815 		return EBADEXEC;
816 	}
817 
818 	code_dir = csblob->csb_cd;
819 
820 	blob = csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, slot, magic);
821 
822 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, slot);
823 
824 	if (embedded_hash == NULL) {
825 		if (blob) {
826 			return EBADEXEC;
827 		}
828 		return 0;
829 	} else if (blob == NULL) {
830 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
831 			return EBADEXEC;
832 		} else {
833 			return 0;
834 		}
835 	}
836 
837 	csblob->csb_hashtype->cs_init(&context);
838 	csblob->csb_hashtype->cs_update(&context, blob, ntohl(blob->length));
839 	csblob->csb_hashtype->cs_final(computed_hash, &context);
840 
841 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
842 		return EBADEXEC;
843 	}
844 	if (out_start) {
845 		*out_start = blob;
846 	}
847 	if (out_length) {
848 		*out_length = ntohl(blob->length);
849 	}
850 
851 	return 0;
852 }
853 
854 int
855 csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
856 {
857 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
858 	const CS_GenericBlob *entitlements;
859 	const CS_CodeDirectory *code_dir;
860 	const uint8_t *embedded_hash;
861 	union cs_hash_union context;
862 
863 	*out_start = NULL;
864 	*out_length = 0;
865 
866 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
867 		return EBADEXEC;
868 	}
869 
870 	code_dir = csblob->csb_cd;
871 
872 	if ((csblob->csb_flags & CS_VALID) == 0) {
873 		entitlements = NULL;
874 	} else {
875 		entitlements = csblob->csb_entitlements_blob;
876 	}
877 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
878 
879 	if (embedded_hash == NULL) {
880 		if (entitlements) {
881 			return EBADEXEC;
882 		}
883 		return 0;
884 	} else if (entitlements == NULL) {
885 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
886 			return EBADEXEC;
887 		} else {
888 			return 0;
889 		}
890 	}
891 
892 	csblob->csb_hashtype->cs_init(&context);
893 	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
894 	csblob->csb_hashtype->cs_final(computed_hash, &context);
895 
896 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
897 		return EBADEXEC;
898 	}
899 
900 	*out_start = __DECONST(void *, entitlements);
901 	*out_length = ntohl(entitlements->length);
902 
903 	return 0;
904 }
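
#if 0
/*
 * Usage sketch (editor-added, hypothetical): the returned pointer aliases
 * the cs_blob's own mapping, so it is only valid while the blob is alive;
 * copy the data if it must outlive the blob.
 */
void *ents_start = NULL;
size_t ents_len = 0;

if (csblob_get_entitlements(csblob, &ents_start, &ents_len) == 0 &&
    ents_start != NULL) {
	/* ents_len bytes of hash-verified entitlements data */
}
#endif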
905 
906 const CS_GenericBlob*
907 csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)
908 {
909 	if ((csblob->csb_flags & CS_VALID) == 0) {
910 		return NULL;
911 	}
912 
913 	return csblob->csb_der_entitlements_blob;
914 }
915 
916 int
917 csblob_get_der_entitlements(struct cs_blob *csblob, const CS_GenericBlob **out_start, size_t *out_length)
918 {
919 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
920 	const CS_GenericBlob *der_entitlements;
921 	const CS_CodeDirectory *code_dir;
922 	const uint8_t *embedded_hash;
923 	union cs_hash_union context;
924 
925 	*out_start = NULL;
926 	*out_length = 0;
927 
928 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
929 		return EBADEXEC;
930 	}
931 
932 	code_dir = csblob->csb_cd;
933 
934 	if ((csblob->csb_flags & CS_VALID) == 0) {
935 		der_entitlements = NULL;
936 	} else {
937 		der_entitlements = csblob->csb_der_entitlements_blob;
938 	}
939 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_DER_ENTITLEMENTS);
940 
941 	if (embedded_hash == NULL) {
942 		if (der_entitlements) {
943 			return EBADEXEC;
944 		}
945 		return 0;
946 	} else if (der_entitlements == NULL) {
947 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
948 			return EBADEXEC;
949 		} else {
950 			return 0;
951 		}
952 	}
953 
954 	csblob->csb_hashtype->cs_init(&context);
955 	csblob->csb_hashtype->cs_update(&context, der_entitlements, ntohl(der_entitlements->length));
956 	csblob->csb_hashtype->cs_final(computed_hash, &context);
957 
958 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
959 		return EBADEXEC;
960 	}
961 
962 	*out_start = der_entitlements;
963 	*out_length = ntohl(der_entitlements->length);
964 
965 	return 0;
966 }
967 
968 static bool
969 ubc_cs_blob_pagewise_allocate(
970 	__unused vm_size_t size)
971 {
972 #if CODE_SIGNING_MONITOR
973 	/* If the monitor isn't enabled, then we don't need to page-align */
974 	if (csm_enabled() == false) {
975 		return false;
976 	}
977 
978 	/*
979 	 * Small allocations can be managed by the monitor itself. We only need to allocate
980 	 * page-wise when it is a sufficiently large allocation and the monitor cannot manage
981 	 * it on its own.
982 	 */
983 	if (size <= csm_signature_size_limit()) {
984 		return false;
985 	}
986 
987 	return true;
988 #else
989 	/* Without a monitor, we never need to page align */
990 	return false;
991 #endif /* CODE_SIGNING_MONITOR */
992 }
993 
994 int
995 csblob_register_profile_uuid(
996 	struct cs_blob __unused *csblob,
997 	const uuid_t __unused profile_uuid,
998 	void __unused *profile_addr,
999 	vm_size_t __unused profile_size)
1000 {
1001 #if CODE_SIGNING_MONITOR
1002 	/* Profiles only need to be registered for monitor environments */
1003 	assert(profile_addr != NULL);
1004 	assert(profile_size != 0);
1005 	assert(csblob != NULL);
1006 
1007 	kern_return_t kr = csm_register_provisioning_profile(
1008 		profile_uuid,
1009 		profile_addr, profile_size);
1010 
1011 	if ((kr != KERN_SUCCESS) && (kr != KERN_ALREADY_IN_SET)) {
1012 		return EPERM;
1013 	}
1014 
1015 	/* Associate the profile with the monitor's signature object */
1016 	kr = csm_associate_provisioning_profile(
1017 		csblob->csb_csm_obj,
1018 		profile_uuid);
1019 
1020 	if ((kr != KERN_SUCCESS) && (kr != KERN_NOT_SUPPORTED)) {
1021 		return EPERM;
1022 	}
1023 
1024 	return 0;
1025 #else
1026 	return 0;
1027 #endif /* CODE_SIGNING_MONITOR */
1028 }
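
#if 0
/*
 * Usage sketch (editor-added, hypothetical): registering an embedded
 * provisioning profile with the code-signing monitor and binding it to
 * the blob's monitor object.  "prof_uuid", "prof_addr" and "prof_size"
 * are assumed inputs.
 */
if (csblob_register_profile_uuid(csblob, prof_uuid, prof_addr, prof_size) != 0) {
	/* monitor refused the profile; treat the signature as invalid */
}
#endif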
1029 
1030 /*
1031  * CODESIGNING
1032  * End of routines to navigate code signing data structures in the kernel.
1033  */
1034 
1035 
1036 
1037 /*
1038  * ubc_info_init
1039  *
1040  * Allocate and attach an empty ubc_info structure to a vnode
1041  *
1042  * Parameters:	vp			Pointer to the vnode
1043  *
1044  * Returns:	0			Success
1045  *	vnode_size:ENOMEM		Not enough space
1046  *	vnode_size:???			Other error from vnode_getattr
1047  *
1048  */
1049 int
1050 ubc_info_init(struct vnode *vp)
1051 {
1052 	return ubc_info_init_internal(vp, 0, 0);
1053 }
1054 
1055 
1056 /*
1057  * ubc_info_init_withsize
1058  *
1059  * Allocate and attach a sized ubc_info structure to a vnode
1060  *
1061  * Parameters:	vp			Pointer to the vnode
1062  *		filesize		The size of the file
1063  *
1064  * Returns:	0			Success
1065  *	vnode_size:ENOMEM		Not enough space
1066  *	vnode_size:???			Other error from vnode_getattr
1067  */
1068 int
1069 ubc_info_init_withsize(struct vnode *vp, off_t filesize)
1070 {
1071 	return ubc_info_init_internal(vp, 1, filesize);
1072 }
1073 
1074 
1075 /*
1076  * ubc_info_init_internal
1077  *
1078  * Allocate and attach a ubc_info structure to a vnode
1079  *
1080  * Parameters:	vp			Pointer to the vnode
1081  *		withfsize{0,1}		Zero if the size should be obtained
1082  *					from the vnode; otherwise, use filesize
1083  *		filesize		The size of the file, if withfsize == 1
1084  *
1085  * Returns:	0			Success
1086  *	vnode_size:ENOMEM		Not enough space
1087  *	vnode_size:???			Other error from vnode_getattr
1088  *
1089  * Notes:	We call a blocking zalloc(), and the zone was created as an
1090  *		expandable and collectable zone, so if no memory is available,
1091  *		it is possible for zalloc() to block indefinitely.  zalloc()
1092  *		may also panic if the zone of zones is exhausted, since it's
1093  *		NOT expandable.
1094  *
1095  *		We unconditionally call vnode_pager_setup(), even if this is
1096  *		a reuse of a ubc_info; in that case, we should probably assert
1097  *		that it does not already have a pager association, but do not.
1098  *
1099  *		Since memory_object_create_named() can only fail from receiving
1100  *		an invalid pager argument, the explicit check and panic is
1101  *		merely precautionary.
1102  */
1103 static int
1104 ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
1105 {
1106 	struct ubc_info *uip;
1107 	void *  pager;
1108 	int error = 0;
1109 	kern_return_t kret;
1110 	memory_object_control_t control;
1111 
1112 	uip = vp->v_ubcinfo;
1113 
1114 	/*
1115 	 * If there is not already a ubc_info attached to the vnode, we
1116 	 * attach one; otherwise, we will reuse the one that's there.
1117 	 */
1118 	if (uip == UBC_INFO_NULL) {
1119 		uip = zalloc_flags(ubc_info_zone, Z_WAITOK | Z_ZERO);
1120 
1121 		uip->ui_vnode = vp;
1122 		uip->ui_flags = UI_INITED;
1123 		uip->ui_ucred = NOCRED;
1124 	}
1125 	assert(uip->ui_flags != UI_NONE);
1126 	assert(uip->ui_vnode == vp);
1127 
1128 	/* now set this ubc_info in the vnode */
1129 	vp->v_ubcinfo = uip;
1130 
1131 	/*
1132 	 * Allocate a pager object for this vnode
1133 	 *
1134 	 * XXX The value of the pager parameter is currently ignored.
1135 	 * XXX Presumably, this API changed to avoid the race between
1136 	 * XXX setting the pager and the UI_HASPAGER flag.
1137 	 */
1138 	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
1139 	assert(pager);
1140 
1141 	/*
1142 	 * Explicitly set the pager into the ubc_info, after setting the
1143 	 * UI_HASPAGER flag.
1144 	 */
1145 	SET(uip->ui_flags, UI_HASPAGER);
1146 	uip->ui_pager = pager;
1147 
1148 	/*
1149 	 * Note: We cannot use VNOP_GETATTR() to get an accurate
1150 	 * value of ui_size because this may be an NFS vnode, and
1151 	 * nfs_getattr() can call vinvalbuf(); if this happens,
1152 	 * ubc_info is not set up to deal with that event.
1153 	 * So use bogus size.
1154 	 */
1155 
1156 	/*
1157 	 * create a vnode - vm_object association
1158 	 * memory_object_create_named() creates a "named" reference on the
1159 	 * memory object; we hold this reference as long as the vnode is
1160 	 * "alive."  Since memory_object_create_named() took its own reference
1161 	 * on the vnode pager we passed it, we can drop the reference
1162 	 * vnode_pager_setup() returned here.
1163 	 */
1164 	kret = memory_object_create_named(pager,
1165 	    (memory_object_size_t)uip->ui_size, &control);
1166 	vnode_pager_deallocate(pager);
1167 	if (kret != KERN_SUCCESS) {
1168 		panic("ubc_info_init: memory_object_create_named returned %d", kret);
1169 	}
1170 
1171 	assert(control);
1172 	uip->ui_control = control;      /* cache the value of the mo control */
1173 	SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */
1174 
1175 	if (withfsize == 0) {
1176 		/* initialize the size */
1177 		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
1178 		if (error) {
1179 			uip->ui_size = 0;
1180 		}
1181 	} else {
1182 		uip->ui_size = filesize;
1183 	}
1184 	vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */
1185 
1186 	return error;
1187 }
1188 
1189 
1190 /*
1191  * ubc_info_free
1192  *
1193  * Free a ubc_info structure
1194  *
1195  * Parameters:	uip			A pointer to the ubc_info to free
1196  *
1197  * Returns:	(void)
1198  *
1199  * Notes:	If there is a credential that has subsequently been associated
1200  *		with the ubc_info, the reference to the credential is dropped.
1201  *
1202  *		It's actually impossible for a ubc_info.ui_control to take the
1203  *		value MEMORY_OBJECT_CONTROL_NULL.
1204  */
1205 static void
1206 ubc_info_free(struct ubc_info *uip)
1207 {
1208 	if (IS_VALID_CRED(uip->ui_ucred)) {
1209 		kauth_cred_unref(&uip->ui_ucred);
1210 	}
1211 
1212 	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
1213 		memory_object_control_deallocate(uip->ui_control);
1214 	}
1215 
1216 	cluster_release(uip);
1217 	ubc_cs_free(uip);
1218 
1219 	zfree(ubc_info_zone, uip);
1220 	return;
1221 }
1222 
1223 
1224 void
1225 ubc_info_deallocate(struct ubc_info *uip)
1226 {
1227 	ubc_info_free(uip);
1228 }
1229 
1230 /*
1231  * ubc_setsize_ex
1232  *
1233  * Tell the VM that the size of the file represented by the vnode has
1234  * changed
1235  *
1236  * Parameters:	vp	   The vp whose backing file size is
1237  *					   being changed
1238  *				nsize  The new size of the backing file
1239  *				opts   Options
1240  *
1241  * Returns:	EINVAL for new size < 0
1242  *			ENOENT if no UBC info exists
1243  *          EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1244  *          Other errors (mapped to errno_t) returned by VM functions
1245  *
1246  * Notes:   This function will indicate success if the new size is the
1247  *		    same or larger than the old size (in this case, the
1248  *		    remainder of the file will require modification or use of
1249  *		    an existing upl to access successfully).
1250  *
1251  *		    This function will fail if the new file size is smaller,
1252  *		    and the memory region being invalidated was unable to
1253  *		    actually be invalidated and/or the last page could not be
1254  *		    flushed, if the new size is not aligned to a page
1255  *		    boundary.  This is usually indicative of an I/O error.
1256  */
1257 errno_t
1258 ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1259 {
1260 	off_t osize;    /* ui_size before change */
1261 	off_t lastpg, olastpgend, lastoff;
1262 	struct ubc_info *uip;
1263 	memory_object_control_t control;
1264 	kern_return_t kret = KERN_SUCCESS;
1265 
1266 	if (nsize < (off_t)0) {
1267 		return EINVAL;
1268 	}
1269 
1270 	if (!UBCINFOEXISTS(vp)) {
1271 		return ENOENT;
1272 	}
1273 
1274 	uip = vp->v_ubcinfo;
1275 	osize = uip->ui_size;
1276 
1277 	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
1278 		return EAGAIN;
1279 	}
1280 
1281 	/*
1282 	 * Update the size before flushing the VM
1283 	 */
1284 	uip->ui_size = nsize;
1285 
1286 	if (nsize >= osize) {   /* Nothing more to do */
1287 		if (nsize > osize) {
1288 			lock_vnode_and_post(vp, NOTE_EXTEND);
1289 		}
1290 
1291 		return 0;
1292 	}
1293 
1294 	/*
1295 	 * When the file shrinks, invalidate the pages beyond the
1296 	 * new size. Also get rid of garbage beyond nsize on the
1297 	 * last page. The ui_size already has the nsize, so any
1298 	 * subsequent page-in will zero-fill the tail properly
1299 	 */
1300 	lastpg = trunc_page_64(nsize);
1301 	olastpgend = round_page_64(osize);
1302 	control = uip->ui_control;
1303 	assert(control);
1304 	lastoff = (nsize & PAGE_MASK_64);
1305 
1306 	if (lastoff) {
1307 		upl_t           upl;
1308 		upl_page_info_t *pl;
1309 
1310 		/*
1311 		 * new EOF ends up in the middle of a page
1312 		 * zero the tail of this page if it's currently
1313 		 * present in the cache
1314 		 */
1315 		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
1316 
1317 		if (kret != KERN_SUCCESS) {
1318 			panic("ubc_setsize: ubc_create_upl (error = %d)", kret);
1319 		}
1320 
1321 		if (upl_valid_page(pl, 0)) {
1322 			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1323 		}
1324 
1325 		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1326 
1327 		lastpg += PAGE_SIZE_64;
1328 	}
1329 	if (olastpgend > lastpg) {
1330 		int     flags;
1331 
1332 		if (lastpg == 0) {
1333 			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
1334 		} else {
1335 			flags = MEMORY_OBJECT_DATA_FLUSH;
1336 		}
1337 		/*
1338 		 * invalidate the pages beyond the new EOF page
1339 		 *
1340 		 */
1341 		kret = memory_object_lock_request(control,
1342 		    (memory_object_offset_t)lastpg,
1343 		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1344 		    MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1345 		if (kret != KERN_SUCCESS) {
1346 			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1347 		}
1348 	}
1349 	return mach_to_bsd_errno(kret);
1350 }
1351 
1352 // Returns true for success
1353 int
1354 ubc_setsize(vnode_t vp, off_t nsize)
1355 {
1356 	return ubc_setsize_ex(vp, nsize, 0) == 0;
1357 }
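
#if 0
/*
 * Usage sketch (editor-added, hypothetical): a filesystem truncating a
 * file to "new_size" after updating its own metadata.  Note the inverted
 * convention: ubc_setsize() returns true on success, while
 * ubc_setsize_ex() returns an errno_t.
 */
if (!ubc_setsize(vp, new_size)) {
	/* tail-page flush/invalidate failed; treat as an I/O error */
}
#endif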
1358 
1359 /*
1360  * ubc_getsize
1361  *
1362  * Get the size of the file associated with the specified vnode
1363  *
1364  * Parameters:	vp			The vnode whose size is of interest
1365  *
1366  * Returns:	0			There is no ubc_info associated with
1367  *					this vnode, or the size is zero
1368  *		!0			The size of the file
1369  *
1370  * Notes:	Using this routine, it is not possible for a caller to
1371  *		successfully distinguish between a vnode associated with a zero
1372  *		length file, and a vnode with no associated ubc_info.  The
1373  *		caller therefore needs to not care, or needs to ensure that
1374  *		they have previously successfully called ubc_info_init() or
1375  *		ubc_info_init_withsize().
1376  */
1377 off_t
1378 ubc_getsize(struct vnode *vp)
1379 {
1380 	/* people depend on the side effect of this working this way
1381 	 * as they call this for directories
1382 	 */
1383 	if (!UBCINFOEXISTS(vp)) {
1384 		return (off_t)0;
1385 	}
1386 	return vp->v_ubcinfo->ui_size;
1387 }
1388 
1389 
1390 /*
1391  * ubc_umount
1392  *
1393  * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1394  * mount point
1395  *
1396  * Parameters:	mp			The mount point
1397  *
1398  * Returns:	0			Success
1399  *
1400  * Notes:	There is no failure indication for this function.
1401  *
1402  *		This function is used in the unmount path; since it may block
1403  *		I/O indefinitely, it should not be used in the forced unmount
1404  *		path, since a device unavailability could also block that
1405  *		indefinitely.
1406  *
1407  *		Because there is no device ejection interlock on USB, FireWire,
1408  *		or similar devices, it's possible that an ejection that begins
1409  *		subsequent to the vnode_iterate() completing, either on one of
1410  *		those devices, or a network mount for which the server quits
1411  *		responding, etc., may cause the caller to block indefinitely.
1412  */
1413 __private_extern__ int
1414 ubc_umount(struct mount *mp)
1415 {
1416 	vnode_iterate(mp, 0, ubc_umcallback, 0);
1417 	return 0;
1418 }
1419 
1420 
1421 /*
1422  * ubc_umcallback
1423  *
1424  * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1425  * and vnode_iterate() for details of implementation.
1426  */
1427 static int
1428 ubc_umcallback(vnode_t vp, __unused void * args)
1429 {
1430 	if (UBCINFOEXISTS(vp)) {
1431 		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1432 	}
1433 	return VNODE_RETURNED;
1434 }
1435 
1436 
1437 /*
1438  * ubc_getcred
1439  *
1440  * Get the credentials currently active for the ubc_info associated with the
1441  * vnode.
1442  *
1443  * Parameters:	vp			The vnode whose ubc_info credentials
1444  *					are to be retrieved
1445  *
1446  * Returns:	!NOCRED			The credentials
1447  *		NOCRED			If there is no ubc_info for the vnode,
1448  *					or if there is one, but it has not had
1449  *					any credentials associated with it.
1450  */
1451 kauth_cred_t
1452 ubc_getcred(struct vnode *vp)
1453 {
1454 	if (UBCINFOEXISTS(vp)) {
1455 		return vp->v_ubcinfo->ui_ucred;
1456 	}
1457 
1458 	return NOCRED;
1459 }
1460 
1461 
1462 /*
1463  * ubc_setthreadcred
1464  *
1465  * If they are not already set, set the credentials of the ubc_info structure
1466  * associated with the vnode to those of the supplied thread; otherwise leave
1467  * them alone.
1468  *
1469  * Parameters:	vp			The vnode whose ubc_info creds are to
1470  *					be set
1471  *		p			The process whose credentials are to
1472  *					be used, if not running on an assumed
1473  *					credential
1474  *		thread			The thread whose credentials are to
1475  *					be used
1476  *
1477  * Returns:	1			This vnode has no associated ubc_info
1478  *		0			Success
1479  *
1480  * Notes:	This function is generally used only in the following cases:
1481  *
1482  *		o	a memory mapped file via the mmap() system call
1483  *		o	a swap store backing file
1484  *		o	subsequent to a successful write via vn_write()
1485  *
1486  *		The information is then used by the NFS client in order to
1487  *		cons up a wire message in either the page-in or page-out path.
1488  *
1489  *		There are two potential problems with the use of this API:
1490  *
1491  *		o	Because the write path only set it on a successful
1492  *			write, there is a race window between setting the
1493  *			credential and its use to evict the pages to the
1494  *			remote file server
1495  *
1496  *		o	Because a page-in may occur prior to a write, the
1497  *			credential may not be set at this time, if the page-in
1498  *			is not the result of a mapping established via mmap().
1499  *
1500  *		In both these cases, this will be triggered from the paging
1501  *		path, which will instead use the credential of the current
1502  *		process, which in this case is either the dynamic_pager or
1503  *		the kernel task, both of which utilize "root" credentials.
1504  *
1505  *		This may potentially permit operations to occur which should
1506  *		be denied, or it may deny operations which
1507  *		should be permitted, depending on the configuration of the NFS
1508  *		server.
1509  */
1510 int
1511 ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
1512 {
1513 #pragma unused(p, thread)
1514 	assert(p == current_proc());
1515 	assert(thread == current_thread());
1516 
1517 	return ubc_setcred(vp, kauth_cred_get());
1518 }
1519 
1520 
1521 /*
1522  * ubc_setcred
1523  *
1524  * If they are not already set, set the credentials of the ubc_info structure
1525  * associated with the vnode to those specified; otherwise leave them
1526  * alone.
1527  *
1528  * Parameters:	vp			The vnode whose ubc_info creds are to
1529  *					be set
1530  *		ucred			The credentials to use
1531  *
1532  * Returns:	0			This vnode has no associated ubc_info
1533  *		1			Success
1534  *
1535  * Notes:	The return values for this function are inverted from nearly
1536  *		all other uses in the kernel.
1537  *
1538  *		See also ubc_setthreadcred(), above.
1539  */
1540 int
1541 ubc_setcred(struct vnode *vp, kauth_cred_t ucred)
1542 {
1543 	struct ubc_info *uip;
1544 
1545 	/* If there is no ubc_info, deny the operation */
1546 	if (!UBCINFOEXISTS(vp)) {
1547 		return 0;
1548 	}
1549 
1550 	/*
1551 	 * Check to see if there is already a credential reference in the
1552 	 * ubc_info; if there is not, take one on the supplied credential.
1553 	 */
1554 	vnode_lock(vp);
1555 	uip = vp->v_ubcinfo;
1556 	if (!IS_VALID_CRED(uip->ui_ucred)) {
1557 		kauth_cred_ref(ucred);
1558 		uip->ui_ucred = ucred;
1559 	}
1560 	vnode_unlock(vp);
1561 
1562 	return 1;
1563 }
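
#if 0
/*
 * Usage sketch (editor-added, hypothetical): per the inverted return
 * convention documented above, 0 means "no ubc_info", 1 means success.
 */
if (ubc_setcred(vp, kauth_cred_get()) == 0) {
	/* no ubc_info; nothing to attach the credential to */
}
#endif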
1564 
1565 /*
1566  * ubc_getpager
1567  *
1568  * Get the pager associated with the ubc_info associated with the vnode.
1569  *
1570  * Parameters:	vp			The vnode to obtain the pager from
1571  *
1572  * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
1573  *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
1574  *
1575  * Notes:	For each vnode that has a ubc_info associated with it, that
1576  *		ubc_info SHALL have a pager associated with it, so in the
1577  *		normal case, it's impossible to return VNODE_PAGER_NULL for
1578  *		a vnode with an associated ubc_info.
1579  */
1580 __private_extern__ memory_object_t
1581 ubc_getpager(struct vnode *vp)
1582 {
1583 	if (UBCINFOEXISTS(vp)) {
1584 		return vp->v_ubcinfo->ui_pager;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 
1591 /*
1592  * ubc_getobject
1593  *
1594  * Get the memory object control associated with the ubc_info associated with
1595  * the vnode
1596  *
1597  * Parameters:	vp			The vnode to obtain the memory object
1598  *					from
1599  *		flags			DEPRECATED
1600  *
1601  * Returns:	!MEMORY_OBJECT_CONTROL_NULL
1602  *		MEMORY_OBJECT_CONTROL_NULL
1603  *
1604  * Notes:	Historically, if the flags were not "do not reactivate", this
1605  *		function would look up the memory object using the pager if
1606  *		it did not exist (this could be the case if the vnode had
1607  *		been previously reactivated).  The flags would also permit a
1608  *		hold to be requested, which would have created an object
1609  *		reference, if one had not already existed.  This usage is
1610  *		deprecated, as it would permit a race between finding and
1611  *		taking the reference vs. a single reference being dropped in
1612  *		another thread.
1613  */
1614 memory_object_control_t
1615 ubc_getobject(struct vnode *vp, __unused int flags)
1616 {
1617 	if (UBCINFOEXISTS(vp)) {
1618 		return vp->v_ubcinfo->ui_control;
1619 	}
1620 
1621 	return MEMORY_OBJECT_CONTROL_NULL;
1622 }
1623 
1624 /*
1625  * ubc_blktooff
1626  *
1627  * Convert a given block number to a memory backing object (file) offset for a
1628  * given vnode
1629  *
1630  * Parameters:	vp			The vnode in which the block is located
1631  *		blkno			The block number to convert
1632  *
1633  * Returns:	!-1			The offset into the backing object
1634  *		-1			There is no ubc_info associated with
1635  *					the vnode
1636  *		-1			An error occurred in the underlying VFS
1637  *					while translating the block to an
1638  *					offset; the most likely cause is that
1639  *					the caller specified a block past the
1640  *					end of the file, but this could also be
1641  *					any other error from VNOP_BLKTOOFF().
1642  *
1643  * Note:	Representing the error in band loses some information, but does
1644  *		not occlude a valid offset, since an off_t of -1 is normally
1645  *		used to represent EOF.  If we had a more reliable constant in
1646  *		our header files for it (i.e. explicitly cast to an off_t), we
1647  *		would use it here instead.
1648  */
1649 off_t
1650 ubc_blktooff(vnode_t vp, daddr64_t blkno)
1651 {
1652 	off_t file_offset = -1;
1653 	int error;
1654 
1655 	if (UBCINFOEXISTS(vp)) {
1656 		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1657 		if (error) {
1658 			file_offset = -1;
1659 		}
1660 	}
1661 
1662 	return file_offset;
1663 }
1664 
1665 
1666 /*
1667  * ubc_offtoblk
1668  *
1669  * Convert a given offset in a memory backing object into a block number for a
1670  * given vnode
1671  *
1672  * Parameters:	vp			The vnode in which the offset is
1673  *					located
1674  *		offset			The offset into the backing object
1675  *
1676  * Returns:	!-1			The returned block number
1677  *		-1			There is no ubc_info associated with
1678  *					the vnode
1679  *		-1			An error occurred in the underlying VFS
1680  *					while translating the offset to a
1681  *					block; the most likely cause is that
1682  *					the caller specified an offset past the
1683  *					end of the file, but this could also be
1684  *					any other error from VNOP_OFFTOBLK().
1685  *
1686  * Note:	Representing the error in band loses some information, but does
1687  *		not occlude a valid block number, since block numbers exceed
1688  *		the valid range for offsets, due to their relative sizes.  If
1689  *		we had a more reliable constant than -1 in our header files
1690  *		for it (i.e. explicitly cast to a daddr64_t), we would use it
1691  *		here instead.
1692  */
1693 daddr64_t
1694 ubc_offtoblk(vnode_t vp, off_t offset)
1695 {
1696 	daddr64_t blkno = -1;
1697 	int error = 0;
1698 
1699 	if (UBCINFOEXISTS(vp)) {
1700 		error = VNOP_OFFTOBLK(vp, offset, &blkno);
1701 		if (error) {
1702 			blkno = -1;
1703 		}
1704 	}
1705 
1706 	return blkno;
1707 }
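
#if 0
/*
 * Usage sketch (editor-added, hypothetical): both translators return -1
 * in band for "no ubc_info" and for VFS errors, so callers must check
 * the sentinel before using the result.
 */
off_t f_offset = ubc_blktooff(vp, blkno);
if (f_offset == -1) {
	return ERANGE;          /* no pager association, or VFS error */
}
#endif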
1708 
1709 
1710 /*
1711  * ubc_pages_resident
1712  *
1713  * Determine whether or not a given vnode has pages resident via the memory
1714  * object control associated with the ubc_info associated with the vnode
1715  *
1716  * Parameters:	vp			The vnode we want to know about
1717  *
1718  * Returns:	1			Yes
1719  *		0			No
1720  */
1721 int
1722 ubc_pages_resident(vnode_t vp)
1723 {
1724 	kern_return_t           kret;
1725 	boolean_t                       has_pages_resident;
1726 
1727 	if (!UBCINFOEXISTS(vp)) {
1728 		return 0;
1729 	}
1730 
1731 	/*
1732 	 * The following call may fail if an invalid ui_control is specified,
1733 	 * or if there is no VM object associated with the control object.  In
1734 	 * either case, reacting to it as if there were no pages resident will
1735 	 * result in correct behavior.
1736 	 */
1737 	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1738 
1739 	if (kret != KERN_SUCCESS) {
1740 		return 0;
1741 	}
1742 
1743 	if (has_pages_resident == TRUE) {
1744 		return 1;
1745 	}
1746 
1747 	return 0;
1748 }
1749 
1750 /*
1751  * ubc_msync
1752  *
1753  * Clean and/or invalidate a range in the memory object that backs this vnode
1754  *
1755  * Parameters:	vp			The vnode whose associated ubc_info's
1756  *					associated memory object is to have a
1757  *					range invalidated within it
1758  *		beg_off			The start of the range, as an offset
1759  *		end_off			The end of the range, as an offset
1760  *		resid_off		The address of an off_t supplied by the
1761  *					caller; may be set to NULL to ignore
1762  *		flags			See ubc_msync_internal()
1763  *
1764  * Returns:	0			Success
1765  *		!0			Failure; an errno is returned
1766  *
1767  * Implicit Returns:
1768  *		*resid_off, modified	If non-NULL, the contents are ALWAYS
1769  *					modified; they are initialized to the
1770  *					beg_off, and in case of an I/O error,
1771  *					the difference between beg_off and the
1772  *					current value will reflect what was
1773  *					able to be written before the error
1774  *					occurred.  If no error is returned, the
1775  *					value of the resid_off is undefined; do
1776  *					NOT use it in place of end_off if you
1777  *					intend to increment from the end of the
1778  *					last call and call iteratively.
1779  *
1780  * Notes:	see ubc_msync_internal() for more detailed information.
1781  *
1782  */
1783 errno_t
1784 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1785 {
1786 	int retval;
1787 	int io_errno = 0;
1788 
1789 	if (resid_off) {
1790 		*resid_off = beg_off;
1791 	}
1792 
1793 	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1794 
1795 	if (retval == 0 && io_errno == 0) {
1796 		return EINVAL;
1797 	}
1798 	return io_errno;
1799 }
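/*
 * Usage sketch (illustrative, not part of the original file): push the
 * dirty pages of a byte range synchronously and recover how much was
 * written before an I/O error, per the contract documented above.
 *
 *	off_t resid = 0;
 *	errno_t err = ubc_msync(vp, beg, end, &resid, UBC_PUSHDIRTY | UBC_SYNC);
 *	if (err) {
 *		// "resid - beg" bytes were pushed before the error occurred
 *	}
 */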
1800 
1801 
1802 /*
1803  * ubc_msync_internal
1804  *
1805  * Clean and/or invalidate a range in the memory object that backs this vnode
1806  *
1807  * Parameters:	vp			The vnode whose associated ubc_info's
1808  *					associated memory object is to have a
1809  *					range invalidated within it
1810  *		beg_off			The start of the range, as an offset
1811  *		end_off			The end of the range, as an offset
1812  *		resid_off		The address of an off_t supplied by the
1813  *					caller; may be set to NULL to ignore
1814  *		flags			MUST contain at least one of the flags
1815  *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
1816  *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1817  *					UBC_SYNC may also be specified to cause
1818  *					this function to block until the
1819  *					operation is complete.  The behavior
1820  *					of UBC_SYNC is otherwise undefined.
1821  *		io_errno		The address of an int to contain the
1822  *					errno from a failed I/O operation, if
1823  *					one occurs; may be set to NULL to
1824  *					ignore
1825  *
1826  * Returns:	1			Success
1827  *		0			Failure
1828  *
1829  * Implicit Returns:
1830  *		*resid_off, modified	The contents of this offset MAY be
1831  *					modified; in case of an I/O error, the
1832  *					difference between beg_off and the
1833  *					current value will reflect what was
1834  *					able to be written before the error
1835  *					occurred.
1836  *		*io_errno, modified	The contents of this offset are set to
1837  *					an errno, if an error occurs; if the
1838  *					caller supplies an io_errno parameter,
1839  *					they should be careful to initialize it
1840  *					to 0 before calling this function to
1841  *					enable them to distinguish an error
1842  *					with a valid *resid_off from an invalid
1843  *					one, and to avoid potentially falsely
1844  *					reporting an error, depending on use.
1845  *
1846  * Notes:	If there is no ubc_info associated with the vnode supplied,
1847  *		this function immediately returns success.
1848  *
1849  *		If the value of end_off is less than or equal to beg_off, this
1850  *		function immediately returns success; that is, end_off is NOT
1851  *		inclusive.
1852  *
1853  *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1854  *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1855  *		attempt to block on in-progress I/O by calling this function
1856  *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1857  *		in order to block pending on the I/O already in progress.
1858  *
1859  *		The start offset is truncated to the page boundary and the
1860  *		size is adjusted to include the last page in the range; that
1861  *		is, end_off on exactly a page boundary will not change if it
1862  *		is rounded, and the range of bytes written will be from the
1863  *		truncated beg_off to the rounded (end_off - 1).
1864  */
1865 static int
1866 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1867 {
1868 	memory_object_size_t    tsize;
1869 	kern_return_t           kret;
1870 	int request_flags = 0;
1871 	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;
1872 
1873 	if (!UBCINFOEXISTS(vp)) {
1874 		return 0;
1875 	}
1876 	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1877 		return 0;
1878 	}
1879 	if (end_off <= beg_off) {
1880 		return 1;
1881 	}
1882 
1883 	if (flags & UBC_INVALIDATE) {
1884 		/*
1885 		 * discard the resident pages
1886 		 */
1887 		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1888 	}
1889 
1890 	if (flags & UBC_SYNC) {
1891 		/*
1892 		 * wait for all the I/O to complete before returning
1893 		 */
1894 		request_flags |= MEMORY_OBJECT_IO_SYNC;
1895 	}
1896 
1897 	if (flags & UBC_PUSHDIRTY) {
1898 		/*
1899 		 * we only return the dirty pages in the range
1900 		 */
1901 		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1902 	}
1903 
1904 	if (flags & UBC_PUSHALL) {
1905 		/*
1906 		 * then return all the interesting pages in the range (both
1907 		 * dirty and precious) to the pager
1908 		 */
1909 		flush_flags = MEMORY_OBJECT_RETURN_ALL;
1910 	}
1911 
1912 	beg_off = trunc_page_64(beg_off);
1913 	end_off = round_page_64(end_off);
1914 	tsize   = (memory_object_size_t)end_off - beg_off;
1915 
1916 	/* flush and/or invalidate pages in the range requested */
1917 	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
1918 	    beg_off, tsize,
1919 	    (memory_object_offset_t *)resid_off,
1920 	    io_errno, flush_flags, request_flags,
1921 	    VM_PROT_NO_CHANGE);
1922 
1923 	return (kret == KERN_SUCCESS) ? 1 : 0;
1924 }
1925 
1926 
1927 /*
1928  * ubc_map
1929  *
1930  * Explicitly map a vnode that has an associated ubc_info, and add a reference
1931  * to it for the ubc system, if there isn't one already, so it will not be
1932  * recycled while it's in use, and set flags on the ubc_info to indicate that
1933  * we have done this
1934  *
1935  * Parameters:	vp			The vnode to map
1936  *		flags			The mapping flags for the vnode; this
1937  *					will be a combination of one or more of
1938  *					PROT_READ, PROT_WRITE, and PROT_EXEC
1939  *
1940  * Returns:	0			Success
1941  *		EPERM			Permission was denied
1942  *
1943  * Notes:	An I/O reference on the vnode must already be held on entry
1944  *
1945  *		If there is no ubc_info associated with the vnode, this function
1946  *		will return success.
1947  *
1948  *		If a permission error occurs, this function will return
1949  *		failure; all other failures will cause this function to return
1950  *		success.
1951  *
1952  *		IMPORTANT: This is an internal use function, and its symbols
1953  *		are not exported, hence its error checking is not very robust.
1954  *		It is primarily used by:
1955  *
1956  *		o	mmap(), when mapping a file
1957  *		o	When mapping a shared file (a shared library in the
1958  *			shared segment region)
1959  *		o	When loading a program image during the exec process
1960  *
1961  *		...all of these uses ignore the return code, and any fault that
1962  *		results later because of a failure is handled in the fix-up path
1963  *		of the fault handler.  The interface exists primarily as a
1964  *		performance hint.
1965  *
1966  *		Given that third party implementations of the kinds of
1967  *		interfaces that would use this function, such as alternative
1968  *		executable formats, are unsupported, this function is not
1969  *		exported for general use.
1970  *
1971  *		The extra reference is held until the VM system unmaps the
1972  *		vnode from its own context to maintain a vnode reference in
1973  *		cases like open()/mmap()/close(), which leave the backing
1974  *		object referenced by a mapped memory region in a process
1975  *		address space.
1976  */
1977 __private_extern__ int
1978 ubc_map(vnode_t vp, int flags)
1979 {
1980 	struct ubc_info *uip;
1981 	int error = 0;
1982 	int need_ref = 0;
1983 	int need_wakeup = 0;
1984 
1985 	if (UBCINFOEXISTS(vp)) {
1986 		vnode_lock(vp);
1987 		uip = vp->v_ubcinfo;
1988 
1989 		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
1990 			SET(uip->ui_flags, UI_MAPWAITING);
1991 			(void) msleep(&uip->ui_flags, &vp->v_lock,
1992 			    PRIBIO, "ubc_map", NULL);
1993 		}
1994 		SET(uip->ui_flags, UI_MAPBUSY);
1995 		vnode_unlock(vp);
1996 
1997 		error = VNOP_MMAP(vp, flags, vfs_context_current());
1998 
1999 		/*
2000 		 * rdar://problem/22587101 required that we stop propagating
2001 		 * EPERM up the stack. Otherwise, we would have to funnel up
2002 		 * the error at all the call sites for memory_object_map().
2003 		 * The risk is in having to undo the map/object/entry state at
2004 		 * all these call sites. It would also affect more than just mmap()
2005 		 * e.g. vm_remap().
2006 		 *
2007 		 *	if (error != EPERM)
2008 		 *              error = 0;
2009 		 */
2010 
2011 		error = 0;
2012 
2013 		vnode_lock_spin(vp);
2014 
2015 		if (error == 0) {
2016 			if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
2017 				need_ref = 1;
2018 			}
2019 			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
2020 			if (flags & PROT_WRITE) {
2021 				SET(uip->ui_flags, (UI_WASMAPPEDWRITE | UI_MAPPEDWRITE));
2022 			}
2023 		}
2024 		CLR(uip->ui_flags, UI_MAPBUSY);
2025 
2026 		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2027 			CLR(uip->ui_flags, UI_MAPWAITING);
2028 			need_wakeup = 1;
2029 		}
2030 		vnode_unlock(vp);
2031 
2032 		if (need_wakeup) {
2033 			wakeup(&uip->ui_flags);
2034 		}
2035 
2036 		if (need_ref) {
2037 			/*
2038 			 * Make sure we get a ref as we can't unwind from here
2039 			 */
2040 			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
2041 				panic("%s : VNODE_REF_FORCE failed", __FUNCTION__);
2042 			}
2043 			/*
2044 			 * Vnodes that are on "unreliable" media (like disk
2045 			 * images, network filesystems, 3rd-party filesystems,
2046 			 * and possibly external devices) could see their
2047 			 * contents be changed via the backing store without
2048 			 * triggering copy-on-write, so we can't fully rely
2049 			 * on copy-on-write and might have to resort to
2050 			 * copy-on-read to protect "privileged" processes and
2051 			 * prevent privilege escalation.
2052 			 *
2053 			 * The root filesystem is considered "reliable" because
2054 			 * there's not much point in trying to protect
2055 			 * ourselves from such a vulnerability and the extra
2056 			 * cost of copy-on-read (CPU time and memory pressure)
2057 			 * could result in some serious regressions.
2058 			 */
2059 			if (vp->v_mount != NULL &&
2060 			    ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
2061 			    vnode_on_reliable_media(vp))) {
2062 				/*
2063 				 * This vnode is deemed "reliable" so mark
2064 				 * its VM object as "trusted".
2065 				 */
2066 				memory_object_mark_trusted(uip->ui_control);
2067 			} else {
2068 //				printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
2069 			}
2070 		}
2071 	}
2072 	return error;
2073 }
2074 
2075 
2076 /*
2077  * ubc_destroy_named
2078  *
2079  * Destroy the named memory object associated with the ubc_info control object
2080  * associated with the designated vnode, if there is a ubc_info associated
2081  * with the vnode, and a control object is associated with it
2082  *
2083  * Parameters:	vp			The designated vnode
2084  *
2085  * Returns:	(void)
2086  *
2087  * Notes:	This function is called on vnode termination for all vnodes,
2088  *		and must therefore not assume that there is a ubc_info that is
2089  *		associated with the vnode, nor that there is a control object
2090  *		associated with the ubc_info.
2091  *
2092  *		If all the conditions necessary are present, this function
2093  *		calls memory_object_destroy(), which will in turn end up
2094  *		calling ubc_unmap() to release any vnode references that were
2095  *		established via ubc_map().
2096  *
2097  *		IMPORTANT: This is an internal use function that is used
2098  *		exclusively by the internal use function vclean().
2099  */
2100 __private_extern__ void
2101 ubc_destroy_named(vnode_t vp, vm_object_destroy_reason_t reason)
2102 {
2103 	memory_object_control_t control;
2104 	struct ubc_info *uip;
2105 	kern_return_t kret;
2106 
2107 	if (UBCINFOEXISTS(vp)) {
2108 		uip = vp->v_ubcinfo;
2109 
2110 		/* Terminate the memory object  */
2111 		control = ubc_getobject(vp, UBC_HOLDOBJECT);
2112 		if (control != MEMORY_OBJECT_CONTROL_NULL) {
2113 			kret = memory_object_destroy(control, reason);
2114 			if (kret != KERN_SUCCESS) {
2115 				panic("ubc_destroy_named: memory_object_destroy failed");
2116 			}
2117 		}
2118 	}
2119 }
2120 
2121 
2122 /*
2123  * ubc_isinuse
2124  *
2125  * Determine whether or not a vnode is currently in use by ubc at a level in
2126  * excess of the requested busycount
2127  *
2128  * Parameters:	vp			The vnode to check
2129  *		busycount		The threshold busy count, used to bias
2130  *					the count usually already held by the
2131  *					caller to avoid races
2132  *
2133  * Returns:	1			The vnode is in use over the threshold
2134  *		0			The vnode is not in use over the
2135  *					threshold
2136  *
2137  * Notes:	Because the vnode is only held locked while actually querying
2138  *		the use count, this function only represents a snapshot of the
2139  *		current state of the vnode.  If more accurate information is
2140  *		required, an additional busycount should be held by the caller
2141  *		and a non-zero busycount used.
2142  *
2143  *		If there is no ubc_info associated with the vnode, this
2144  *		function will report that the vnode is not in use by ubc.
2145  */
2146 int
2147 ubc_isinuse(struct vnode *vp, int busycount)
2148 {
2149 	if (!UBCINFOEXISTS(vp)) {
2150 		return 0;
2151 	}
2152 	return ubc_isinuse_locked(vp, busycount, 0);
2153 }
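/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that itself holds one use count on the vnode passes busycount == 1 so
 * its own reference is not counted, per the bias described above.
 *
 *	if (ubc_isinuse(vp, 1)) {
 *		// someone beyond the caller is using the vnode
 *	}
 */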
2154 
2155 
2156 /*
2157  * ubc_isinuse_locked
2158  *
2159  * Determine whether or not a vnode is currently in use by ubc at a level in
2160  * excess of the requested busycount
2161  *
2162  * Parameters:	vp			The vnode to check
2163  *		busycount		The threshold busy count, used to bias
2164  *					the count usually already held by the
2165  *					caller to avoid races
2166  *		locked			True if the vnode is already locked by
2167  *					the caller
2168  *
2169  * Returns:	1			The vnode is in use over the threshold
2170  *		0			The vnode is not in use over the
2171  *					threshold
2172  *
2173  * Notes:	If the vnode is not locked on entry, it is locked while
2174  *		actually querying the use count.  If this is the case, this
2175  *		function only represents a snapshot of the current state of
2176  *		the vnode.  If more accurate information is required, the
2177  *		vnode lock should be held by the caller, otherwise an
2178  *		additional busycount should be held by the caller and a
2179  *		non-zero busycount used.
2180  *
2181  *		If there is no ubc_info associated with the vnode, this
2182  *		function will report that the vnode is not in use by ubc.
2183  */
2184 int
2185 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2186 {
2187 	int retval = 0;
2188 
2189 
2190 	if (!locked) {
2191 		vnode_lock_spin(vp);
2192 	}
2193 
2194 	if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2195 		retval = 1;
2196 	}
2197 
2198 	if (!locked) {
2199 		vnode_unlock(vp);
2200 	}
2201 	return retval;
2202 }
2203 
2204 
2205 /*
2206  * ubc_unmap
2207  *
2208  * Reverse the effects of a ubc_map() call for a given vnode
2209  *
2210  * Parameters:	vp			vnode to unmap from ubc
2211  *
2212  * Returns:	(void)
2213  *
2214  * Notes:	This is an internal use function used by vnode_pager_unmap().
2215  *		It will attempt to obtain a reference on the supplied vnode,
2216  *		and if it can do so, and there is an associated ubc_info, and
2217  *		the flags indicate that it was mapped via ubc_map(), then the
2218  *		flag is cleared, the mapping removed, and the reference taken
2219  *		by ubc_map() is released.
2220  *
2221  *		IMPORTANT: This MUST only be called by the VM
2222  *		to prevent race conditions.
2223  */
2224 __private_extern__ void
2225 ubc_unmap(struct vnode *vp)
2226 {
2227 	struct ubc_info *uip;
2228 	int     need_rele = 0;
2229 	int     need_wakeup = 0;
2230 
2231 	if (vnode_getwithref(vp)) {
2232 		return;
2233 	}
2234 
2235 	if (UBCINFOEXISTS(vp)) {
2236 		bool want_fsevent = false;
2237 
2238 		vnode_lock(vp);
2239 		uip = vp->v_ubcinfo;
2240 
2241 		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2242 			SET(uip->ui_flags, UI_MAPWAITING);
2243 			(void) msleep(&uip->ui_flags, &vp->v_lock,
2244 			    PRIBIO, "ubc_unmap", NULL);
2245 		}
2246 		SET(uip->ui_flags, UI_MAPBUSY);
2247 
2248 		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
2249 			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
2250 				want_fsevent = true;
2251 			}
2252 
2253 			need_rele = 1;
2254 
2255 			/*
2256 			 * We want to clear the mapped flags after we've called
2257 			 * VNOP_MNOMAP to avoid certain races and allow
2258 			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2259 			 */
2260 		}
2261 		vnode_unlock(vp);
2262 
2263 		if (need_rele) {
2264 			vfs_context_t ctx = vfs_context_current();
2265 
2266 			(void)VNOP_MNOMAP(vp, ctx);
2267 
2268 #if CONFIG_FSE
2269 			/*
2270 			 * Why do we want an fsevent here?  Normally the
2271 			 * content modified fsevent is posted when a file is
2272 			 * closed and only if it's written to via conventional
2273 			 * means.  It's perfectly legal to close a file and
2274 			 * keep your mappings and we don't currently track
2275 			 * whether it was written to via a mapping.
2276 			 * Therefore, we need to post an fsevent here if the
2277 			 * file was mapped writable.  This may result in false
2278 			 * events, i.e. we post a notification when nothing
2279 			 * has really changed.
2280 			 */
2281 			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2282 				add_fsevent(FSE_CONTENT_MODIFIED_NO_HLINK, ctx,
2283 				    FSE_ARG_VNODE, vp,
2284 				    FSE_ARG_DONE);
2285 			}
2286 #endif
2287 
2288 			vnode_rele(vp);
2289 		}
2290 
2291 		vnode_lock_spin(vp);
2292 
2293 		if (need_rele) {
2294 			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
2295 		}
2296 
2297 		CLR(uip->ui_flags, UI_MAPBUSY);
2298 
2299 		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2300 			CLR(uip->ui_flags, UI_MAPWAITING);
2301 			need_wakeup = 1;
2302 		}
2303 		vnode_unlock(vp);
2304 
2305 		if (need_wakeup) {
2306 			wakeup(&uip->ui_flags);
2307 		}
2308 	}
2309 	/*
2310 	 * the drop of the vnode ref will cleanup
2311 	 */
2312 	vnode_put(vp);
2313 }
2314 
2315 
2316 /*
2317  * ubc_page_op
2318  *
2319  * Manipulate individual page state for a vnode with an associated ubc_info
2320  * with an associated memory object control.
2321  *
2322  * Parameters:	vp			The vnode backing the page
2323  *		f_offset		A file offset interior to the page
2324  *		ops			The operations to perform, as a bitmap
2325  *					(see below for more information)
2326  *		phys_entryp		The address of a ppnum_t; may be NULL
2327  *					to ignore
2328  *		flagsp			A pointer to an int to contain flags;
2329  *					may be NULL to ignore
2330  *
2331  * Returns:	KERN_SUCCESS		Success
2332  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2333  *					object associated
2334  *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
2335  *					not physically contiguous
2336  *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
2337  *					physically contiguous
2338  *		KERN_FAILURE		If the page cannot be looked up
2339  *
2340  * Implicit Returns:
2341  *		*phys_entryp (modified)	If phys_entryp is non-NULL and
2342  *					UPL_POP_PHYSICAL
2343  *		*flagsp (modified)	If flagsp is non-NULL and there was
2344  *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
2345  *
2346  * Notes:	For object boundaries, it is considerably more efficient to
2347  *		ensure that f_offset is in fact on a page boundary, as this
2348  *		will avoid internal use of the hash table to identify the
2349  *		page, and would therefore skip a number of early optimizations.
2350  *		Since this is a page operation anyway, the caller should try
2351  *		to pass only a page aligned offset.
2352  *
2353  *		*flagsp may be modified even if this function fails.  If it is
2354  *		modified, it will contain the condition of the page before the
2355  *		requested operation was attempted; these will only include the
2356  *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2357  *		UPL_POP_SET, or UPL_POP_CLR bits.
2358  *
2359  *		The flags field may contain a specific operation, such as
2360  *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
2361  *
2362  *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
2363  *						*phys_entryp and successful, set
2364  *						*phys_entryp
2365  *		o	UPL_POP_DUMP		Dump the specified page
2366  *
2367  *		Otherwise, it is treated as a bitmap of one or more page
2368  *		operations to perform on the final memory object; allowable
2369  *		bit values are:
2370  *
2371  *		o	UPL_POP_DIRTY		The page is dirty
2372  *		o	UPL_POP_PAGEOUT		The page is paged out
2373  *		o	UPL_POP_PRECIOUS	The page is precious
2374  *		o	UPL_POP_ABSENT		The page is absent
2375  *		o	UPL_POP_BUSY		The page is busy
2376  *
2377  *		If the page status is only being queried and not modified, then
2378  *		no other bits should be specified.  However, if it is being
2379  *		modified, exactly ONE of the following bits should be set:
2380  *
2381  *		o	UPL_POP_SET		Set the current bitmap bits
2382  *		o	UPL_POP_CLR		Clear the current bitmap bits
2383  *
2384  *		Thus to effect a combination of setting and clearing, it may be
2385  *		necessary to call this function twice.  If this is done, the
2386  *		set should be used before the clear, since clearing may trigger
2387  *		a wakeup on the destination page, and if the page is backed by
2388  *		an encrypted swap file, setting will trigger the decryption
2389  *		needed before the wakeup occurs.
2390  */
2391 kern_return_t
2392 ubc_page_op(
2393 	struct vnode    *vp,
2394 	off_t           f_offset,
2395 	int             ops,
2396 	ppnum_t *phys_entryp,
2397 	int             *flagsp)
2398 {
2399 	memory_object_control_t         control;
2400 
2401 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2402 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2403 		return KERN_INVALID_ARGUMENT;
2404 	}
2405 
2406 	return memory_object_page_op(control,
2407 	           (memory_object_offset_t)f_offset,
2408 	           ops,
2409 	           phys_entryp,
2410 	           flagsp);
2411 }
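/*
 * Usage sketch (illustrative, not part of the original file): a pure
 * status query of the page containing f_offset.  No UPL_POP_SET or
 * UPL_POP_CLR bit is passed (this assumes an ops value of 0 requests
 * status only), and the offset is truncated to a page boundary to avoid
 * the hash-table lookup mentioned above.
 *
 *	int pgflags = 0;
 *	kern_return_t kr = ubc_page_op(vp, trunc_page_64(f_offset), 0,
 *	    NULL, &pgflags);
 *	if (kr == KERN_SUCCESS && (pgflags & UPL_POP_DIRTY)) {
 *		// the page was dirty at the time of the query
 *	}
 */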
2412 
2413 
2414 /*
2415  * ubc_range_op
2416  *
2417  * Manipulate page state for a range of memory for a vnode with an associated
2418  * ubc_info with an associated memory object control, when page level state is
2419  * not required to be returned from the call (i.e. there are no phys_entryp or
2420  * flagsp parameters to this call, and it takes a range which may contain
2421  * multiple pages, rather than an offset interior to a single page).
2422  *
2423  * Parameters:	vp			The vnode backing the page
2424  *		f_offset_beg		A file offset interior to the start page
2425  *		f_offset_end		A file offset interior to the end page
2426  *		ops			The operations to perform, as a bitmap
2427  *					(see below for more information)
2428  *		range			The address of an int; may be NULL to
2429  *					ignore
2430  *
2431  * Returns:	KERN_SUCCESS		Success
2432  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2433  *					object associated
2434  *		KERN_INVALID_OBJECT	If the object is physically contiguous
2435  *
2436  * Implicit Returns:
2437  *		*range (modified)	If range is non-NULL, its contents will
2438  *					be modified to contain the number of
2439  *					bytes successfully operated upon.
2440  *
2441  * Notes:	IMPORTANT: This function cannot be used on a range that
2442  *		consists of physically contiguous pages.
2443  *
2444  *		For object boundaries, it is considerably more efficient to
2445  *		ensure that f_offset_beg and f_offset_end are in fact on page
2446  *		boundaries, as this will avoid internal use of the hash table
2447  *		to identify the page, and would therefore skip a number of
2448  *		early optimizations.  Since this is an operation on a set of
2449  *		pages anyway, the caller should try to pass only page aligned
2450  *		offsets.
2451  *
2452  *		*range will be modified only if this function succeeds.
2453  *
2454  *		The flags field MUST contain a specific operation; allowable
2455  *		values are:
2456  *
2457  *		o	UPL_ROP_ABSENT	Returns the extent of the range
2458  *					presented which is absent, starting
2459  *					with the start address presented
2460  *
2461  *		o	UPL_ROP_PRESENT	Returns the extent of the range
2462  *					presented which is present (resident),
2463  *					starting with the start address
2464  *					presented
2465  *		o	UPL_ROP_DUMP	Dump the pages which are found in the
2466  *					target object for the target range.
2467  *
2468  *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2469  *		multiple regions in the range, only the first matching region
2470  *		is returned.
2471  */
2472 kern_return_t
2473 ubc_range_op(
2474 	struct vnode    *vp,
2475 	off_t           f_offset_beg,
2476 	off_t           f_offset_end,
2477 	int             ops,
2478 	int             *range)
2479 {
2480 	memory_object_control_t         control;
2481 
2482 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2483 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2484 		return KERN_INVALID_ARGUMENT;
2485 	}
2486 
2487 	return memory_object_range_op(control,
2488 	           (memory_object_offset_t)f_offset_beg,
2489 	           (memory_object_offset_t)f_offset_end,
2490 	           ops,
2491 	           range);
2492 }
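/*
 * Usage sketch (illustrative, not part of the original file): measure how
 * much of a range is resident, starting at the beginning of the range.
 * Per the notes above only the first matching region is reported, and
 * *range comes back in bytes.
 *
 *	int bytes = 0;
 *	if (ubc_range_op(vp, start, end, UPL_ROP_PRESENT, &bytes) ==
 *	    KERN_SUCCESS) {
 *		// the first "bytes" bytes from "start" are resident
 *	}
 */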
2493 
2494 
2495 /*
2496  * ubc_create_upl
2497  *
2498  * Given a vnode, cause the population of a portion of the vm_object; based on
2499  * the nature of the request, the pages returned may contain valid data, or
2500  * they may be uninitialized.
2501  *
2502  * Parameters:	vp			The vnode from which to create the upl
2503  *		f_offset		The start offset into the backing store
2504  *					represented by the vnode
2505  *		bufsize			The size of the upl to create
2506  *		uplp			Pointer to the upl_t to receive the
2507  *					created upl; MUST NOT be NULL
2508  *		plp			Pointer to receive the internal page
2509  *					list for the created upl; MAY be NULL
2510  *					to ignore
2511  *
2512  * Returns:	KERN_SUCCESS		The requested upl has been created
2513  *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
2514  *					multiple of the page size
2515  *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
2516  *					the vnode, or there is no memory object
2517  *					control associated with the ubc_info
2518  *	memory_object_upl_request:KERN_INVALID_VALUE
2519  *					The supplied upl_flags argument is
2520  *					invalid
2521  * Implicit Returns:
2522  *		*uplp (modified)
2523  *		*plp (modified)		If non-NULL, the value of *plp will be
2524  *					modified to point to the internal page
2525  *					list; this modification may occur even
2526  *					if this function is unsuccessful, in
2527  *					which case the contents may be invalid
2528  *
2529  * Note:	If successful, the returned *uplp MUST subsequently be freed
2530  *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2531  *		ubc_upl_abort(), or ubc_upl_abort_range().
2532  */
2533 kern_return_t
2534 ubc_create_upl_external(
2535 	struct vnode    *vp,
2536 	off_t           f_offset,
2537 	int             bufsize,
2538 	upl_t           *uplp,
2539 	upl_page_info_t **plp,
2540 	int             uplflags)
2541 {
2542 	return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2543 }
2544 
2545 kern_return_t
2546 ubc_create_upl_kernel(
2547 	struct vnode    *vp,
2548 	off_t           f_offset,
2549 	int             bufsize,
2550 	upl_t           *uplp,
2551 	upl_page_info_t **plp,
2552 	int             uplflags,
2553 	vm_tag_t tag)
2554 {
2555 	memory_object_control_t         control;
2556 	kern_return_t                   kr;
2557 
2558 	if (plp != NULL) {
2559 		*plp = NULL;
2560 	}
2561 	*uplp = NULL;
2562 
2563 	if (bufsize & 0xfff) {
2564 		return KERN_INVALID_ARGUMENT;
2565 	}
2566 
2567 	if (bufsize > MAX_UPL_SIZE_BYTES) {
2568 		return KERN_INVALID_ARGUMENT;
2569 	}
2570 
2571 	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
2572 		if (uplflags & UPL_UBC_MSYNC) {
2573 			uplflags &= UPL_RET_ONLY_DIRTY;
2574 
2575 			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
2576 			    UPL_SET_INTERNAL | UPL_SET_LITE;
2577 		} else if (uplflags & UPL_UBC_PAGEOUT) {
2578 			uplflags &= UPL_RET_ONLY_DIRTY;
2579 
2580 			if (uplflags & UPL_RET_ONLY_DIRTY) {
2581 				uplflags |= UPL_NOBLOCK;
2582 			}
2583 
2584 			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
2585 			    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
2586 		} else {
2587 			uplflags |= UPL_RET_ONLY_ABSENT |
2588 			    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2589 			    UPL_SET_INTERNAL | UPL_SET_LITE;
2590 
2591 			/*
2592 			 * if the requested size == PAGE_SIZE, we don't want to set
2593 			 * the UPL_NOBLOCK since we may be trying to recover from a
2594 			 * previous partial pagein I/O that occurred because we were low
2595 			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2596 			 * since we're only asking for a single page, we can block w/o fear
2597 			 * of tying up pages while waiting for more to become available
2598 			 */
2599 			if (bufsize > PAGE_SIZE) {
2600 				uplflags |= UPL_NOBLOCK;
2601 			}
2602 		}
2603 	} else {
2604 		uplflags &= ~UPL_FOR_PAGEOUT;
2605 
2606 		if (uplflags & UPL_WILL_BE_DUMPED) {
2607 			uplflags &= ~UPL_WILL_BE_DUMPED;
2608 			uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2609 		} else {
2610 			uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2611 		}
2612 	}
2613 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2614 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2615 		return KERN_INVALID_ARGUMENT;
2616 	}
2617 
2618 	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
2619 	if (kr == KERN_SUCCESS && plp != NULL) {
2620 		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
2621 	}
2622 	return kr;
2623 }
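/*
 * Lifecycle sketch (illustrative, not part of the original file): create a
 * single-page UPL, map it into the kernel, then commit it.  Every
 * successful creation MUST be balanced by one of the commit/abort calls
 * noted above; the UPL_UBC_PAGEIN flag and VM_KERN_MEMORY_FILE tag are
 * example choices, not a recommendation.
 *
 *	upl_t upl = NULL;
 *	upl_page_info_t *pl = NULL;
 *	vm_offset_t kaddr = 0;
 *	kern_return_t kr;
 *
 *	kr = ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
 *	    UPL_UBC_PAGEIN, VM_KERN_MEMORY_FILE);
 *	if (kr == KERN_SUCCESS) {
 *		if (ubc_upl_map(upl, &kaddr) == KERN_SUCCESS) {
 *			// ... read or fill the page at kaddr ...
 *			(void) ubc_upl_unmap(upl);
 *		}
 *		(void) ubc_upl_commit(upl);	// always deallocates the UPL
 *	}
 */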
2624 
2625 
2626 /*
2627  * ubc_upl_maxbufsize
2628  *
2629  * Return the maximum bufsize ubc_create_upl( ) will take.
2630  *
2631  * Parameters:	none
2632  *
2633  * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
2634  */
2635 upl_size_t
2636 ubc_upl_maxbufsize(
2637 	void)
2638 {
2639 	return MAX_UPL_SIZE_BYTES;
2640 }
2641 
2642 /*
2643  * ubc_upl_map
2644  *
2645  * Map the page list associated with the supplied upl into the kernel virtual
2646  * address space at the virtual address indicated by the dst_addr argument;
2647  * the entire upl is mapped
2648  *
2649  * Parameters:	upl			The upl to map
2650  *		dst_addr		The address at which to map the upl
2651  *
2652  * Returns:	KERN_SUCCESS		The upl has been mapped
2653  *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
2654  *		KERN_FAILURE		The upl is already mapped
2655  *	vm_map_enter:KERN_INVALID_ARGUMENT
2656  *					A failure code from vm_map_enter() due
2657  *					to an invalid argument
2658  */
2659 kern_return_t
2660 ubc_upl_map(
2661 	upl_t           upl,
2662 	vm_offset_t     *dst_addr)
2663 {
2664 	return vm_upl_map(kernel_map, upl, dst_addr);
2665 }
2666 
2667 /*
2668  * ubc_upl_map_range: similar to ubc_upl_map, but the focus is on a range
2669  * of the UPL. Takes an offset, size, and protection so that only a part
2670  * of the UPL can be mapped with the right protections.
2671  */
2672 kern_return_t
2673 ubc_upl_map_range(
2674 	upl_t           upl,
2675 	vm_offset_t     offset_to_map,
2676 	vm_size_t       size_to_map,
2677 	vm_prot_t       prot_to_map,
2678 	vm_offset_t     *dst_addr)
2679 {
2680 	return vm_upl_map_range(kernel_map, upl, offset_to_map, size_to_map, prot_to_map, dst_addr);
2681 }
2682 
2683 
2684 /*
2685  * ubc_upl_unmap
2686  *
2687  * Unmap the page list associated with the supplied upl from the kernel virtual
2688  * address space; the entire upl is unmapped.
2689  *
2690  * Parameters:	upl			The upl to unmap
2691  *
2692  * Returns:	KERN_SUCCESS		The upl has been unmapped
2693  *		KERN_FAILURE		The upl is not currently mapped
2694  *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
2695  */
2696 kern_return_t
2697 ubc_upl_unmap(
2698 	upl_t   upl)
2699 {
2700 	return vm_upl_unmap(kernel_map, upl);
2701 }
2702 
2703 /*
2704  * ubc_upl_unmap_range: similar to ubc_upl_unmap, but the focus is
2705  * on part of the UPL that is mapped. The offset and size parameters
2706  * specify what part of the UPL needs to be unmapped.
2707  *
2708  * Note: Currently offset & size are unused as we always initiate the unmap from the
2709  * very beginning of the UPL's mapping and track the mapped size in the UPL. But we
2710  * might want to allow unmapping a UPL in the middle, for example, and we can use the
2711  * offset + size parameters for that purpose.
2712  */
2713 kern_return_t
2714 ubc_upl_unmap_range(
2715 	upl_t   upl,
2716 	vm_offset_t     offset_to_unmap,
2717 	vm_size_t       size_to_unmap)
2718 {
2719 	return vm_upl_unmap_range(kernel_map, upl, offset_to_unmap, size_to_unmap);
2720 }
2721 
2722 
2723 /*
2724  * ubc_upl_commit
2725  *
2726  * Commit the contents of the upl to the backing store
2727  *
2728  * Parameters:	upl			The upl to commit
2729  *
2730  * Returns:	KERN_SUCCESS		The upl has been committed
2731  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2732  *		KERN_FAILURE		The supplied upl does not represent
2733  *					device memory, and the offset plus the
2734  *					size would exceed the actual size of
2735  *					the upl
2736  *
2737  * Notes:	In practice, the only return value for this function should be
2738  *		KERN_SUCCESS, unless there has been data structure corruption;
2739  *		since the upl is deallocated regardless of success or failure,
2740  *		there's really nothing to do about this other than panic.
2741  *
2742  *		IMPORTANT: Use of this function should not be mixed with use of
2743  *		ubc_upl_commit_range(), due to the unconditional deallocation
2744  *		by this function.
2745  */
2746 kern_return_t
2747 ubc_upl_commit(
2748 	upl_t                   upl)
2749 {
2750 	upl_page_info_t *pl;
2751 	kern_return_t   kr;
2752 
2753 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2754 	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2755 	upl_deallocate(upl);
2756 	return kr;
2757 }
2758 
2759 
2760 /*
2761  * ubc_upl_commit_range
2762  *
2763  * Commit the contents of the specified range of the upl to the backing store
2764  *
2765  * Parameters:	upl			The upl to commit
2766  *		offset			The offset into the upl
2767  *		size			The size of the region to be committed,
2768  *					starting at the specified offset
2769  *		flags			commit type (see below)
2770  *
2771  * Returns:	KERN_SUCCESS		The range has been committed
2772  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2773  *		KERN_FAILURE		The supplied upl does not represent
2774  *					device memory, and the offset plus the
2775  *					size would exceed the actual size of
2776  *					the upl
2777  *
2778  * Notes:	IMPORTANT: If the commit is successful, and the object is now
2779  *		empty, the upl will be deallocated.  Since the caller cannot
2780  *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2781  *		should generally only be used when the offset is 0 and the size
2782  *		is equal to the upl size.
2783  *
2784  *		The flags argument is a bitmap of flags on the range of pages in
2785  *		the upl to be committed; allowable flags are:
2786  *
2787  *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
2788  *							both empty and has been
2789  *							successfully committed
2790  *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each page's dirty
2791  *							bit; will prevent a
2792  *							later pageout
2793  *		o	UPL_COMMIT_SET_DIRTY		Set each page's dirty
2794  *							bit; will cause a later
2795  *							pageout
2796  *		o	UPL_COMMIT_INACTIVATE		Clear each page's
2797  *							reference bit; the page
2798  *							will not be accessed
2799  *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
2800  *							become busy when an
2801  *							IOMemoryDescriptor is
2802  *							mapped or redirected,
2803  *							and we have to wait for
2804  *							an IOKit driver
2805  *
2806  *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2807  *		not be specified by the caller.
2808  *
2809  *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2810  *		mutually exclusive, and should not be combined.
2811  */
2812 kern_return_t
2813 ubc_upl_commit_range(
2814 	upl_t                   upl,
2815 	upl_offset_t            offset,
2816 	upl_size_t              size,
2817 	int                             flags)
2818 {
2819 	upl_page_info_t *pl;
2820 	boolean_t               empty;
2821 	kern_return_t   kr;
2822 
2823 	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2824 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
2825 	}
2826 
2827 	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2828 		return KERN_INVALID_ARGUMENT;
2829 	}
2830 
2831 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2832 
2833 	kr = upl_commit_range(upl, offset, size, flags,
2834 	    pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2835 
2836 	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2837 		upl_deallocate(upl);
2838 	}
2839 
2840 	return kr;
2841 }
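/*
 * Usage sketch (illustrative, not part of the original file): commit a
 * whole UPL and let it be freed once empty.  Per the notes above,
 * UPL_COMMIT_FREE_ON_EMPTY is safest with offset 0 and a size equal to
 * the UPL size ("upl_size" here is hypothetical), because the caller
 * cannot tell whether deallocation occurred.
 *
 *	kr = ubc_upl_commit_range(upl, 0, upl_size,
 *	    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_CLEAR_DIRTY);
 *	// "upl" must not be touched after this point
 */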
2842 
2843 
2844 /*
2845  * ubc_upl_abort_range
2846  *
2847  * Abort the contents of the specified range of the specified upl
2848  *
2849  * Parameters:	upl			The upl to abort
2850  *		offset			The offset into the upl
2851  *		size			The size of the region to be aborted,
2852  *					starting at the specified offset
2853  *		abort_flags		abort type (see below)
2854  *
2855  * Returns:	KERN_SUCCESS		The range has been aborted
2856  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2857  *		KERN_FAILURE		The supplied upl does not represent
2858  *					device memory, and the offset plus the
2859  *					size would exceed the actual size of
2860  *					the upl
2861  *
2862  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2863  *		empty, the upl will be deallocated.  Since the caller cannot
2864  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2865  *		should generally only be used when the offset is 0 and the size
2866  *		is equal to the upl size.
2867  *
2868  *		The abort_flags argument is a bitmap of flags on the range of
2869  *		pages in the upl to be aborted; allowable flags are:
2870  *
2871  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2872  *						empty and has been successfully
2873  *						aborted
2874  *		o	UPL_ABORT_RESTART	The operation must be restarted
2875  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2876  *		o	UPL_ABORT_ERROR		An I/O error occurred
2877  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2878  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2879  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2880  *
2881  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2882  *		not be specified by the caller.  It is intended to fulfill the
2883  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2884  *		ubc_upl_commit_range(), but is never referenced internally.
2885  *
2886  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2887  *		referenced; do not use it.
2888  */
2889 kern_return_t
2890 ubc_upl_abort_range(
2891 	upl_t                   upl,
2892 	upl_offset_t            offset,
2893 	upl_size_t              size,
2894 	int                             abort_flags)
2895 {
2896 	kern_return_t   kr;
2897 	boolean_t               empty = FALSE;
2898 
2899 	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2900 		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2901 	}
2902 
2903 	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2904 
2905 	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2906 		upl_deallocate(upl);
2907 	}
2908 
2909 	return kr;
2910 }
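/*
 * Usage sketch (illustrative, not part of the original file): the error
 * path that mirrors the commit above, marking the whole range as having
 * failed with an I/O error and freeing the UPL once empty ("upl_size" is
 * hypothetical).
 *
 *	(void) ubc_upl_abort_range(upl, 0, upl_size,
 *	    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
 */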
2911 
2912 
2913 /*
2914  * ubc_upl_abort
2915  *
2916  * Abort the contents of the specified upl
2917  *
2918  * Parameters:	upl			The upl to abort
2919  *		abort_type		abort type (see below)
2920  *
2921  * Returns:	KERN_SUCCESS		The range has been aborted
2922  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2923  *		KERN_FAILURE		The supplied upl does not represent
2924  *					device memory, and the offset plus the
2925  *					size would exceed the actual size of
2926  *					the upl
2927  *
2928  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2929  *		empty, the upl will be deallocated.  Since the caller cannot
2930  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2931  *		should generally only be used when the offset is 0 and the size
2932  *		is equal to the upl size.
2933  *
2934  *		The abort_type is a bitmap of flags on the range of
2935  *		pages in the upl to be aborted; allowable flags are:
2936  *
2937  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2938  *						empty and has been successfully
2939  *						aborted
2940  *		o	UPL_ABORT_RESTART	The operation must be restarted
2941  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2942  *		o	UPL_ABORT_ERROR		An I/O error occurred
2943  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2944  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2945  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2946  *
2947  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2948  *		not be specified by the caller.  It is intended to fulfill the
2949  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2950  *		ubc_upl_commit_range(), but is never referenced internally.
2951  *
2952  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2953  *		referenced; do not use it.
2954  */
2955 kern_return_t
2956 ubc_upl_abort(
2957 	upl_t                   upl,
2958 	int                             abort_type)
2959 {
2960 	kern_return_t   kr;
2961 
2962 	kr = upl_abort(upl, abort_type);
2963 	upl_deallocate(upl);
2964 	return kr;
2965 }
2966 
2967 
2968 /*
2969  * ubc_upl_pageinfo
2970  *
2971  *  Retrieve the internal page list for the specified upl
2972  *
2973  * Parameters:	upl			The upl to obtain the page list from
2974  *
2975  * Returns:	!NULL			The (upl_page_info_t *) for the page
2976  *					list internal to the upl
2977  *		NULL			Error/no page list associated
2978  *
2979  * Notes:	IMPORTANT: The function is only valid on internal objects
2980  *		where the list request was made with the UPL_INTERNAL flag.
2981  *
2982  *		This function is a utility helper function, since some callers
2983  *		may not have direct access to the header defining the macro,
2984  *		due to abstraction layering constraints.
2985  */
2986 upl_page_info_t *
2987 ubc_upl_pageinfo(
2988 	upl_t                   upl)
2989 {
2990 	return UPL_GET_INTERNAL_PAGE_LIST(upl);
2991 }
2992 
2993 
2994 int
2995 UBCINFOEXISTS(const struct vnode * vp)
2996 {
2997 	return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
2998 }
2999 
3000 
3001 void
3002 ubc_upl_range_needed(
3003 	upl_t           upl,
3004 	int             index,
3005 	int             count)
3006 {
3007 	upl_range_needed(upl, index, count);
3008 }
3009 
3010 boolean_t
3011 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
3012 {
3013 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
3014 		return FALSE;
3015 	}
3016 	if (writable) {
3017 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
3018 	}
3019 	return TRUE;
3020 }
3021 
3022 boolean_t
3023 ubc_is_mapped_writable(const struct vnode *vp)
3024 {
3025 	boolean_t writable;
3026 	return ubc_is_mapped(vp, &writable) && writable;
3027 }
3028 
3029 boolean_t
3030 ubc_was_mapped(const struct vnode *vp, boolean_t *writable)
3031 {
3032 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPED)) {
3033 		return FALSE;
3034 	}
3035 	if (writable) {
3036 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPEDWRITE);
3037 	}
3038 	return TRUE;
3039 }
3040 
3041 boolean_t
3042 ubc_was_mapped_writable(const struct vnode *vp)
3043 {
3044 	boolean_t writable;
3045 	return ubc_was_mapped(vp, &writable) && writable;
3046 }
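/*
 * Usage sketch (illustrative, not part of the original file): the is/was
 * pairs above distinguish a live mapping from one that existed at some
 * point in the ubc_info's lifetime.
 *
 *	if (ubc_is_mapped_writable(vp)) {
 *		// a writable mapping exists right now
 *	} else if (ubc_was_mapped_writable(vp)) {
 *		// no longer mapped writable, but it was at some point
 *	}
 */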
3047 
3048 
3049 /*
3050  * CODE SIGNING
3051  */
3052 static atomic_size_t cs_blob_size = 0;
3053 static atomic_uint_fast32_t cs_blob_count = 0;
3054 static atomic_size_t cs_blob_size_peak = 0;
3055 static atomic_size_t cs_blob_size_max = 0;
3056 static atomic_uint_fast32_t cs_blob_count_peak = 0;
3057 
3058 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
3059 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
3060 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
3061 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
3062 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
3063 
3064 /*
3065  * Function: csblob_parse_teamid
3066  *
3067  * Description: This function returns a pointer to the team id
3068  *               stored within the codedirectory of the csblob.
3069  *               If the codedirectory predates team-ids, it returns
3070  *               NULL.
3071  *               This does not copy the name but returns a pointer to
3072  *               it within the CD. Consequently, the CD must remain
3073  *               available for as long as the pointer is used.
3074  */
3075 
3076 static const char *
3077 csblob_parse_teamid(struct cs_blob *csblob)
3078 {
3079 	const CS_CodeDirectory *cd;
3080 
3081 	cd = csblob->csb_cd;
3082 
3083 	if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3084 		return NULL;
3085 	}
3086 
3087 	if (cd->teamOffset == 0) {
3088 		return NULL;
3089 	}
3090 
3091 	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
3092 	if (cs_debug > 1) {
3093 		printf("found team-id %s in cdblob\n", name);
3094 	}
3095 
3096 	return name;
3097 }
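/*
 * Usage sketch (illustrative, not part of the original file): because the
 * returned pointer aliases the code directory rather than copying it, a
 * caller that needs the team id beyond the blob's lifetime must copy it
 * out while csb_cd is still valid ("buf" is a hypothetical caller-owned
 * buffer).
 *
 *	const char *tid = csblob_parse_teamid(blob);
 *	if (tid != NULL) {
 *		strlcpy(buf, tid, sizeof(buf));
 *	}
 */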
3098 
3099 kern_return_t
3100 ubc_cs_blob_allocate(
3101 	vm_offset_t     *blob_addr_p,
3102 	vm_size_t       *blob_size_p)
3103 {
3104 	kern_return_t   kr = KERN_FAILURE;
3105 	vm_size_t       allocation_size = 0;
3106 
3107 	if (!blob_addr_p || !blob_size_p) {
3108 		return KERN_INVALID_ARGUMENT;
3109 	}
3110 	allocation_size = *blob_size_p;
3111 
3112 	if (ubc_cs_blob_pagewise_allocate(allocation_size) == true) {
3113 		/* Round up to page size */
3114 		allocation_size = round_page(allocation_size);
3115 
3116 		/* Allocate page-wise */
3117 		kr = kmem_alloc(
3118 			kernel_map,
3119 			blob_addr_p,
3120 			allocation_size,
3121 			KMA_KOBJECT | KMA_DATA | KMA_ZERO,
3122 			VM_KERN_MEMORY_SECURITY);
3123 	} else {
3124 		*blob_addr_p = (vm_offset_t)kalloc_data_tag(
3125 			allocation_size,
3126 			Z_WAITOK | Z_ZERO,
3127 			VM_KERN_MEMORY_SECURITY);
3128 
3129 		assert(*blob_addr_p != 0);
3130 		kr = KERN_SUCCESS;
3131 	}
3132 
3133 	if (kr == KERN_SUCCESS) {
3134 		*blob_size_p = allocation_size;
3135 	}
3136 
3137 	return kr;
3138 }
3139 
3140 void
3141 ubc_cs_blob_deallocate(
3142 	vm_offset_t     blob_addr,
3143 	vm_size_t       blob_size)
3144 {
3145 	if (ubc_cs_blob_pagewise_allocate(blob_size) == true) {
3146 		kmem_free(kernel_map, blob_addr, blob_size);
3147 	} else {
3148 		kfree_data(blob_addr, blob_size);
3149 	}
3150 }
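/*
 * Usage sketch (illustrative, not part of the original file): allocation
 * and deallocation must stay paired through these helpers, and the caller
 * must free with the possibly rounded size written back through
 * blob_size_p, since the size decides between the kmem and kalloc paths
 * ("length" is a hypothetical requested size).
 *
 *	vm_offset_t addr = 0;
 *	vm_size_t size = length;
 *	if (ubc_cs_blob_allocate(&addr, &size) == KERN_SUCCESS) {
 *		// ... copy the signature blob into place ...
 *		ubc_cs_blob_deallocate(addr, size);
 *	}
 */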
3151 
3152 /*
3153  * Some codesigned files use a lowest common denominator page size of
3154  * 4KiB, but can be used on systems that have a runtime page size of
3155  * 16KiB. Since faults will only occur on 16KiB ranges in
3156  * cs_validate_range(), we can convert the original Code Directory to
3157  * a multi-level scheme where groups of 4 hashes are combined to form
3158  * a new hash, which represents 16KiB in the on-disk file.  This can
3159  * reduce the wired memory requirement for the Code Directory by
3160  * 75%. Care must be taken for binaries that use the "fourk" VM pager
3161  * for unaligned access, which may still attempt to validate on
3162  * non-16KiB multiples for compatibility with 3rd party binaries.
3163  */
3164 static boolean_t
3165 ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
3166 {
3167 	const CS_CodeDirectory *cd;
3168 
3169 #if CODE_SIGNING_MONITOR
3170 	// TODO: <rdar://problem/30954826>
3171 	if (csm_enabled() == true) {
3172 		return FALSE;
3173 	}
3174 #endif
3175 
3176 	/*
3177 	 * Only applies to binaries that ship as part of the OS,
3178 	 * primarily the shared cache.
3179 	 */
3180 	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3181 		return FALSE;
3182 	}
3183 
3184 	/*
3185 	 * If the runtime page size matches the code signing page
3186 	 * size, there is no work to do.
3187 	 */
3188 	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3189 		return FALSE;
3190 	}
3191 
3192 	cd = blob->csb_cd;
3193 
3194 	/*
3195 	 * There must be a valid integral multiple of hashes
3196 	 */
3197 	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3198 		return FALSE;
3199 	}
3200 
3201 	/*
3202 	 * Scatter lists must also have ranges that have an integral number of hashes
3203 	 */
3204 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3205 		const SC_Scatter *scatter = (const SC_Scatter*)
3206 		    ((const char*)cd + ntohl(cd->scatterOffset));
3207 		/* iterate all scatter structs to make sure they are all aligned */
3208 		do {
3209 			uint32_t sbase = ntohl(scatter->base);
3210 			uint32_t scount = ntohl(scatter->count);
3211 
3212 			/* last scatter? */
3213 			if (scount == 0) {
3214 				break;
3215 			}
3216 
3217 			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3218 				return FALSE;
3219 			}
3220 
3221 			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3222 				return FALSE;
3223 			}
3224 
3225 			scatter++;
3226 		} while (1);
3227 	}
3228 
3229 	/* Covered range must be a multiple of the new page size */
3230 	if (ntohl(cd->codeLimit) & PAGE_MASK) {
3231 		return FALSE;
3232 	}
3233 
3234 	/* All checks pass */
3235 	return TRUE;
3236 }
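/*
 * Worked example (illustrative, not part of the original file): with a
 * 4KiB code signing page (csb_hash_pageshift == 12) on a 16KiB runtime
 * page (PAGE_SHIFT == 14), PAGE_MASK >> csb_hash_pageshift == 3, so the
 * checks above require nCodeSlots, scatter bases, and scatter counts to
 * be multiples of 4.  Each group of 4 per-4KiB hashes is then combined
 * into one per-16KiB hash, keeping 1 slot in 4: the 75% wired-memory
 * saving cited above.
 */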
3237 
3238 /*
3239  * Reconstruct a cs_blob with the code signature fields. This helper function
3240  * is useful because a lot of things often change the base address of the code
3241  * signature blob, which requires reconstructing some of the other pointers
3242  * within.
3243  */
3244 static errno_t
3245 ubc_cs_blob_reconstruct(
3246 	struct cs_blob *cs_blob,
3247 	const vm_address_t signature_addr,
3248 	const vm_address_t signature_size,
3249 	const vm_offset_t code_directory_offset)
3250 {
3251 	const CS_CodeDirectory *code_directory = NULL;
3252 
3253 	/* Setup the signature blob address */
3254 	cs_blob->csb_mem_kaddr = (void*)signature_addr;
3255 	cs_blob->csb_mem_size = signature_size;
3256 
3257 	/* Setup the code directory in the blob */
3258 	code_directory = (const CS_CodeDirectory*)(signature_addr + code_directory_offset);
3259 	cs_blob->csb_cd = code_directory;
3260 
3261 	/* Setup the XML entitlements */
3262 	cs_blob->csb_entitlements_blob = csblob_find_blob_bytes(
3263 		(uint8_t*)signature_addr,
3264 		signature_size,
3265 		CSSLOT_ENTITLEMENTS,
3266 		CSMAGIC_EMBEDDED_ENTITLEMENTS);
3267 
3268 	/* Setup the DER entitlements */
3269 	cs_blob->csb_der_entitlements_blob = csblob_find_blob_bytes(
3270 		(uint8_t*)signature_addr,
3271 		signature_size,
3272 		CSSLOT_DER_ENTITLEMENTS,
3273 		CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3274 
3275 	return 0;
3276 }
3277 
3278 /*
3279  * Given a validated cs_blob, we reformat the structure to only include
3280  * the blobs which are required by the kernel for our current platform.
3281  * This saves significant memory with agile signatures.
3282  *
3283  * To support rewriting the code directory, potentially through
3284  * multilevel hashes, we provide a mechanism to allocate a code directory
3285  * of a specified size and zero it out --> caller can fill it in.
3286  *
3287  * We don't need to perform a lot of overflow checks as the assumption
3288  * here is that the cs_blob has already been validated.
3289  */
3290 static errno_t
3291 ubc_cs_reconstitute_code_signature(
3292 	const struct cs_blob * const blob,
3293 	vm_address_t * const ret_mem_kaddr,
3294 	vm_size_t * const ret_mem_size,
3295 	vm_size_t code_directory_size,
3296 	CS_CodeDirectory ** const code_directory
3297 	)
3298 {
3299 	vm_address_t new_blob_addr = 0;
3300 	vm_size_t new_blob_size = 0;
3301 	vm_size_t new_code_directory_size = 0;
3302 	const CS_GenericBlob *best_code_directory = NULL;
3303 	const CS_GenericBlob *first_code_directory = NULL;
3304 	const CS_GenericBlob *der_entitlements_blob = NULL;
3305 	const CS_GenericBlob *entitlements_blob = NULL;
3306 	const CS_GenericBlob *cms_blob = NULL;
3307 	const CS_GenericBlob *launch_constraint_self = NULL;
3308 	const CS_GenericBlob *launch_constraint_parent = NULL;
3309 	const CS_GenericBlob *launch_constraint_responsible = NULL;
3310 	const CS_GenericBlob *library_constraint = NULL;
3311 	CS_SuperBlob *superblob = NULL;
3312 	uint32_t num_blobs = 0;
3313 	uint32_t blob_index = 0;
3314 	uint32_t blob_offset = 0;
3315 	kern_return_t ret;
3316 	int err;
3317 
3318 	if (!blob) {
3319 		if (cs_debug > 1) {
3320 			printf("CODE SIGNING: CS Blob passed in is NULL\n");
3321 		}
3322 		return EINVAL;
3323 	}
3324 
3325 	best_code_directory = (const CS_GenericBlob*)blob->csb_cd;
3326 	if (!best_code_directory) {
3327 		/* This case can never happen, and it is a sign of bad things */
3328 		panic("CODE SIGNING: Validated CS Blob has no code directory");
3329 	}
3330 
3331 	new_code_directory_size = code_directory_size;
3332 	if (new_code_directory_size == 0) {
3333 		new_code_directory_size = ntohl(best_code_directory->length);
3334 	}
3335 
3336 	/*
3337 	 * A code signature can contain multiple code directories, each of which contains hashes
3338 	 * of pages based on a hashing algorithm. The kernel selects which hashing algorithm is
3339 	 * the strongest, and consequently, marks one of these code directories as the best
3340 	 * matched one. More often than not, the best matched one is _not_ the first one.
3341 	 *
3342 	 * However, the CMS blob which cryptographically verifies the code signature is only
3343 	 * signed against the first code directory. Therefore, if the CMS blob is present, we also
3344 	 * need the first code directory to be able to verify it. Given this, we organize the
3345 	 * new cs_blob in the following order:
3346 	 *
3347 	 * 1. best code directory
3348 	 * 2. DER encoded entitlements blob (if present)
3349 	 * 3. launch constraint self (if present)
3350 	 * 4. launch constraint parent (if present)
3351 	 * 5. launch constraint responsible (if present)
3352 	 * 6. library constraint (if present)
3353 	 * 7. entitlements blob (if present)
3354 	 * 8. cms blob (if present)
3355 	 * 9. first code directory (if not already the best match, and if cms blob is present)
3356 	 *
3357 	 * This order is chosen deliberately, as later on, we expect to get rid of the CMS blob
3358 	 * and the first code directory once their verification is complete.
3359 	 */
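
	/*
	 * In other words, the size accounting below computes:
	 *
	 *	new_blob_size = sizeof(CS_SuperBlob)
	 *	    + num_blobs * sizeof(CS_BlobIndex)
	 *	    + sum of ntohl(length) over every blob we keep
	 *
	 * with one CS_BlobIndex entry reserved per kept blob.
	 */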
3360 
3361 	/* Storage for the super blob header */
3362 	new_blob_size += sizeof(CS_SuperBlob);
3363 
3364 	/* Guaranteed storage for the best code directory */
3365 	new_blob_size += sizeof(CS_BlobIndex);
3366 	new_blob_size += new_code_directory_size;
3367 	num_blobs += 1;
3368 
3369 	/* Conditional storage for the DER entitlements blob */
3370 	der_entitlements_blob = blob->csb_der_entitlements_blob;
3371 	if (der_entitlements_blob) {
3372 		new_blob_size += sizeof(CS_BlobIndex);
3373 		new_blob_size += ntohl(der_entitlements_blob->length);
3374 		num_blobs += 1;
3375 	}
3376 
3377 	/* Conditional storage for the launch constraints self blob */
3378 	launch_constraint_self = csblob_find_blob_bytes(
3379 		(const uint8_t *)blob->csb_mem_kaddr,
3380 		blob->csb_mem_size,
3381 		CSSLOT_LAUNCH_CONSTRAINT_SELF,
3382 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3383 	if (launch_constraint_self) {
3384 		new_blob_size += sizeof(CS_BlobIndex);
3385 		new_blob_size += ntohl(launch_constraint_self->length);
3386 		num_blobs += 1;
3387 	}
3388 
3389 	/* Conditional storage for the launch constraints parent blob */
3390 	launch_constraint_parent = csblob_find_blob_bytes(
3391 		(const uint8_t *)blob->csb_mem_kaddr,
3392 		blob->csb_mem_size,
3393 		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3394 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3395 	if (launch_constraint_parent) {
3396 		new_blob_size += sizeof(CS_BlobIndex);
3397 		new_blob_size += ntohl(launch_constraint_parent->length);
3398 		num_blobs += 1;
3399 	}
3400 
3401 	/* Conditional storage for the launch constraints responsible blob */
3402 	launch_constraint_responsible = csblob_find_blob_bytes(
3403 		(const uint8_t *)blob->csb_mem_kaddr,
3404 		blob->csb_mem_size,
3405 		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3406 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3407 	if (launch_constraint_responsible) {
3408 		new_blob_size += sizeof(CS_BlobIndex);
3409 		new_blob_size += ntohl(launch_constraint_responsible->length);
3410 		num_blobs += 1;
3411 	}
3412 
3413 	/* Conditional storage for the library constraints blob */
3414 	library_constraint = csblob_find_blob_bytes(
3415 		(const uint8_t *)blob->csb_mem_kaddr,
3416 		blob->csb_mem_size,
3417 		CSSLOT_LIBRARY_CONSTRAINT,
3418 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3419 	if (library_constraint) {
3420 		new_blob_size += sizeof(CS_BlobIndex);
3421 		new_blob_size += ntohl(library_constraint->length);
3422 		num_blobs += 1;
3423 	}
3424 
3425 	/* Conditional storage for the entitlements blob */
3426 	entitlements_blob = blob->csb_entitlements_blob;
3427 	if (entitlements_blob) {
3428 		new_blob_size += sizeof(CS_BlobIndex);
3429 		new_blob_size += ntohl(entitlements_blob->length);
3430 		num_blobs += 1;
3431 	}
3432 
3433 	/* Conditional storage for the CMS blob */
3434 	cms_blob = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
3435 	if (cms_blob) {
3436 		new_blob_size += sizeof(CS_BlobIndex);
3437 		new_blob_size += ntohl(cms_blob->length);
3438 		num_blobs += 1;
3439 	}
3440 
3441 	/*
3442 	 * Conditional storage for the first code directory.
3443 	 * This is only needed if a CMS blob exists and the best code directory isn't already
3444 	 * the first one. It is an error if we find a CMS blob but do not find a first code directory.
3445 	 */
3446 	if (cms_blob) {
3447 		first_code_directory = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);
3448 		if (first_code_directory == best_code_directory) {
3449 			/* We don't need the first code directory anymore, since the best one is already it */
3450 			first_code_directory = NULL;
3451 		} else if (first_code_directory) {
3452 			new_blob_size += sizeof(CS_BlobIndex);
3453 			new_blob_size += ntohl(first_code_directory->length);
3454 			num_blobs += 1;
3455 		} else {
3456 			printf("CODE SIGNING: Invalid CS Blob: found CMS blob but not a first code directory\n");
3457 			return EINVAL;
3458 		}
3459 	}
3460 
3461 	/*
3462 	 * The blob size could be rounded up to the page size here, so we keep a copy
3463 	 * of the actual superblob length as well.
3464 	 */
3465 	vm_size_t new_blob_allocation_size = new_blob_size;
3466 	ret = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_allocation_size);
3467 	if (ret != KERN_SUCCESS) {
3468 		printf("CODE SIGNING: Failed to allocate memory for new code signing blob: %d\n", ret);
3469 		return ENOMEM;
3470 	}
3471 
3472 	/*
3473 	 * Fill out the superblob header and then all the blobs in the order listed
3474 	 * above.
3475 	 */
3476 	superblob = (CS_SuperBlob*)new_blob_addr;
3477 	superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3478 	superblob->length = htonl((uint32_t)new_blob_size);
3479 	superblob->count = htonl(num_blobs);
3480 
3481 	blob_index = 0;
3482 	blob_offset = sizeof(CS_SuperBlob) + (num_blobs * sizeof(CS_BlobIndex));
3483 
3484 	/* Best code directory */
3485 	superblob->index[blob_index].offset = htonl(blob_offset);
3486 	if (first_code_directory) {
3487 		superblob->index[blob_index].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES);
3488 	} else {
3489 		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3490 	}
3491 
3492 	if (code_directory_size > 0) {
3493 		/* We zero out the code directory, as we expect the caller to fill it in */
3494 		memset((void*)(new_blob_addr + blob_offset), 0, new_code_directory_size);
3495 	} else {
3496 		memcpy((void*)(new_blob_addr + blob_offset), best_code_directory, new_code_directory_size);
3497 	}
3498 
3499 	if (code_directory) {
3500 		*code_directory = (CS_CodeDirectory*)(new_blob_addr + blob_offset);
3501 	}
3502 	blob_offset += new_code_directory_size;
3503 
3504 	/* DER entitlements blob */
3505 	if (der_entitlements_blob) {
3506 		blob_index += 1;
3507 		superblob->index[blob_index].offset = htonl(blob_offset);
3508 		superblob->index[blob_index].type = htonl(CSSLOT_DER_ENTITLEMENTS);
3509 
3510 		memcpy((void*)(new_blob_addr + blob_offset), der_entitlements_blob, ntohl(der_entitlements_blob->length));
3511 		blob_offset += ntohl(der_entitlements_blob->length);
3512 	}
3513 
3514 	/* Launch constraints self blob */
3515 	if (launch_constraint_self) {
3516 		blob_index += 1;
3517 		superblob->index[blob_index].offset = htonl(blob_offset);
3518 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_SELF);
3519 
3520 		memcpy(
3521 			(void*)(new_blob_addr + blob_offset),
3522 			launch_constraint_self,
3523 			ntohl(launch_constraint_self->length));
3524 
3525 		blob_offset += ntohl(launch_constraint_self->length);
3526 	}
3527 
3528 	/* Launch constraints parent blob */
3529 	if (launch_constraint_parent) {
3530 		blob_index += 1;
3531 		superblob->index[blob_index].offset = htonl(blob_offset);
3532 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_PARENT);
3533 
3534 		memcpy(
3535 			(void*)(new_blob_addr + blob_offset),
3536 			launch_constraint_parent,
3537 			ntohl(launch_constraint_parent->length));
3538 
3539 		blob_offset += ntohl(launch_constraint_parent->length);
3540 	}
3541 
3542 	/* Launch constraints responsible blob */
3543 	if (launch_constraint_responsible) {
3544 		blob_index += 1;
3545 		superblob->index[blob_index].offset = htonl(blob_offset);
3546 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE);
3547 
3548 		memcpy(
3549 			(void*)(new_blob_addr + blob_offset),
3550 			launch_constraint_responsible,
3551 			ntohl(launch_constraint_responsible->length));
3552 
3553 		blob_offset += ntohl(launch_constraint_responsible->length);
3554 	}
3555 
3556 	/* library constraints blob */
3557 	if (library_constraint) {
3558 		blob_index += 1;
3559 		superblob->index[blob_index].offset = htonl(blob_offset);
3560 		superblob->index[blob_index].type = htonl(CSSLOT_LIBRARY_CONSTRAINT);
3561 
3562 		memcpy(
3563 			(void*)(new_blob_addr + blob_offset),
3564 			library_constraint,
3565 			ntohl(library_constraint->length));
3566 
3567 		blob_offset += ntohl(library_constraint->length);
3568 	}
3569 
3570 	/* Entitlements blob */
3571 	if (entitlements_blob) {
3572 		blob_index += 1;
3573 		superblob->index[blob_index].offset = htonl(blob_offset);
3574 		superblob->index[blob_index].type = htonl(CSSLOT_ENTITLEMENTS);
3575 
3576 		memcpy((void*)(new_blob_addr + blob_offset), entitlements_blob, ntohl(entitlements_blob->length));
3577 		blob_offset += ntohl(entitlements_blob->length);
3578 	}
3579 
3580 	/* CMS blob */
3581 	if (cms_blob) {
3582 		blob_index += 1;
3583 		superblob->index[blob_index].offset = htonl(blob_offset);
3584 		superblob->index[blob_index].type = htonl(CSSLOT_SIGNATURESLOT);
3585 		memcpy((void*)(new_blob_addr + blob_offset), cms_blob, ntohl(cms_blob->length));
3586 		blob_offset += ntohl(cms_blob->length);
3587 	}
3588 
3589 	/* First code directory */
3590 	if (first_code_directory) {
3591 		blob_index += 1;
3592 		superblob->index[blob_index].offset = htonl(blob_offset);
3593 		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3594 		memcpy((void*)(new_blob_addr + blob_offset), first_code_directory, ntohl(first_code_directory->length));
3595 		blob_offset += ntohl(first_code_directory->length);
3596 	}
3597 
3598 	/*
3599 	 * We only validate the blob if we copied in the best code directory.
3600 	 * If the code directory size we were passed in wasn't 0, we memset the best
3601 	 * code directory to 0 and expect the caller to fill it in. In the same spirit, we
3602 	 * expect the caller to validate the code signature after they fill in the code
3603 	 * directory.
3604 	 */
3605 	if (code_directory_size == 0) {
3606 		const CS_CodeDirectory *validated_code_directory = NULL;
3607 		const CS_GenericBlob *validated_entitlements_blob = NULL;
3608 		const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3609 
3610 		ret = cs_validate_csblob(
3611 			(const uint8_t *)superblob,
3612 			new_blob_size,
3613 			&validated_code_directory,
3614 			&validated_entitlements_blob,
3615 			&validated_der_entitlements_blob);
3616 
3617 		if (ret) {
3618 			printf("unable to validate reconstituted cs_blob: %d\n", ret);
3619 			err = EINVAL;
3620 			goto fail;
3621 		}
3622 	}
3623 
3624 	if (ret_mem_kaddr) {
3625 		*ret_mem_kaddr = new_blob_addr;
3626 	}
3627 	if (ret_mem_size) {
3628 		*ret_mem_size = new_blob_allocation_size;
3629 	}
3630 
3631 	return 0;
3632 
3633 fail:
3634 	ubc_cs_blob_deallocate(new_blob_addr, new_blob_allocation_size);
3635 	return err;
3636 }
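
/*
 * Illustrative usage of the two calling modes (error handling omitted):
 *
 *	// Copy the best code directory verbatim; the result is validated
 *	// before it is returned.
 *	err = ubc_cs_reconstitute_code_signature(blob, &addr, &size, 0, &cd);
 *
 *	// Reserve a zeroed code directory of new_cdsize bytes instead; the
 *	// caller fills it in and must then validate the result itself with
 *	// cs_validate_csblob(), as the multilevel hash conversion below does.
 *	err = ubc_cs_reconstitute_code_signature(blob, &addr, &size,
 *	    new_cdsize, &cd);
 */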
3637 
3638 /*
3639  * We use this function to clear out parts of the code signature blob which
3640  * are no longer needed, freeing their memory back to the kernel. This is
3641  * needed since reconstitution includes extra data which is required only
3642  * for verification and serves no purpose once that completes.
3643  *
3644  * This results in significant memory reduction, especially for 3rd party apps
3645  * since we also get rid of the CMS blob.
3646  */
3647 static errno_t
3648 ubc_cs_reconstitute_code_signature_2nd_stage(
3649 	struct cs_blob *blob
3650 	)
3651 {
3652 	kern_return_t ret = KERN_FAILURE;
3653 	const CS_GenericBlob *launch_constraint_self = NULL;
3654 	const CS_GenericBlob *launch_constraint_parent = NULL;
3655 	const CS_GenericBlob *launch_constraint_responsible = NULL;
3656 	const CS_GenericBlob *library_constraint = NULL;
3657 	CS_SuperBlob *superblob = NULL;
3658 	uint32_t num_blobs = 0;
3659 	vm_size_t last_needed_blob_offset = 0;
3660 	vm_offset_t code_directory_offset = 0;
3661 
3662 	/*
3663 	 * Ordering of blobs we need to keep:
3664 	 * 1. Code directory
3665 	 * 2. DER encoded entitlements (if present)
3666 	 * 3. Launch constraints self (if present)
3667 	 * 4. Launch constraints parent (if present)
3668 	 * 5. Launch constraints responsible (if present)
3669 	 * 6. Library constraints (if present)
3670 	 *
3671 	 * We need to clear out the remaining page after these blobs end, and fix up
3672 	 * the superblob for the changes. Things get a little more complicated for
3673 	 * blobs which may not have been kmem_allocated. For those, we simply
3674 	 * allocate the new required space and copy into it.
3675 	 */
3676 
3677 	if (blob == NULL) {
3678 		printf("NULL blob passed in for 2nd stage reconstitution\n");
3679 		return EINVAL;
3680 	}
3681 	assert(blob->csb_reconstituted == true);
3682 
3683 	/* Ensure we're not page-wise allocated when in this function */
3684 	assert(ubc_cs_blob_pagewise_allocate(blob->csb_mem_size) == false);
3685 
3686 	if (!blob->csb_cd) {
3687 		/* This case can never happen, and it is a sign of bad things */
3688 		panic("validated cs_blob has no code directory");
3689 	}
3690 	superblob = (CS_SuperBlob*)blob->csb_mem_kaddr;
3691 
3692 	num_blobs = 1;
3693 	last_needed_blob_offset = ntohl(superblob->index[0].offset) + ntohl(blob->csb_cd->length);
3694 
3695 	/* Check for DER entitlements */
3696 	if (blob->csb_der_entitlements_blob) {
3697 		num_blobs += 1;
3698 		last_needed_blob_offset += ntohl(blob->csb_der_entitlements_blob->length);
3699 	}
3700 
3701 	/* Check for launch constraints self */
3702 	launch_constraint_self = csblob_find_blob_bytes(
3703 		(const uint8_t *)blob->csb_mem_kaddr,
3704 		blob->csb_mem_size,
3705 		CSSLOT_LAUNCH_CONSTRAINT_SELF,
3706 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3707 	if (launch_constraint_self) {
3708 		num_blobs += 1;
3709 		last_needed_blob_offset += ntohl(launch_constraint_self->length);
3710 	}
3711 
3712 	/* Check for launch constraints parent */
3713 	launch_constraint_parent = csblob_find_blob_bytes(
3714 		(const uint8_t *)blob->csb_mem_kaddr,
3715 		blob->csb_mem_size,
3716 		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3717 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3718 	if (launch_constraint_parent) {
3719 		num_blobs += 1;
3720 		last_needed_blob_offset += ntohl(launch_constraint_parent->length);
3721 	}
3722 
3723 	/* Check for launch constraints responsible */
3724 	launch_constraint_responsible = csblob_find_blob_bytes(
3725 		(const uint8_t *)blob->csb_mem_kaddr,
3726 		blob->csb_mem_size,
3727 		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3728 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3729 	if (launch_constraint_responsible) {
3730 		num_blobs += 1;
3731 		last_needed_blob_offset += ntohl(launch_constraint_responsible->length);
3732 	}
3733 
3734 	/* Check for library constraint */
3735 	library_constraint = csblob_find_blob_bytes(
3736 		(const uint8_t *)blob->csb_mem_kaddr,
3737 		blob->csb_mem_size,
3738 		CSSLOT_LIBRARY_CONSTRAINT,
3739 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3740 	if (library_constraint) {
3741 		num_blobs += 1;
3742 		last_needed_blob_offset += ntohl(library_constraint->length);
3743 	}
3744 
3745 	superblob->count = htonl(num_blobs);
3746 	superblob->length = htonl((uint32_t)last_needed_blob_offset);
3747 
3748 	/*
3749 	 * There is a chance that the code directory is marked within the superblob as an
3750 	 * alternate code directory. This happens when the first code directory isn't the
3751 	 * best one chosen by the kernel, so to be able to access both the first and the best,
3752 	 * we save the best one as an alternate one. Since we're getting rid of the first one
3753 	 * here, we mark the best one as the first one.
3754 	 */
3755 	superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3756 
3757 	vm_address_t new_superblob = 0;
3758 	vm_size_t new_superblob_size = last_needed_blob_offset;
3759 
3760 	ret = ubc_cs_blob_allocate(&new_superblob, &new_superblob_size);
3761 	if (ret != KERN_SUCCESS) {
3762 		printf("unable to allocate memory for 2nd stage reconstitution: %d\n", ret);
3763 		return ENOMEM;
3764 	}
3765 	assert(new_superblob_size == last_needed_blob_offset);
3766 
3767 	/* Calculate the code directory offset */
3768 	code_directory_offset = (vm_offset_t)blob->csb_cd - (vm_offset_t)blob->csb_mem_kaddr;
3769 
3770 	/* Copy in the updated superblob into the new memory */
3771 	memcpy((void*)new_superblob, superblob, new_superblob_size);
3772 
3773 	/* Free the old code signature and old memory */
3774 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3775 
3776 	/* Reconstruct critical fields in the blob object */
3777 	ubc_cs_blob_reconstruct(
3778 		blob,
3779 		new_superblob,
3780 		new_superblob_size,
3781 		code_directory_offset);
3782 
3783 	/* XML entitlements should've been removed */
3784 	assert(blob->csb_entitlements_blob == NULL);
3785 
3786 	const CS_CodeDirectory *validated_code_directory = NULL;
3787 	const CS_GenericBlob *validated_entitlements_blob = NULL;
3788 	const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3789 
3790 	ret = cs_validate_csblob(
3791 		(const uint8_t*)blob->csb_mem_kaddr,
3792 		blob->csb_mem_size,
3793 		&validated_code_directory,
3794 		&validated_entitlements_blob,
3795 		&validated_der_entitlements_blob);
3796 	if (ret) {
3797 		printf("unable to validate code signature after 2nd stage reconstitution: %d\n", ret);
3798 		return EINVAL;
3799 	}
3800 
3801 	return 0;
3802 }
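
/*
 * A rough before/after sketch of 2nd stage reconstitution (which optional
 * blobs appear depends on the signature):
 *
 *	before: [best CD][DER ents][constraints...][XML ents][CMS][first CD]
 *	after:  [best CD][DER ents][constraints...]
 *
 * The CMS blob and the first code directory are only needed while verifying
 * the signature, and XML entitlements are deprecated, so only the blobs up
 * to last_needed_blob_offset survive.
 */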
3803 
3804 static int
3805 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3806 {
3807 	const CS_CodeDirectory  *old_cd, *cd;
3808 	CS_CodeDirectory        *new_cd;
3809 	const CS_GenericBlob *entitlements;
3810 	const CS_GenericBlob *der_entitlements;
3811 	vm_offset_t     new_blob_addr;
3812 	vm_size_t       new_blob_size;
3813 	vm_size_t       new_cdsize;
3814 	int                             error;
3815 
3816 	uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3817 
3818 	if (cs_debug > 1) {
3819 		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3820 		    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3821 	}
3822 
3823 	old_cd = blob->csb_cd;
3824 
3825 	/* Up to the hashes, we can copy all data */
3826 	new_cdsize  = ntohl(old_cd->hashOffset);
3827 	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3828 
3829 	error = ubc_cs_reconstitute_code_signature(blob, &new_blob_addr, &new_blob_size, new_cdsize, &new_cd);
3830 	if (error != 0) {
3831 		printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3832 		return error;
3833 	}
3834 	entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3835 	der_entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3836 
3837 	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3838 
3839 	/* Update fields in the Code Directory structure */
3840 	new_cd->length = htonl((uint32_t)new_cdsize);
3841 
3842 	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3843 	nCodeSlots >>= hashes_per_new_hash_shift;
3844 	new_cd->nCodeSlots = htonl(nCodeSlots);
3845 
3846 	new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
3847 
3848 	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3849 		SC_Scatter *scatter = (SC_Scatter*)
3850 		    ((char *)new_cd + ntohl(new_cd->scatterOffset));
3851 		/* iterate all scatter structs to scale their counts */
3852 		do {
3853 			uint32_t scount = ntohl(scatter->count);
3854 			uint32_t sbase  = ntohl(scatter->base);
3855 
3856 			/* last scatter? */
3857 			if (scount == 0) {
3858 				break;
3859 			}
3860 
3861 			scount >>= hashes_per_new_hash_shift;
3862 			scatter->count = htonl(scount);
3863 
3864 			sbase >>= hashes_per_new_hash_shift;
3865 			scatter->base = htonl(sbase);
3866 
3867 			scatter++;
3868 		} while (1);
3869 	}
3870 
3871 	/* For each group of hashes, hash them together */
3872 	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3873 	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3874 
3875 	uint32_t hash_index;
3876 	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3877 		union cs_hash_union     mdctx;
3878 
3879 		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3880 		const unsigned char *src = src_base + hash_index * source_hash_len;
3881 		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3882 
3883 		blob->csb_hashtype->cs_init(&mdctx);
3884 		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3885 		blob->csb_hashtype->cs_final(dst, &mdctx);
3886 	}
3887 
3888 	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements, &der_entitlements);
3889 	if (error != 0) {
3890 		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3891 		    error);
3892 
3893 		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3894 		return error;
3895 	}
3896 
3897 	/* New Code Directory is ready for use, swap it out in the blob structure */
3898 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3899 
3900 	blob->csb_mem_size = new_blob_size;
3901 	blob->csb_mem_kaddr = (void *)new_blob_addr;
3902 	blob->csb_cd = cd;
3903 	blob->csb_entitlements_blob = NULL;
3904 
3905 	blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
3906 	blob->csb_reconstituted = true;
3907 
3908 	/* The blob has some cached attributes of the Code Directory, so update those */
3909 
3910 	blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
3911 
3912 	blob->csb_hash_pageshift = PAGE_SHIFT;
3913 	blob->csb_end_offset = ntohl(cd->codeLimit);
3914 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3915 		const SC_Scatter *scatter = (const SC_Scatter*)
3916 		    ((const char*)cd + ntohl(cd->scatterOffset));
3917 		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3918 	} else {
3919 		blob->csb_start_offset = 0;
3920 	}
3921 
3922 	return 0;
3923 }
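
/*
 * A worked example of the conversion arithmetic above, assuming 4K signature
 * pages (csb_hash_pageshift == 12) and 16K kernel pages (PAGE_SHIFT == 14):
 *
 *	hashes_per_new_hash_shift = 14 - 12 = 2
 *	source_hash_len = old hashSize << 2 (four source hashes per new slot)
 *	nCodeSlots >>= 2 (one new hash covers four old ones)
 *
 * Each new slot hash is the digest of the four concatenated 4K-page hashes
 * it replaces, shrinking the hash table to a quarter of its original size.
 */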
3924 
3925 static void
3926 cs_blob_cleanup(struct cs_blob *blob)
3927 {
3928 	if (blob->csb_entitlements != NULL) {
3929 		amfi->OSEntitlements_invalidate(blob->csb_entitlements);
3930 		osobject_release(blob->csb_entitlements);
3931 		blob->csb_entitlements = NULL;
3932 	}
3933 
3934 #if CODE_SIGNING_MONITOR
3935 	if (blob->csb_csm_obj != NULL) {
3936 		/* Unconditionally remove any profiles we may have associated */
3937 		csm_disassociate_provisioning_profile(blob->csb_csm_obj);
3938 
3939 		kern_return_t kr = csm_unregister_code_signature(blob->csb_csm_obj);
3940 		if (kr == KERN_SUCCESS) {
3941 			/*
3942 			 * If the code signature was monitor managed, the monitor will have freed it
3943 			 * itself in the unregistration call. This means we do not need to free the
3944 			 * data here.
3945 			 */
3946 			if (blob->csb_csm_managed) {
3947 				blob->csb_mem_kaddr = NULL;
3948 				blob->csb_mem_size = 0;
3949 			}
3950 		}
3951 	}
3952 
3953 	/* Unconditionally remove references to the monitor */
3954 	blob->csb_csm_obj = NULL;
3955 	blob->csb_csm_managed = false;
3956 #endif
3957 
3958 	if (blob->csb_mem_kaddr) {
3959 		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3960 	}
3961 	blob->csb_mem_kaddr = NULL;
3962 	blob->csb_mem_size = 0;
3963 }
3964 
3965 static void
3966 cs_blob_ro_free(struct cs_blob *blob)
3967 {
3968 	struct cs_blob tmp;
3969 
3970 	if (blob != NULL) {
3971 		/*
3972 		 * cs_blob_cleanup clears fields, so we need to pass it a
3973 		 * mutable copy.
3974 		 */
3975 		tmp = *blob;
3976 		cs_blob_cleanup(&tmp);
3977 
3978 		zfree_ro(ZONE_ID_CS_BLOB, blob);
3979 	}
3980 }
3981 
3982 /*
3983  * Free a cs_blob previously created by cs_blob_create_validated.
3984  */
3985 void
3986 cs_blob_free(
3987 	struct cs_blob *blob)
3988 {
3989 	cs_blob_ro_free(blob);
3990 }
3991 
3992 static int
3993 cs_blob_init_validated(
3994 	vm_address_t * const addr,
3995 	vm_size_t size,
3996 	struct cs_blob *blob,
3997 	CS_CodeDirectory const ** const ret_cd)
3998 {
3999 	int error = EINVAL;
4000 	const CS_CodeDirectory *cd = NULL;
4001 	const CS_GenericBlob *entitlements = NULL;
4002 	const CS_GenericBlob *der_entitlements = NULL;
4003 	union cs_hash_union mdctx;
4004 	size_t length;
4005 
4006 	bzero(blob, sizeof(*blob));
4007 
4008 	/* fill in the new blob */
4009 	blob->csb_mem_size = size;
4010 	blob->csb_mem_offset = 0;
4011 	blob->csb_mem_kaddr = (void *)*addr;
4012 	blob->csb_flags = 0;
4013 	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
4014 	blob->csb_platform_binary = 0;
4015 	blob->csb_platform_path = 0;
4016 	blob->csb_teamid = NULL;
4017 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4018 	blob->csb_supplement_teamid = NULL;
4019 #endif
4020 	blob->csb_entitlements_blob = NULL;
4021 	blob->csb_der_entitlements_blob = NULL;
4022 	blob->csb_entitlements = NULL;
4023 #if CODE_SIGNING_MONITOR
4024 	blob->csb_csm_obj = NULL;
4025 	blob->csb_csm_managed = false;
4026 #endif
4027 	blob->csb_reconstituted = false;
4028 	blob->csb_validation_category = CS_VALIDATION_CATEGORY_INVALID;
4029 
4030 	/* Transfer ownership. Even on error, this function will deallocate */
4031 	*addr = 0;
4032 
4033 	/*
4034 	 * Validate the blob's contents
4035 	 */
4036 	length = (size_t) size;
4037 	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
4038 	    length, &cd, &entitlements, &der_entitlements);
4039 	if (error) {
4040 		if (cs_debug) {
4041 			printf("CODESIGNING: csblob invalid: %d\n", error);
4042 		}
4043 		/*
4044 		 * The vnode checker can't make the rest of this function
4045 		 * succeed if csblob validation failed, so bail */
4046 		goto out;
4047 	} else {
4048 		const unsigned char *md_base;
4049 		uint8_t hash[CS_HASH_MAX_SIZE];
4050 		int md_size;
4051 		vm_offset_t hash_pagemask;
4052 
4053 		blob->csb_cd = cd;
4054 		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
4055 		blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
4056 		blob->csb_hashtype = cs_find_md(cd->hashType);
4057 		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
4058 			panic("validated CodeDirectory but unsupported type");
4059 		}
4060 
4061 		blob->csb_hash_pageshift = cd->pageSize;
4062 		hash_pagemask = (1U << cd->pageSize) - 1;
4063 		blob->csb_hash_firstlevel_pageshift = 0;
4064 		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
4065 		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
4066 		if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
4067 			const SC_Scatter *scatter = (const SC_Scatter*)
4068 			    ((const char*)cd + ntohl(cd->scatterOffset));
4069 			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
4070 		} else {
4071 			blob->csb_start_offset = 0;
4072 		}
4073 		/* compute the blob's cdhash */
4074 		md_base = (const unsigned char *) cd;
4075 		md_size = ntohl(cd->length);
4076 
4077 		blob->csb_hashtype->cs_init(&mdctx);
4078 		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
4079 		blob->csb_hashtype->cs_final(hash, &mdctx);
4080 
4081 		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
4082 
4083 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4084 		blob->csb_linkage_hashtype = NULL;
4085 		if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
4086 		    ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
4087 			blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
4088 
4089 			if (blob->csb_linkage_hashtype != NULL) {
4090 				memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
4091 				    CS_CDHASH_LEN);
4092 			}
4093 		}
4094 #endif
4095 	}
4096 
4097 	error = 0;
4098 
4099 out:
4100 	if (error != 0) {
4101 		cs_blob_cleanup(blob);
4102 		blob = NULL;
4103 		cd = NULL;
4104 	}
4105 
4106 	if (ret_cd != NULL) {
4107 		*ret_cd = cd;
4108 	}
4109 
4110 	return error;
4111 }
4112 
4113 /*
4114  * Validate the code signature blob, create a struct cs_blob wrapper
4115  * and return it together with a pointer to the chosen code directory
4116  * and entitlements blob.
4117  *
4118  * Note that this takes ownership of the memory as addr, mainly because
4119  * this function can actually replace the passed in blob with another
4120  * one, e.g. when performing multilevel hashing optimization.
4121  */
4122 int
4123 cs_blob_create_validated(
4124 	vm_address_t * const            addr,
4125 	vm_size_t                       size,
4126 	struct cs_blob ** const         ret_blob,
4127 	CS_CodeDirectory const ** const     ret_cd)
4128 {
4129 	struct cs_blob blob = {};
4130 	struct cs_blob *ro_blob;
4131 	int error;
4132 
4133 	if (ret_blob) {
4134 		*ret_blob = NULL;
4135 	}
4136 
4137 	if ((error = cs_blob_init_validated(addr, size, &blob, ret_cd)) != 0) {
4138 		return error;
4139 	}
4140 
4141 	if (ret_blob != NULL) {
4142 		ro_blob = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4143 		zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, &blob);
4144 		*ret_blob = ro_blob;
4145 	}
4146 
4147 	return error;
4148 }
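
/*
 * Illustrative call pattern; `addr` stands for a hypothetical signature
 * allocation. Ownership of the memory transfers to the callee even on
 * failure, so the caller must not free it afterwards:
 *
 *	vm_address_t addr = ...;   // holds the raw code signature
 *	struct cs_blob *blob = NULL;
 *	const CS_CodeDirectory *cd = NULL;
 *	int err = cs_blob_create_validated(&addr, size, &blob, &cd);
 *	// addr has been zeroed by the callee at this point
 */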
4149 
4150 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4151 static void
4152 cs_blob_supplement_free(struct cs_blob * const blob)
4153 {
4154 	void *teamid;
4155 
4156 	if (blob != NULL) {
4157 		if (blob->csb_supplement_teamid != NULL) {
4158 			teamid = blob->csb_supplement_teamid;
4159 			vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
4160 			kfree_data(teamid, teamid_size);
4161 		}
4162 		cs_blob_ro_free(blob);
4163 	}
4164 }
4165 #endif
4166 
4167 static void
4168 ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
4169 {
4170 	/* Note that the atomic ops are not enough to guarantee
4171 	 * correctness: If a blob with an intermediate size is inserted
4172 	 * concurrently, we can lose a peak value assignment. But these
4173 	 * statistics are only advisory anyway, so we're not going to
4174 	 * employ full locking here. (Consequently, we are also okay with
4175 	 * relaxed ordering of those accesses.)
4176 	 */
4177 
4178 	unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
4179 	if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
4180 		os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
4181 	}
4182 
4183 	size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
4184 
4185 	if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
4186 		os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
4187 	}
4188 	if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
4189 		os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
4190 	}
4191 }
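
/*
 * Example of the benign race described above: two concurrent insertions
 * observe new counts of 10 and 11. If the thread that saw 10 compares
 * against a stale peak, is preempted, and then stores after the other
 * thread has stored 11, the recorded peak regresses to 10 even though 11
 * blobs briefly coexisted. Since the statistics are advisory, this is
 * acceptable.
 */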
4192 
4193 static void
4194 cs_blob_set_cpu_type(struct cs_blob *blob, cpu_type_t cputype)
4195 {
4196 	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_type, &cputype);
4197 }
4198 
4199 __abortlike
4200 static void
4201 panic_cs_blob_backref_mismatch(struct cs_blob *blob, struct vnode *vp)
4202 {
4203 	panic("cs_blob vnode backref mismatch: blob=%p, vp=%p, "
4204 	    "blob->csb_vnode=%p", blob, vp, blob->csb_vnode);
4205 }
4206 
4207 void
4208 cs_blob_require(struct cs_blob *blob, vnode_t vp)
4209 {
4210 	zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), blob);
4211 
4212 	if (vp != NULL && __improbable(blob->csb_vnode != vp)) {
4213 		panic_cs_blob_backref_mismatch(blob, vp);
4214 	}
4215 }
4216 
4217 #if CODE_SIGNING_MONITOR
4218 
4219 /**
4220  * Independently verify the authenticity of the code signature through the monitor
4221  * environment. This is required as otherwise the monitor won't allow associations
4222  * of the code signature with address spaces.
4223  *
4224  * Once we've verified the code signature, we no longer need to keep around any
4225  * provisioning profiles we may have registered with it. AMFI associates profiles
4226  * with the monitor during its validation (which happens before the monitor's).
4227  */
4228 static errno_t
4229 verify_code_signature_monitor(
4230 	struct cs_blob *cs_blob)
4231 {
4232 	kern_return_t ret = KERN_DENIED;
4233 
4234 	ret = csm_verify_code_signature(cs_blob->csb_csm_obj);
4235 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4236 		printf("unable to verify code signature with monitor: %d\n", ret);
4237 		return EPERM;
4238 	}
4239 
4240 	ret = csm_disassociate_provisioning_profile(cs_blob->csb_csm_obj);
4241 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND) && (ret != KERN_NOT_SUPPORTED)) {
4242 		printf("unable to disassociate profile from code signature: %d\n", ret);
4243 		return EPERM;
4244 	}
4245 
4246 	/* Associate the OSEntitlements kernel object with the monitor */
4247 	ret = csm_associate_os_entitlements(cs_blob->csb_csm_obj, cs_blob->csb_entitlements);
4248 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4249 		printf("unable to associate OSEntitlements with monitor: %d\n", ret);
4250 		return EPERM;
4251 	}
4252 
4253 	return 0;
4254 }
4255 
4256 /**
4257  * Register the code signature with the code signing monitor environment. This
4258  * will effectively make the blob data immutable, either because the blob memory
4259  * will be allocated and managed directly by the monitor, or because the monitor
4260  * will lockdown the memory associated with the blob.
4261  */
4262 static errno_t
4263 register_code_signature_monitor(
4264 	struct vnode *vnode,
4265 	struct cs_blob *cs_blob,
4266 	vm_offset_t code_directory_offset)
4267 {
4268 	kern_return_t ret = KERN_DENIED;
4269 	vm_address_t monitor_signature_addr = 0;
4270 	void *monitor_sig_object = NULL;
4271 	const char *vnode_path_ptr = NULL;
4272 
4273 	/*
4274 	 * Attempt to resolve the path for this vnode and pass it in to the code
4275 	 * signing monitor during registration.
4276 	 */
4277 	int vnode_path_len = MAXPATHLEN;
4278 	char *vnode_path = kalloc_data(vnode_path_len, Z_WAITOK);
4279 
4280 	/*
4281 	 * Taking a reference on the vnode recursively can sometimes lead to a
4282 	 * deadlock on the system. Since we already have a vnode pointer, it means
4283 	 * the caller performed a vnode lookup, which implicitly takes a reference
4284 	 * on the vnode. However, there is more than just having a reference on a
4285 	 * vnode which is important. vnode's also have an iocount, and we must only
4286 	 * access a vnode which has an iocount of greater than 0. Thankfully, all
4287 	 * the conditions which lead to calling this function ensure that this
4288 	 * vnode is safe to access here.
4289 	 *
4290 	 * For more details: rdar://105819068.
4291 	 */
4292 	errno_t error = vn_getpath(vnode, vnode_path, &vnode_path_len);
4293 	if (error == 0) {
4294 		vnode_path_ptr = vnode_path;
4295 	}
4296 
4297 	ret = csm_register_code_signature(
4298 		(vm_address_t)cs_blob->csb_mem_kaddr,
4299 		cs_blob->csb_mem_size,
4300 		code_directory_offset,
4301 		vnode_path_ptr,
4302 		&monitor_sig_object,
4303 		&monitor_signature_addr);
4304 
4305 	kfree_data(vnode_path, MAXPATHLEN);
4306 	vnode_path_ptr = NULL;
4307 
4308 	if (ret == KERN_SUCCESS) {
4309 		/* Reconstruct the cs_blob if the monitor used its own allocation */
4310 		if (monitor_signature_addr != (vm_address_t)cs_blob->csb_mem_kaddr) {
4311 			vm_address_t monitor_signature_size = cs_blob->csb_mem_size;
4312 
4313 			/* Free the old memory for the blob */
4314 			ubc_cs_blob_deallocate(
4315 				(vm_address_t)cs_blob->csb_mem_kaddr,
4316 				cs_blob->csb_mem_size);
4317 
4318 			/* Reconstruct critical fields in the blob object */
4319 			ubc_cs_blob_reconstruct(
4320 				cs_blob,
4321 				monitor_signature_addr,
4322 				monitor_signature_size,
4323 				code_directory_offset);
4324 
4325 			/* Mark the signature as monitor managed */
4326 			cs_blob->csb_csm_managed = true;
4327 		}
4328 	} else if (ret != KERN_NOT_SUPPORTED) {
4329 		printf("unable to register code signature with monitor: %d\n", ret);
4330 		return EPERM;
4331 	}
4332 
4333 	/* Save the monitor handle for the signature object -- may be NULL */
4334 	cs_blob->csb_csm_obj = monitor_sig_object;
4335 
4336 	return 0;
4337 }
4338 
4339 #endif /* CODE_SIGNING_MONITOR */
4340 
4341 /**
4342  * Accelerate entitlements for a code signature object. When we have a code
4343  * signing monitor, this acceleration is done within the monitor which then
4344  * passes back a CoreEntitlements query context the kernel can use. When we
4345  * don't have a code signing monitor, we accelerate the queries within the
4346  * kernel memory itself.
4347  *
4348  * This function must be called when the storage for the code signature can
4349  * no longer change.
4350  */
4351 static errno_t
4352 accelerate_entitlement_queries(
4353 	struct cs_blob *cs_blob)
4354 {
4355 	kern_return_t ret = KERN_NOT_SUPPORTED;
4356 
4357 #if CODE_SIGNING_MONITOR
4358 	CEQueryContext_t ce_ctx = NULL;
4359 	const char *signing_id = NULL;
4360 
4361 	ret = csm_accelerate_entitlements(cs_blob->csb_csm_obj, &ce_ctx);
4362 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4363 		printf("unable to accelerate entitlements through the monitor: %d\n", ret);
4364 		return EPERM;
4365 	}
4366 
4367 	if (ret == KERN_SUCCESS) {
4368 		/* This call cannot fail at this stage */
4369 		ret = csm_acquire_signing_identifier(cs_blob->csb_csm_obj, &signing_id);
4370 		assert(ret == KERN_SUCCESS);
4371 
4372 		/* Adjust the OSEntitlements context with AMFI */
4373 		ret = amfi->OSEntitlements.adjustContextWithMonitor(
4374 			cs_blob->csb_entitlements,
4375 			ce_ctx,
4376 			cs_blob->csb_csm_obj,
4377 			signing_id,
4378 			cs_blob->csb_flags);
4379 		if (ret != KERN_SUCCESS) {
4380 			printf("unable to adjust OSEntitlements context with monitor: %d\n", ret);
4381 			return EPERM;
4382 		}
4383 
4384 		return 0;
4385 	}
4386 #endif
4387 
4388 	/*
4389 	 * If we reach here, then either we don't have a code signing monitor, or
4390 	 * the code signing monitor isn't enabled for code signing, in which case,
4391 	 * AMFI is going to accelerate the entitlements context and adjust its
4392 	 * context on its own.
4393 	 */
4394 	assert(ret == KERN_NOT_SUPPORTED);
4395 
4396 	ret = amfi->OSEntitlements.adjustContextWithoutMonitor(
4397 		cs_blob->csb_entitlements,
4398 		cs_blob);
4399 
4400 	if (ret != KERN_SUCCESS) {
4401 		printf("unable to adjust OSEntitlements context without monitor: %d\n", ret);
4402 		return EPERM;
4403 	}
4404 
4405 	return 0;
4406 }
4407 
4408 /**
4409  * Ensure and validate that some security critical code signing blobs haven't
4410  * been stripped off from the code signature. This can happen if an attacker
4411  * chose to load a code signature sans these critical blobs, or if there is a
4412  * bug in reconstitution logic which removes these blobs from the code signature.
4413  */
4414 static errno_t
4415 validate_auxiliary_signed_blobs(
4416 	struct cs_blob *cs_blob)
4417 {
4418 	struct cs_blob_identifier {
4419 		uint32_t cs_slot;
4420 		uint32_t cs_magic;
4421 	};
4422 
4423 	const struct cs_blob_identifier identifiers[] = {
4424 		{CSSLOT_LAUNCH_CONSTRAINT_SELF, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4425 		{CSSLOT_LAUNCH_CONSTRAINT_PARENT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4426 		{CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4427 		{CSSLOT_LIBRARY_CONSTRAINT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT}
4428 	};
4429 	const uint32_t num_identifiers = sizeof(identifiers) / sizeof(identifiers[0]);
4430 
4431 	for (uint32_t i = 0; i < num_identifiers; i++) {
4432 		errno_t err = csblob_find_special_slot_blob(
4433 			cs_blob,
4434 			identifiers[i].cs_slot,
4435 			identifiers[i].cs_magic,
4436 			NULL,
4437 			NULL);
4438 
4439 		if (err != 0) {
4440 			printf("unable to validate security-critical blob: %d [%u|%u]\n",
4441 			    err, identifiers[i].cs_slot, identifiers[i].cs_magic);
4442 
4443 			return EPERM;
4444 		}
4445 	}
4446 
4447 	return 0;
4448 }
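
/*
 * For example, checking just the self launch constraint slot would look
 * like this (illustrative):
 *
 *	err = csblob_find_special_slot_blob(cs_blob,
 *	    CSSLOT_LAUNCH_CONSTRAINT_SELF,
 *	    CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT,
 *	    NULL, NULL);
 *
 * Presumably a return of 0 covers both a slot that the signature never
 * claimed and one whose blob is present and matches, while a non-zero
 * return indicates a signed-for blob has been stripped or tampered with.
 */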
4449 
4450 /**
4451  * Setup multi-level hashing for the code signature. This isn't supported on most
4452  * shipping devices, but on ones where it is, it can result in significant savings
4453  * of memory from the code signature standpoint.
4454  *
4455  * Multi-level hashing is used to condense the code directory hashes in order to
4456  * improve memory consumption. We take four 4K page hashes, and condense them into
4457  * a single 16K hash, hence reducing the space consumed by the code directory by
4458  * roughly 75%.
4459  */
4460 static errno_t
4461 setup_multilevel_hashing(
4462 	struct cs_blob *cs_blob)
4463 {
4464 	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
4465 	errno_t err = -1;
4466 
4467 	/*
4468 	 * When we have a code signing monitor, we do not support multi-level hashing
4469 	 * since the code signature data is expected to be locked within memory which
4470 	 * cannot be written to by the kernel.
4471 	 *
4472 	 * Even when the code signing monitor isn't explicitly enabled, there are other
4473 	 * reasons for not performing multi-level hashing. For instance, Rosetta creates
4474 	 * issues with multi-level hashing on Apple Silicon Macs.
4475 	 */
4476 	code_signing_configuration(&monitor_type, NULL);
4477 	if (monitor_type != CS_MONITOR_TYPE_NONE) {
4478 		return 0;
4479 	}
4480 
4481 	/* We need to check if multi-level hashing is supported for this blob */
4482 	if (ubc_cs_supports_multilevel_hash(cs_blob) == false) {
4483 		return 0;
4484 	}
4485 
4486 	err = ubc_cs_convert_to_multilevel_hash(cs_blob);
4487 	if (err != 0) {
4488 		printf("unable to setup multi-level hashing: %d\n", err);
4489 		return err;
4490 	}
4491 
4492 	assert(cs_blob->csb_reconstituted == true);
4493 	return 0;
4494 }
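
/*
 * To put the ~75% figure in perspective (illustrative arithmetic): a 1 GB
 * executable spans 262,144 4K pages. With 32-byte hashes that is 8 MB of
 * code directory hashes, which the conversion condenses into 65,536
 * 16K-page hashes, i.e. 2 MB.
 */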
4495 
4496 /**
4497  * Once code signature validation is complete, we can remove even more blobs from the
4498  * code signature as they are no longer needed. This goes on to conserve even more
4499  * system memory.
4500  */
4501 static errno_t
4502 reconstitute_code_signature_2nd_stage(
4503 	struct cs_blob *cs_blob)
4504 {
4505 	kern_return_t ret = KERN_NOT_SUPPORTED;
4506 	errno_t err = EPERM;
4507 
4508 	/* If we never reconstituted before, we won't be reconstituting again */
4509 	if (cs_blob->csb_reconstituted == false) {
4510 		return 0;
4511 	}
4512 
4513 #if CODE_SIGNING_MONITOR
4514 	/*
4515 	 * When we have a code signing monitor, the code signature is immutable until the
4516 	 * monitor decides to unlock parts of it. Therefore, 2nd stage reconstitution takes
4517 	 * place in the monitor when we have a monitor available.
4518 	 *
4519 	 * If the monitor isn't enforcing code signing (in which case the code signature is
4520 	 * NOT immutable), then we perform 2nd stage reconstitution within the kernel itself.
4521 	 */
4522 	vm_address_t unneeded_addr = 0;
4523 	vm_size_t unneeded_size = 0;
4524 
4525 	ret = csm_reconstitute_code_signature(
4526 		cs_blob->csb_csm_obj,
4527 		&unneeded_addr,
4528 		&unneeded_size);
4529 
4530 	if ((ret == KERN_SUCCESS) && unneeded_addr && unneeded_size) {
4531 		/* Free the unneeded part of the blob */
4532 		kmem_free(kernel_map, unneeded_addr, unneeded_size);
4533 
4534 		/* Adjust the size in the blob object */
4535 		cs_blob->csb_mem_size -= unneeded_size;
4536 	}
4537 #endif
4538 
4539 	if (ret == KERN_SUCCESS) {
4540 		goto success;
4541 	} else if (ret != KERN_NOT_SUPPORTED) {
4542 		/*
4543 		 * A monitor environment is available, and it failed in performing 2nd stage
4544 		 * reconstitution. This is a fatal issue for code signing validation.
4545 		 */
4546 		printf("unable to reconstitute code signature through monitor: %d\n", ret);
4547 		return EPERM;
4548 	}
4549 
4550 	/* No monitor available if we reached here */
4551 	err = ubc_cs_reconstitute_code_signature_2nd_stage(cs_blob);
4552 	if (err != 0) {
4553 		return err;
4554 	}
4555 
4556 success:
4557 	/*
4558 	 * Regardless of whether we are performing 2nd stage reconstitution in the monitor
4559 	 * or in the kernel, we remove references to XML entitlements from the blob here.
4560 	 * None of the 2nd stage reconstitution code ever keeps these around, and they have
4561 	 * been explicitly deprecated and disallowed.
4562 	 */
4563 	cs_blob->csb_entitlements_blob = NULL;
4564 
4565 	return 0;
4566 }
4567 
4568 /**
4569  * A code signature blob often contains blobs which aren't needed in the kernel. Since
4570  * the code signature is wired into kernel memory for the time it is used, it behooves
4571  * us to remove any blobs we have no need for in order to conserve memory.
4572  *
4573  * Some platforms support copying the entire SuperBlob stored in kernel memory into
4574  * userspace memory through the "csops" system call. There is an expectation that when
4575  * this happens, all the blobs which were a part of the code signature are copied
4576  * into userspace memory. As a result, these platforms cannot reconstitute the
4577  * code signature; or rather, they cannot remove blobs from the signature, which
4578  * makes reconstitution useless.
4579  */
4580 static errno_t
4581 reconstitute_code_signature(
4582 	struct cs_blob *cs_blob)
4583 {
4584 	CS_CodeDirectory *code_directory = NULL;
4585 	vm_address_t signature_addr = 0;
4586 	vm_size_t signature_size = 0;
4587 	vm_offset_t code_directory_offset = 0;
4588 	bool platform_supports_reconstitution = false;
4589 
4590 #if CONFIG_CODE_SIGNATURE_RECONSTITUTION
4591 	platform_supports_reconstitution = true;
4592 #endif
4593 
4594 	/*
4595 	 * We can skip reconstitution if the code signing monitor isn't available or not
4596  * enabled. But if we do have a monitor, then reconstitution becomes required, as
4597 	 * there is an expectation of performing 2nd stage reconstitution through the
4598 	 * monitor itself.
4599 	 */
4600 	if (platform_supports_reconstitution == false) {
4601 #if CODE_SIGNING_MONITOR
4602 		if (csm_enabled() == true) {
4603 			printf("reconstitution required when code signing monitor is enabled\n");
4604 			return EPERM;
4605 		}
4606 #endif
4607 		return 0;
4608 	}
4609 
4610 	errno_t err = ubc_cs_reconstitute_code_signature(
4611 		cs_blob,
4612 		&signature_addr,
4613 		&signature_size,
4614 		0,
4615 		&code_directory);
4616 
4617 	if (err != 0) {
4618 		printf("unable to reconstitute code signature: %d\n", err);
4619 		return err;
4620 	}
4621 
4622 	/* Calculate the code directory offset */
4623 	code_directory_offset = (vm_offset_t)code_directory - signature_addr;
4624 
4625 	/* Reconstitution allocates new memory -- free the old one */
4626 	ubc_cs_blob_deallocate((vm_address_t)cs_blob->csb_mem_kaddr, cs_blob->csb_mem_size);
4627 
4628 	/* Reconstruct critical fields in the blob object */
4629 	ubc_cs_blob_reconstruct(
4630 		cs_blob,
4631 		signature_addr,
4632 		signature_size,
4633 		code_directory_offset);
4634 
4635 	/* Mark the object as reconstituted */
4636 	cs_blob->csb_reconstituted = true;
4637 
4638 	return 0;
4639 }
4640 
4641 int
4642 ubc_cs_blob_add(
4643 	struct vnode    *vp,
4644 	uint32_t        platform,
4645 	cpu_type_t      cputype,
4646 	cpu_subtype_t   cpusubtype,
4647 	off_t           base_offset,
4648 	vm_address_t    *addr,
4649 	vm_size_t       size,
4650 	struct image_params *imgp,
4651 	__unused int    flags,
4652 	struct cs_blob  **ret_blob)
4653 {
4654 	ptrauth_generic_signature_t cs_blob_sig = {0};
4655 	struct ubc_info *uip = NULL;
4656 	struct cs_blob tmp_blob = {0};
4657 	struct cs_blob *blob_ro = NULL;
4658 	struct cs_blob *oblob = NULL;
4659 	CS_CodeDirectory const *cd = NULL;
4660 	off_t blob_start_offset = 0;
4661 	off_t blob_end_offset = 0;
4662 	boolean_t record_mtime = false;
4663 	kern_return_t kr = KERN_DENIED;
4664 	errno_t error = -1;
4665 
4666 #if HAS_APPLE_PAC
4667 	void *signed_entitlements = NULL;
4668 #if CODE_SIGNING_MONITOR
4669 	void *signed_monitor_obj = NULL;
4670 #endif
4671 #endif
4672 
4673 	if (ret_blob) {
4674 		*ret_blob = NULL;
4675 	}
4676 
4677 	/*
4678 	 * Create the struct cs_blob abstract data type which will get attached to
4679 	 * the vnode object. This function also validates the structural integrity
4680 	 * of the code signature blob being passed in.
4681 	 *
4682 	 * We initialize a temporary blob whose contents are then copied into an RO
4683 	 * blob which we allocate from the read-only allocator.
4684 	 */
4685 	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
4686 	if (error != 0) {
4687 		printf("unable to create a validated cs_blob object: %d\n", error);
4688 		return error;
4689 	}
4690 
4691 	tmp_blob.csb_cpu_type = cputype;
4692 	tmp_blob.csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
4693 	tmp_blob.csb_base_offset = base_offset;
4694 
4695 	/* Perform 1st stage reconstitution */
4696 	error = reconstitute_code_signature(&tmp_blob);
4697 	if (error != 0) {
4698 		goto out;
4699 	}
4700 
4701 	/*
4702 	 * There is a strong design pattern we have to follow carefully within this
4703 	 * function. Since we're storing the struct cs_blob within RO-allocated
4704 	 * memory, it is immutable to modifications from within the kernel itself.
4705 	 *
4706 	 * However, before the contents of the blob are transferred to the immutable
4707 	 * cs_blob, they are kept on the stack. In order to protect against a kernel
4708 	 * R/W attacker, we must protect this stack variable. Most importantly, any
4709 	 * code paths which can block for a while must compute a PAC signature over
4710 	 * the stack variable, then perform the blocking operation, and then ensure
4711 	 * that the PAC signature over the stack variable is still valid to ensure
4712 	 * that an attacker did not overwrite contents of the blob by introducing a
4713 	 * maliciously long blocking operation, giving them the time required to go
4714 	 * and overwrite the contents of the blob.
4715 	 *
4716 	 * The most important fields to protect here are the OSEntitlements and the
4717 	 * code signing monitor object references. For these ones, we keep around
4718 	 * extra signed pointers diversified against the read-only blobs' memory
4719 	 * and then update the stack variable with these before updating the full
4720 	 * read-only blob.
4721 	 */
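
	/*
	 * The pattern sketched (illustrative; the verification side uses
	 * ptrauth_utils_auth_blob_generic, which is expected to panic on a
	 * signature mismatch):
	 *
	 *	sig = ptrauth_utils_sign_blob_generic(&tmp_blob, sizeof(tmp_blob),
	 *	    discriminator, PTRAUTH_ADDR_DIVERSIFY);
	 *	// ... potentially blocking operation ...
	 *	ptrauth_utils_auth_blob_generic(&tmp_blob, sizeof(tmp_blob),
	 *	    discriminator, PTRAUTH_ADDR_DIVERSIFY, sig);
	 */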
4722 
4723 	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4724 	assert(blob_ro != NULL);
4725 
4726 	tmp_blob.csb_ro_addr = blob_ro;
4727 	tmp_blob.csb_vnode = vp;
4728 
4729 	/* AMFI needs to see the current blob state at the RO address */
4730 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
4731 
4732 #if CODE_SIGNING_MONITOR
4733 	error = register_code_signature_monitor(
4734 		vp,
4735 		&tmp_blob,
4736 		(vm_offset_t)tmp_blob.csb_cd - (vm_offset_t)tmp_blob.csb_mem_kaddr);
4737 
4738 	if (error != 0) {
4739 		goto out;
4740 	}
4741 
4742 #if HAS_APPLE_PAC
4743 	signed_monitor_obj = ptrauth_sign_unauthenticated(
4744 		tmp_blob.csb_csm_obj,
4745 		ptrauth_key_process_independent_data,
4746 		ptrauth_blend_discriminator(&blob_ro->csb_csm_obj,
4747 		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_csm_obj")));
4748 #endif /* HAS_APPLE_PAC */
4749 
4750 #endif /* CODE_SIGNING_MONITOR */
4751 
4752 #if CONFIG_MACF
4753 	unsigned int cs_flags = tmp_blob.csb_flags;
4754 	unsigned int signer_type = tmp_blob.csb_signer_type;
4755 
4756 	error = mac_vnode_check_signature(
4757 		vp,
4758 		&tmp_blob,
4759 		imgp,
4760 		&cs_flags,
4761 		&signer_type,
4762 		flags,
4763 		platform);
4764 
4765 	if (error != 0) {
4766 		printf("validation of code signature failed through MACF policy: %d\n", error);
4767 		goto out;
4768 	}
4769 
4770 #if HAS_APPLE_PAC
4771 	signed_entitlements = ptrauth_sign_unauthenticated(
4772 		tmp_blob.csb_entitlements,
4773 		ptrauth_key_process_independent_data,
4774 		ptrauth_blend_discriminator(&blob_ro->csb_entitlements,
4775 		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements")));
4776 #endif
4777 
4778 	tmp_blob.csb_flags = cs_flags;
4779 	tmp_blob.csb_signer_type = signer_type;
4780 
4781 	if (tmp_blob.csb_flags & CS_PLATFORM_BINARY) {
4782 		tmp_blob.csb_platform_binary = 1;
4783 		tmp_blob.csb_platform_path = !!(tmp_blob.csb_flags & CS_PLATFORM_PATH);
4784 		tmp_blob.csb_teamid = NULL;
4785 	} else {
4786 		tmp_blob.csb_platform_binary = 0;
4787 		tmp_blob.csb_platform_path = 0;
4788 	}
4789 
4790 	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !tmp_blob.csb_platform_binary) {
4791 		printf("dyld simulator runtime is not apple signed: proc: %d\n",
4792 		    proc_getpid(current_proc()));
4793 
4794 		error = EPERM;
4795 		goto out;
4796 	}
4797 #endif /* CONFIG_MACF */
4798 
4799 #if CODE_SIGNING_MONITOR
4800 	error = verify_code_signature_monitor(&tmp_blob);
4801 	if (error != 0) {
4802 		goto out;
4803 	}
4804 #endif
4805 
4806 	/* Perform 2nd stage reconstitution */
4807 	error = reconstitute_code_signature_2nd_stage(&tmp_blob);
4808 	if (error != 0) {
4809 		goto out;
4810 	}
4811 
4812 	/* Setup any multi-level hashing for the code signature */
4813 	error = setup_multilevel_hashing(&tmp_blob);
4814 	if (error != 0) {
4815 		goto out;
4816 	}
4817 
4818 	/* Ensure security critical auxiliary blobs still exist */
4819 	error = validate_auxiliary_signed_blobs(&tmp_blob);
4820 	if (error != 0) {
4821 		goto out;
4822 	}
4823 
4824 	/*
4825 	 * Accelerate the entitlement queries for this code signature. This must
4826 	 * be done only after we know that the code signature pointers within the
4827 	 * struct cs_blob aren't going to be shifted around anymore, which is why
4828 	 * this acceleration is done after setting up multilevel hashing, since
4829 	 * that is the last part of signature validation which can shift the code
4830 	 * signature around.
4831 	 */
4832 	error = accelerate_entitlement_queries(&tmp_blob);
4833 	if (error != 0) {
4834 		goto out;
4835 	}
4836 
4837 	/*
4838 	 * Parse and set the Team ID for this code signature. This only needs to
4839 	 * happen when the signature isn't marked as platform. Like above, this
4840 	 * has to happen after we know the pointers within struct cs_blob aren't
4841 	 * going to be shifted anymore.
4842 	 */
4843 	if ((tmp_blob.csb_flags & CS_PLATFORM_BINARY) == 0) {
4844 		tmp_blob.csb_teamid = csblob_parse_teamid(&tmp_blob);
4845 	}
4846 
4847 	/*
4848 	 * Validate the code signing blob's coverage. Ideally, we could do this
4849 	 * at the beginning, right after structural validation; however,
4850 	 * multilevel hashing can change some offsets.
4851 	 */
4852 	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
4853 	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
4854 	if (blob_start_offset >= blob_end_offset) {
4855 		error = EINVAL;
4856 		goto out;
4857 	} else if (blob_start_offset < 0 || blob_end_offset <= 0) {
4858 		error = EINVAL;
4859 		goto out;
4860 	}
4861 
4862 	/*
4863 	 * The vnode_lock, linked list traversal, and marking of the memory object as
4864 	 * signed can all be blocking operations. Compute a PAC over the tmp_blob.
4865 	 */
4866 	cs_blob_sig = ptrauth_utils_sign_blob_generic(
4867 		&tmp_blob,
4868 		sizeof(tmp_blob),
4869 		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
4870 		PTRAUTH_ADDR_DIVERSIFY);
4871 
4872 	vnode_lock(vp);
4873 	if (!UBCINFOEXISTS(vp)) {
4874 		vnode_unlock(vp);
4875 		error = ENOENT;
4876 		goto out;
4877 	}
4878 	uip = vp->v_ubcinfo;
4879 
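	/*
	 * All blobs attached to a single vnode must agree on signer identity:
	 * same signer type, and either all platform, all carrying the same
	 * team ID, or all team-less. Mixed slices are rejected with EALREADY.
	 */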
4880 	/* check if this new blob overlaps with an existing blob */
4881 	for (oblob = ubc_get_cs_blobs(vp);
4882 	    oblob != NULL;
4883 	    oblob = oblob->csb_next) {
4884 		off_t oblob_start_offset, oblob_end_offset;
4885 
4886 		if (tmp_blob.csb_signer_type != oblob->csb_signer_type) {  // signer type needs to be the same for slices
4887 			vnode_unlock(vp);
4888 			error = EALREADY;
4889 			goto out;
4890 		} else if (tmp_blob.csb_platform_binary) {  // platform-binary status needs to match across slices
4891 			if (!oblob->csb_platform_binary) {
4892 				vnode_unlock(vp);
4893 				error = EALREADY;
4894 				goto out;
4895 			}
4896 		} else if (tmp_blob.csb_teamid) {  // team ID needs to match across slices
4897 			if (oblob->csb_platform_binary ||
4898 			    oblob->csb_teamid == NULL ||
4899 			    strcmp(oblob->csb_teamid, tmp_blob.csb_teamid) != 0) {
4900 				vnode_unlock(vp);
4901 				error = EALREADY;
4902 				goto out;
4903 			}
4904 		} else {  // a team-less binary's other slices must also be team-less
4905 			if (oblob->csb_platform_binary ||
4906 			    oblob->csb_teamid != NULL) {
4907 				vnode_unlock(vp);
4908 				error = EALREADY;
4909 				goto out;
4910 			}
4911 		}
4912 
4913 		oblob_start_offset = (oblob->csb_base_offset +
4914 		    oblob->csb_start_offset);
4915 		oblob_end_offset = (oblob->csb_base_offset +
4916 		    oblob->csb_end_offset);
4917 		if (blob_start_offset >= oblob_end_offset ||
4918 		    blob_end_offset <= oblob_start_offset) {
4919 			/* no conflict with this existing blob */
4920 		} else {
4921 			/* conflict ! */
4922 			if (blob_start_offset == oblob_start_offset &&
4923 			    blob_end_offset == oblob_end_offset &&
4924 			    tmp_blob.csb_mem_size == oblob->csb_mem_size &&
4925 			    tmp_blob.csb_flags == oblob->csb_flags &&
4926 			    (tmp_blob.csb_cpu_type == CPU_TYPE_ANY ||
4927 			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
4928 			    tmp_blob.csb_cpu_type == oblob->csb_cpu_type) &&
4929 			    !bcmp(tmp_blob.csb_cdhash,
4930 			    oblob->csb_cdhash,
4931 			    CS_CDHASH_LEN)) {
4932 				/*
4933 				 * We already have this blob:
4934 				 * we'll return success but
4935 				 * throw away the new blob.
4936 				 */
4937 				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
4938 					/*
4939 					 * The old blob matches this one
4940 					 * but doesn't have any CPU type.
4941 					 * Update it with whatever the caller
4942 					 * provided this time.
4943 					 */
4944 					cs_blob_set_cpu_type(oblob, cputype);
4945 				}
4946 
4947 				/* The signature is still accepted, so update the
4948 				 * generation count. */
4949 				uip->cs_add_gen = cs_blob_generation_count;
4950 
4951 				vnode_unlock(vp);
4952 				if (ret_blob) {
4953 					*ret_blob = oblob;
4954 				}
4955 				error = EAGAIN;
4956 				goto out;
4957 			} else {
4958 				/* different blob: reject the new one */
4959 				vnode_unlock(vp);
4960 				error = EALREADY;
4961 				goto out;
4962 			}
4963 		}
4964 	}
4965 
4966 	/* mark this vnode's VM object as having "signed pages" */
4967 	kr = memory_object_signed(uip->ui_control, TRUE);
4968 	if (kr != KERN_SUCCESS) {
4969 		vnode_unlock(vp);
4970 		error = ENOENT;
4971 		goto out;
4972 	}
4973 
4974 	if (uip->cs_blobs == NULL) {
4975 		/* loading 1st blob: record the file's current "modify time" */
4976 		record_mtime = TRUE;
4977 	}
4978 
4979 	/* set the generation count for cs_blobs */
4980 	uip->cs_add_gen = cs_blob_generation_count;
4981 
4982 	/* Authenticate the PAC signature now that the blocking operations are done, catching any tampering with the stack copy */
4983 	ptrauth_utils_auth_blob_generic(
4984 		&tmp_blob,
4985 		sizeof(tmp_blob),
4986 		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
4987 		PTRAUTH_ADDR_DIVERSIFY,
4988 		cs_blob_sig);
4989 
4990 	/* Update the system statistics for code signatures blobs */
4991 	ubc_cs_blob_adjust_statistics(&tmp_blob);
4992 
4993 	/* Update the list pointer to reference other blobs for this vnode */
4994 	tmp_blob.csb_next = uip->cs_blobs;
4995 
4996 #if HAS_APPLE_PAC
4997 	/*
4998 	 * Update all the critical pointers in the blob with the RO diversified
4999 	 * values before updating the read-only blob with the full contents of
5000 	 * the struct cs_blob. We need to use memcpy here as otherwise a simple
5001 	 * assignment will cause the compiler to re-sign using the stack variable
5002 	 * as the address diversifier.
5003 	 */
5004 	memcpy((void*)&tmp_blob.csb_entitlements, &signed_entitlements, sizeof(void*));
5005 #if CODE_SIGNING_MONITOR
5006 	memcpy((void*)&tmp_blob.csb_csm_obj, &signed_monitor_obj, sizeof(void*));
5007 #endif
5008 #endif
5009 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5010 
5011 	/* Fence to ensure the read-only blob's contents are visible to all threads before the pointer is published below */
5012 	os_atomic_thread_fence(seq_cst);
5013 
5014 	/*
5015 	 * Add the cs_blob to the front of the list of blobs for this vnode. We
5016 	 * add to the front of the list, and we never remove a blob from the list
5017 	 * which means ubc_cs_get_blobs can return whatever the top of the list
5018 	 * is, while still keeping the list valid. Useful for if we validate a
5019 	 * page while adding in a new blob for this vnode.
5020 	 */
5021 	uip->cs_blobs = blob_ro;
5022 
5023 	/* Reload the pointer through uip and verify the next blob really lives in the read-only zone */
5024 	if (uip->cs_blobs->csb_next) {
5025 		zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), uip->cs_blobs->csb_next);
5026 	}
5027 
5028 	if (cs_debug > 1) {
5029 		proc_t p;
5030 		const char *name = vnode_getname_printable(vp);
5031 		p = current_proc();
5032 		printf("CODE SIGNING: proc %d(%s) "
5033 		    "loaded %s signatures for file (%s) "
5034 		    "range 0x%llx:0x%llx flags 0x%x\n",
5035 		    proc_getpid(p), p->p_comm,
5036 		    blob_ro->csb_cpu_type == -1 ? "detached" : "embedded",
5037 		    name,
5038 		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5039 		    blob_ro->csb_base_offset + blob_ro->csb_end_offset,
5040 		    blob_ro->csb_flags);
5041 		vnode_putname_printable(name);
5042 	}
5043 
5044 	vnode_unlock(vp);
5045 
5046 	if (record_mtime) {
5047 		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
5048 	}
5049 
5050 	if (ret_blob) {
5051 		*ret_blob = blob_ro;
5052 	}
5053 
5054 	error = 0;      /* success ! */
5055 
5056 out:
5057 	if (error) {
5058 		if (error != EAGAIN) {
5059 			printf("check_signature[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5060 		}
5061 
5062 		cs_blob_cleanup(&tmp_blob);
5063 		if (blob_ro) {
5064 			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5065 		}
5066 	}
5067 
5068 	if (error == EAGAIN) {
5069 		/*
5070 		 * See above: error is EAGAIN if we were asked to add an
5071 		 * existing blob again.  We cleaned up the new blob and
5072 		 * want to return success.
5073 		 */
5074 		error = 0;
5075 	}
5076 
5077 	return error;
5078 }
5079 
5080 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5081 int
5082 ubc_cs_blob_add_supplement(
5083 	struct vnode    *vp,
5084 	struct vnode    *orig_vp,
5085 	off_t           base_offset,
5086 	vm_address_t    *addr,
5087 	vm_size_t       size,
5088 	struct cs_blob  **ret_blob)
5089 {
5090 	kern_return_t           kr;
5091 	struct ubc_info         *uip, *orig_uip;
5092 	int                     error;
5093 	struct cs_blob          tmp_blob;
5094 	struct cs_blob          *orig_blob;
5095 	struct cs_blob          *blob_ro = NULL;
5096 	CS_CodeDirectory const *cd;
5097 	off_t                   blob_start_offset, blob_end_offset;
5098 
5099 	if (ret_blob) {
5100 		*ret_blob = NULL;
5101 	}
5102 
5103 	/* Create the struct cs_blob wrapper that will be attached to the vnode;
5104 	 * this validates the passed-in blob in the process. */
5105 	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
5106 
5107 	if (error != 0) {
5108 		printf("malformed code signature supplement blob: %d\n", error);
5109 		return error;
5110 	}
5111 
5112 	tmp_blob.csb_cpu_type = -1;
5113 	tmp_blob.csb_base_offset = base_offset;
5114 
5115 	tmp_blob.csb_reconstituted = false;
5116 
5117 	vnode_lock(orig_vp);
5118 	if (!UBCINFOEXISTS(orig_vp)) {
5119 		vnode_unlock(orig_vp);
5120 		error = ENOENT;
5121 		goto out;
5122 	}
5123 
5124 	orig_uip = orig_vp->v_ubcinfo;
5125 
5126 	/* check that the supplement's linked cdhash matches a cdhash of
5127 	 * the target image.
5128 	 */
5129 
5130 	if (tmp_blob.csb_linkage_hashtype == NULL) {
5131 		proc_t p;
5132 		const char *iname = vnode_getname_printable(vp);
5133 		p = current_proc();
5134 
5135 		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5136 		    "is not a supplemental.\n",
5137 		    proc_getpid(p), p->p_comm, iname);
5138 
5139 		error = EINVAL;
5140 
5141 		vnode_putname_printable(iname);
5142 		vnode_unlock(orig_vp);
5143 		goto out;
5144 	}
5145 	bool found_but_not_valid = false;
5146 	for (orig_blob = ubc_get_cs_blobs(orig_vp); orig_blob != NULL;
5147 	    orig_blob = orig_blob->csb_next) {
5148 		if (orig_blob->csb_hashtype == tmp_blob.csb_linkage_hashtype &&
5149 		    memcmp(orig_blob->csb_cdhash, tmp_blob.csb_linkage, CS_CDHASH_LEN) == 0) {
5150 			// Found match!
5151 			found_but_not_valid = ((orig_blob->csb_flags & CS_VALID) != CS_VALID);
5152 			break;
5153 		}
5154 	}
5155 
5156 	if (orig_blob == NULL || found_but_not_valid) {
5157 		// Not found, or found but not valid.
5158 
5159 		proc_t p;
5160 		const char *iname = vnode_getname_printable(vp);
5161 		p = current_proc();
5162 
5163 		error = (orig_blob == NULL) ? ESRCH : EPERM;
5164 
5165 		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5166 		    "does not match any attached cdhash (error: %d).\n",
5167 		    proc_getpid(p), p->p_comm, iname, error);
5168 
5169 		vnode_putname_printable(iname);
5170 		vnode_unlock(orig_vp);
5171 		goto out;
5172 	}
5173 
5174 	vnode_unlock(orig_vp);
5175 
5176 	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
5177 	tmp_blob.csb_ro_addr = blob_ro;
5178 	tmp_blob.csb_vnode = vp;
5179 
5180 	/* AMFI needs to see the current blob state at the RO address. */
5181 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5182 
5183 	// validate the signature against policy!
5184 #if CONFIG_MACF
5185 	unsigned int signer_type = tmp_blob.csb_signer_type;
5186 	error = mac_vnode_check_supplemental_signature(vp, &tmp_blob, orig_vp, orig_blob, &signer_type);
5187 
5188 	tmp_blob.csb_signer_type = signer_type;
5189 
5190 	if (error) {
5191 		if (cs_debug) {
5192 			printf("check_supplemental_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5193 		}
5194 		goto out;
5195 	}
5196 #endif
5197 
5198 	// We allowed the supplemental signature blob, so copy the platform bit or team
5199 	// ID from the linked signature, along with whether the original is developer code.
5200 	tmp_blob.csb_platform_binary = 0;
5201 	tmp_blob.csb_platform_path = 0;
5202 	if (orig_blob->csb_platform_binary == 1) {
5203 		tmp_blob.csb_platform_binary = orig_blob->csb_platform_binary;
5204 		tmp_blob.csb_platform_path = orig_blob->csb_platform_path;
5205 	} else if (orig_blob->csb_teamid != NULL) {
5206 		vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
5207 		tmp_blob.csb_supplement_teamid = kalloc_data(teamid_size, Z_WAITOK);
5208 		if (tmp_blob.csb_supplement_teamid == NULL) {
5209 			error = ENOMEM;
5210 			goto out;
5211 		}
5212 		strlcpy(tmp_blob.csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
5213 	}
5214 	tmp_blob.csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);
5215 
5216 	// Validate the blob's coverage
5217 	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
5218 	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
5219 
5220 	if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
5221 		/* reject empty or backwards blob */
5222 		error = EINVAL;
5223 		goto out;
5224 	}
5225 
5226 	vnode_lock(vp);
5227 	if (!UBCINFOEXISTS(vp)) {
5228 		vnode_unlock(vp);
5229 		error = ENOENT;
5230 		goto out;
5231 	}
5232 	uip = vp->v_ubcinfo;
5233 
5234 	struct cs_blob *existing = uip->cs_blob_supplement;
5235 	if (existing != NULL) {
5236 		if (tmp_blob.csb_hashtype == existing->csb_hashtype &&
5237 		    memcmp(tmp_blob.csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
5238 			error = EAGAIN; // non-fatal
5239 		} else {
5240 			error = EALREADY; // fatal
5241 		}
5242 
5243 		vnode_unlock(vp);
5244 		goto out;
5245 	}
5246 
5247 	/* mark this vnode's VM object as having "signed pages" */
5248 	kr = memory_object_signed(uip->ui_control, TRUE);
5249 	if (kr != KERN_SUCCESS) {
5250 		vnode_unlock(vp);
5251 		error = ENOENT;
5252 		goto out;
5253 	}
5254 
5255 
5256 	/* We still adjust statistics even for supplemental blobs, as they
5257 	 * consume memory just the same. */
5258 	ubc_cs_blob_adjust_statistics(&tmp_blob);
5259 	/* Unlike regular cs_blobs, we only ever support one supplement. */
5260 	tmp_blob.csb_next = NULL;
5261 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5262 
5263 	os_atomic_thread_fence(seq_cst); // Fence so the blob contents are visible before the pointer is published
5264 	uip->cs_blob_supplement = blob_ro;
5265 
5266 	/* Make sure to reload pointer from uip to double check */
5267 	if (__improbable(uip->cs_blob_supplement->csb_next)) {
5268 		panic("csb_next does not match expected NULL value");
5269 	}
5270 
5271 	vnode_unlock(vp);
5272 
5273 
5274 	if (cs_debug > 1) {
5275 		proc_t p;
5276 		const char *name = vnode_getname_printable(vp);
5277 		p = current_proc();
5278 		printf("CODE SIGNING: proc %d(%s) "
5279 		    "loaded supplemental signature for file (%s) "
5280 		    "range 0x%llx:0x%llx\n",
5281 		    proc_getpid(p), p->p_comm,
5282 		    name,
5283 		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5284 		    blob_ro->csb_base_offset + blob_ro->csb_end_offset);
5285 		vnode_putname_printable(name);
5286 	}
5287 
5288 	if (ret_blob) {
5289 		*ret_blob = blob_ro;
5290 	}
5291 
5292 	error = 0; // Success!
5293 out:
5294 	if (error) {
5295 		if (cs_debug) {
5296 			printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5297 		}
5298 
5299 		cs_blob_cleanup(&tmp_blob);
5300 		if (blob_ro) {
5301 			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5302 		}
5303 	}
5304 
5305 	if (error == EAGAIN) {
5306 		/* We were asked to add an existing blob.
5307 		 * We cleaned up and ignore the attempt. */
5308 		error = 0;
5309 	}
5310 
5311 	return error;
5312 }
5313 #endif
5314 
5315 
5316 
5317 void
5318 csvnode_print_debug(struct vnode *vp)
5319 {
5320 	const char      *name = NULL;
5321 	struct ubc_info *uip;
5322 	struct cs_blob *blob;
5323 
5324 	name = vnode_getname_printable(vp);
5325 	if (name) {
5326 		printf("csvnode: name: %s\n", name);
5327 		vnode_putname_printable(name);
5328 	}
5329 
5330 	vnode_lock_spin(vp);
5331 
5332 	if (!UBCINFOEXISTS(vp)) {
5333 		blob = NULL;
5334 		goto out;
5335 	}
5336 
5337 	uip = vp->v_ubcinfo;
5338 	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
5339 		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
5340 		    (unsigned long)blob->csb_start_offset,
5341 		    (unsigned long)blob->csb_end_offset,
5342 		    blob->csb_flags,
5343 		    blob->csb_platform_binary ? "yes" : "no",
5344 		    blob->csb_platform_path ? "yes" : "no",
5345 		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
5346 	}
5347 
5348 out:
5349 	vnode_unlock(vp);
5350 }
5351 
5352 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5353 struct cs_blob *
5354 ubc_cs_blob_get_supplement(
5355 	struct vnode    *vp,
5356 	off_t           offset)
5357 {
5358 	struct cs_blob *blob;
5359 	off_t offset_in_blob;
5360 
5361 	vnode_lock_spin(vp);
5362 
5363 	if (!UBCINFOEXISTS(vp)) {
5364 		blob = NULL;
5365 		goto out;
5366 	}
5367 
5368 	blob = vp->v_ubcinfo->cs_blob_supplement;
5369 
5370 	if (blob == NULL) {
5371 		// no supplemental blob
5372 		goto out;
5373 	}
5374 
5375 
5376 	if (offset != -1) {
5377 		offset_in_blob = offset - blob->csb_base_offset;
5378 		if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
5379 			// not actually covered by this blob
5380 			blob = NULL;
5381 		}
5382 	}
5383 
5384 out:
5385 	vnode_unlock(vp);
5386 
5387 	return blob;
5388 }
5389 #endif
5390 
5391 struct cs_blob *
5392 ubc_cs_blob_get(
5393 	struct vnode    *vp,
5394 	cpu_type_t      cputype,
5395 	cpu_subtype_t   cpusubtype,
5396 	off_t           offset)
5397 {
5398 	struct cs_blob  *blob;
5399 	off_t offset_in_blob;
5400 
5401 	vnode_lock_spin(vp);
5402 
5403 	if (!UBCINFOEXISTS(vp)) {
5404 		blob = NULL;
5405 		goto out;
5406 	}
5407 
5408 	for (blob = ubc_get_cs_blobs(vp);
5409 	    blob != NULL;
5410 	    blob = blob->csb_next) {
5411 		if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
5412 			break;
5413 		}
5414 		if (offset != -1) {
5415 			offset_in_blob = offset - blob->csb_base_offset;
5416 			if (offset_in_blob >= blob->csb_start_offset &&
5417 			    offset_in_blob < blob->csb_end_offset) {
5418 				/* our offset is covered by this blob */
5419 				break;
5420 			}
5421 		}
5422 	}
5423 
5424 out:
5425 	vnode_unlock(vp);
5426 
5427 	return blob;
5428 }
5429 
5430 void
5431 ubc_cs_free_and_vnode_unlock(
5432 	vnode_t vp)
5433 {
5434 	struct ubc_info *uip = vp->v_ubcinfo;
5435 	struct cs_blob  *cs_blobs, *blob, *next_blob;
5436 
5437 	if (!(uip->ui_flags & UI_CSBLOBINVALID)) {
5438 		vnode_unlock(vp);
5439 		return;
5440 	}
5441 
5442 	uip->ui_flags &= ~UI_CSBLOBINVALID;
5443 
5444 	cs_blobs = uip->cs_blobs;
5445 	uip->cs_blobs = NULL;
5446 
5447 #if CHECK_CS_VALIDATION_BITMAP
5448 	ubc_cs_validation_bitmap_deallocate( uip );
5449 #endif
5450 
5451 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5452 	struct cs_blob  *cs_blob_supplement = uip->cs_blob_supplement;
5453 	uip->cs_blob_supplement = NULL;
5454 #endif
5455 
5456 	vnode_unlock(vp);
5457 
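	/*
	 * The blob lists were detached above while the vnode lock was held;
	 * the frees below may block, so they run after dropping the lock.
	 */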
5458 	for (blob = cs_blobs;
5459 	    blob != NULL;
5460 	    blob = next_blob) {
5461 		next_blob = blob->csb_next;
5462 		os_atomic_add(&cs_blob_count, -1, relaxed);
5463 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5464 		cs_blob_ro_free(blob);
5465 	}
5466 
5467 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5468 	if (cs_blob_supplement != NULL) {
5469 		os_atomic_add(&cs_blob_count, -1, relaxed);
5470 		os_atomic_add(&cs_blob_size, -cs_blob_supplement->csb_mem_size, relaxed);
5471 		cs_blob_supplement_free(cs_blob_supplement);
5472 	}
5473 #endif
5474 }
5475 
5476 static void
5477 ubc_cs_free(
5478 	struct ubc_info *uip)
5479 {
5480 	struct cs_blob  *blob, *next_blob;
5481 
5482 	for (blob = uip->cs_blobs;
5483 	    blob != NULL;
5484 	    blob = next_blob) {
5485 		next_blob = blob->csb_next;
5486 		os_atomic_add(&cs_blob_count, -1, relaxed);
5487 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5488 		cs_blob_ro_free(blob);
5489 	}
5490 #if CHECK_CS_VALIDATION_BITMAP
5491 	ubc_cs_validation_bitmap_deallocate( uip );
5492 #endif
5493 	uip->cs_blobs = NULL;
5494 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5495 	if (uip->cs_blob_supplement != NULL) {
5496 		blob = uip->cs_blob_supplement;
5497 		os_atomic_add(&cs_blob_count, -1, relaxed);
5498 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5499 		cs_blob_supplement_free(uip->cs_blob_supplement);
5500 		uip->cs_blob_supplement = NULL;
5501 	}
5502 #endif
5503 }
5504 
5505 /* check cs blob generation on vnode
5506  * returns:
5507  *    0         : Success, the cs_blob attached is current
5508  *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
5509  */
5510 int
5511 ubc_cs_generation_check(
5512 	struct vnode    *vp)
5513 {
5514 	int retval = ENEEDAUTH;
5515 
5516 	vnode_lock_spin(vp);
5517 
5518 	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
5519 		retval = 0;
5520 	}
5521 
5522 	vnode_unlock(vp);
5523 	return retval;
5524 }
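/*
 * Illustrative caller pattern (a sketch, not a copy of any in-tree call
 * site): consumers pair the generation check with a revalidation attempt
 * when it reports ENEEDAUTH.
 *
 *	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
 *		error = ubc_cs_blob_revalidate(vp, blob, imgp, flags, platform);
 *	}
 */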
5525 
5526 int
5527 ubc_cs_blob_revalidate(
5528 	struct vnode    *vp,
5529 	struct cs_blob *blob,
5530 	struct image_params *imgp,
5531 	int flags,
5532 	uint32_t platform
5533 	)
5534 {
5535 	int error = 0;
5536 	const CS_CodeDirectory *cd = NULL;
5537 	const CS_GenericBlob *entitlements = NULL;
5538 	const CS_GenericBlob *der_entitlements = NULL;
5539 	size_t size;
5540 	assert(vp != NULL);
5541 	assert(blob != NULL);
5542 
5543 	if ((blob->csb_flags & CS_VALID) == 0) {
5544 		// If the blob attached to the vnode was invalidated, don't try to revalidate it.
5545 		// Blob invalidation only occurs when the file the blob is attached to is
5546 		// opened for writing, which signals that the file has been modified.
5547 		printf("CODESIGNING: cannot revalidate a previously invalidated blob; reboot or create a new file.\n");
5548 		error = EPERM;
5549 		goto out;
5550 	}
5551 
5552 	size = blob->csb_mem_size;
5553 	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
5554 	    size, &cd, &entitlements, &der_entitlements);
5555 	if (error) {
5556 		if (cs_debug) {
5557 			printf("CODESIGNING: csblob invalid: %d\n", error);
5558 		}
5559 		goto out;
5560 	}
5561 
5562 	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
5563 	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
5564 
5565 	if (blob->csb_reconstituted) {
5566 		/*
5567 		 * Code signatures that have been modified after validation
5568 		 * cannot be revalidated inline from their in-memory blob.
5569 		 *
5570 		 * That's okay, though, because the only path left that relies
5571 		 * on revalidation of existing in-memory blobs is the legacy
5572 		 * detached signature database path, which only exists on macOS,
5573 		 * which does not do reconstitution of any kind.
5574 		 */
5575 		if (cs_debug) {
5576 			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
5577 		}
5578 
5579 		/*
5580 		 * EAGAIN tells the caller that they may reread the code
5581 		 * signature and try attaching it again, which is the same
5582 		 * thing they would do if there was no cs_blob yet in the
5583 		 * first place.
5584 		 *
5585 		 * Conveniently, after ubc_cs_blob_add did a successful
5586 		 * validation, it will detect that a matching cs_blob (cdhash,
5587 		 * offset, arch etc.) already exists, and return success
5588 		 * without re-adding a cs_blob to the vnode.
5589 		 */
5590 		return EAGAIN;
5591 	}
5592 
5593 	/* callout to mac_vnode_check_signature */
5594 #if CONFIG_MACF
5595 	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
5596 	if (cs_debug && error) {
5597 		printf("revalidate: check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5598 	}
5599 #else
5600 	(void)flags;
5601 	(void)signer_type;
5602 #endif
5603 
5604 	/* update generation number if success */
5605 	vnode_lock_spin(vp);
5606 	struct cs_signer_info signer_info = {
5607 		.csb_flags = cs_flags,
5608 		.csb_signer_type = signer_type
5609 	};
5610 	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_signer_info, &signer_info);
5611 	if (UBCINFOEXISTS(vp)) {
5612 		if (error == 0) {
5613 			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
5614 		} else {
5615 			vp->v_ubcinfo->cs_add_gen = 0;
5616 		}
5617 	}
5618 
5619 	vnode_unlock(vp);
5620 
5621 out:
5622 	return error;
5623 }
5624 
5625 void
5626 cs_blob_reset_cache()
5627 {
5628 	/* Incrementing the odd count by 2 ensures it never reaches '0', and makes every vnode's cached generation stale. */
5629 	OSAddAtomic(+2, &cs_blob_generation_count);
5630 	printf("Resetting cs_blob cache for all vnodes.\n");
5631 }
5632 
5633 struct cs_blob *
5634 ubc_get_cs_blobs(
5635 	struct vnode    *vp)
5636 {
5637 	struct ubc_info *uip;
5638 	struct cs_blob  *blobs;
5639 
5640 	/*
5641 	 * No need to take the vnode lock here.  The caller must be holding
5642 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5643 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5644 	 * goes away.  And we only modify "blobs" by adding to the head of the
5645 	 * list.
5646 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5647 	 * part of a forced unmount.  In the case of a code-signature validation
5648 	 * during a page fault, the "paging_in_progress" reference on the VM
5649 	 * object guarantees that the vnode pager (and the ubc_info) won't go
5650 	 * away during the fault.
5651 	 * Other callers need to protect against vnode reclaim by holding the
5652 	 * vnode lock, for example.
5653 	 */
5654 
5655 	if (!UBCINFOEXISTS(vp)) {
5656 		blobs = NULL;
5657 		goto out;
5658 	}
5659 
5660 	uip = vp->v_ubcinfo;
5661 	blobs = uip->cs_blobs;
5662 	if (blobs != NULL) {
5663 		cs_blob_require(blobs, vp);
5664 	}
5665 
5666 out:
5667 	return blobs;
5668 }
5669 
5670 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5671 struct cs_blob *
5672 ubc_get_cs_supplement(
5673 	struct vnode    *vp)
5674 {
5675 	struct ubc_info *uip;
5676 	struct cs_blob  *blob;
5677 
5678 	/*
5679 	 * No need to take the vnode lock here.  The caller must be holding
5680 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5681 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5682 	 * goes away.
5683 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5684 	 * part of a forced unmount.  In the case of a code-signature validation
5685 	 * during a page fault, the "paging_in_progress" reference on the VM
5686 	 * object guarantees that the vnode pager (and the ubc_info) won't go
5687 	 * away during the fault.
5688 	 * Other callers need to protect against vnode reclaim by holding the
5689 	 * vnode lock, for example.
5690 	 */
5691 
5692 	if (!UBCINFOEXISTS(vp)) {
5693 		blob = NULL;
5694 		goto out;
5695 	}
5696 
5697 	uip = vp->v_ubcinfo;
5698 	blob = uip->cs_blob_supplement;
5699 	if (blob != NULL) {
5700 		cs_blob_require(blob, vp);
5701 	}
5702 
5703 out:
5704 	return blob;
5705 }
5706 #endif
5707 
5708 
5709 void
5710 ubc_get_cs_mtime(
5711 	struct vnode    *vp,
5712 	struct timespec *cs_mtime)
5713 {
5714 	struct ubc_info *uip;
5715 
5716 	if (!UBCINFOEXISTS(vp)) {
5717 		cs_mtime->tv_sec = 0;
5718 		cs_mtime->tv_nsec = 0;
5719 		return;
5720 	}
5721 
5722 	uip = vp->v_ubcinfo;
5723 	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
5724 	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
5725 }
5726 
5727 unsigned long cs_validate_page_no_hash = 0;
5728 unsigned long cs_validate_page_bad_hash = 0;
5729 static boolean_t
5730 cs_validate_hash(
5731 	struct cs_blob          *blobs,
5732 	memory_object_t         pager,
5733 	memory_object_offset_t  page_offset,
5734 	const void              *data,
5735 	vm_size_t               *bytes_processed,
5736 	unsigned                *tainted)
5737 {
5738 	union cs_hash_union     mdctx;
5739 	struct cs_hash const    *hashtype = NULL;
5740 	unsigned char           actual_hash[CS_HASH_MAX_SIZE];
5741 	unsigned char           expected_hash[CS_HASH_MAX_SIZE];
5742 	boolean_t               found_hash;
5743 	struct cs_blob          *blob;
5744 	const CS_CodeDirectory  *cd;
5745 	const unsigned char     *hash;
5746 	boolean_t               validated;
5747 	off_t                   offset; /* page offset in the file */
5748 	size_t                  size;
5749 	off_t                   codeLimit = 0;
5750 	const char              *lower_bound, *upper_bound;
5751 	vm_offset_t             kaddr, blob_addr;
5752 
5753 	/* retrieve the expected hash */
5754 	found_hash = FALSE;
5755 
5756 	for (blob = blobs;
5757 	    blob != NULL;
5758 	    blob = blob->csb_next) {
5759 		offset = page_offset - blob->csb_base_offset;
5760 		if (offset < blob->csb_start_offset ||
5761 		    offset >= blob->csb_end_offset) {
5762 			/* our page is not covered by this blob */
5763 			continue;
5764 		}
5765 
5766 		/* blob data has been released */
5767 		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
5768 		if (kaddr == 0) {
5769 			continue;
5770 		}
5771 
5772 		blob_addr = kaddr + blob->csb_mem_offset;
5773 		lower_bound = CAST_DOWN(char *, blob_addr);
5774 		upper_bound = lower_bound + blob->csb_mem_size;
5775 
5776 		cd = blob->csb_cd;
5777 		if (cd != NULL) {
5778 			/* all CDs that have been injected are already validated */
5779 
5780 			hashtype = blob->csb_hashtype;
5781 			if (hashtype == NULL) {
5782 				panic("unknown hash type ?");
5783 			}
5784 			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
5785 				panic("hash size too large");
5786 			}
5787 			if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
5788 				panic("offset not aligned to cshash boundary");
5789 			}
5790 
5791 			codeLimit = ntohl(cd->codeLimit);
5792 
5793 			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
5794 			    hashtype->cs_size,
5795 			    lower_bound, upper_bound);
5796 			if (hash != NULL) {
5797 				bcopy(hash, expected_hash, hashtype->cs_size);
5798 				found_hash = TRUE;
5799 			}
5800 
5801 			break;
5802 		}
5803 	}
5804 
5805 	if (found_hash == FALSE) {
5806 		/*
5807 		 * We can't verify this page because there is no signature
5808 		 * for it (yet).  It's possible that this part of the object
5809 		 * is not signed, or that signatures for that part have not
5810 		 * been loaded yet.
5811 		 * Report that the page has not been validated and let the
5812 		 * caller decide if it wants to accept it or not.
5813 		 */
5814 		cs_validate_page_no_hash++;
5815 		if (cs_debug > 1) {
5816 			printf("CODE SIGNING: cs_validate_page: "
5817 			    "mobj %p off 0x%llx: no hash to validate !?\n",
5818 			    pager, page_offset);
5819 		}
5820 		validated = FALSE;
5821 		*tainted = 0;
5822 	} else {
5823 		*tainted = 0;
5824 
5825 		size = (1U << blob->csb_hash_pageshift);
5826 		*bytes_processed = size;
5827 
5828 		const uint32_t *asha1, *esha1;
5829 		if ((off_t)(offset + size) > codeLimit) {
5830 			/* partial page at end of segment */
5831 			assert(offset < codeLimit);
5832 			size = (size_t) (codeLimit & (size - 1));
5833 			*tainted |= CS_VALIDATE_NX;
5834 		}
5835 
5836 		hashtype->cs_init(&mdctx);
5837 
5838 		if (blob->csb_hash_firstlevel_pageshift) {
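		/*
		 * Multi-level hashing (e.g. 4K hashes within a 16K page): hash
		 * each first-level chunk separately, then feed the chunk
		 * digests into the cumulative context so the final digest
		 * matches what the code directory recorded.
		 */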
5839 			const unsigned char *partial_data = (const unsigned char *)data;
5840 			size_t i;
5841 			for (i = 0; i < size;) {
5842 				union cs_hash_union     partialctx;
5843 				unsigned char partial_digest[CS_HASH_MAX_SIZE];
5844 				size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));
5845 
5846 				hashtype->cs_init(&partialctx);
5847 				hashtype->cs_update(&partialctx, partial_data, partial_size);
5848 				hashtype->cs_final(partial_digest, &partialctx);
5849 
5850 				/* Update cumulative multi-level hash */
5851 				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
5852 				partial_data = partial_data + partial_size;
5853 				i += partial_size;
5854 			}
5855 		} else {
5856 			hashtype->cs_update(&mdctx, data, size);
5857 		}
5858 		hashtype->cs_final(actual_hash, &mdctx);
5859 
5860 		asha1 = (const uint32_t *) actual_hash;
5861 		esha1 = (const uint32_t *) expected_hash;
5862 
5863 		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
5864 			if (cs_debug) {
5865 				printf("CODE SIGNING: cs_validate_page: "
5866 				    "mobj %p off 0x%llx size 0x%lx: "
5867 				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
5868 				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
5869 				    pager, page_offset, size,
5870 				    asha1[0], asha1[1], asha1[2],
5871 				    asha1[3], asha1[4],
5872 				    esha1[0], esha1[1], esha1[2],
5873 				    esha1[3], esha1[4]);
5874 			}
5875 			cs_validate_page_bad_hash++;
5876 			*tainted |= CS_VALIDATE_TAINTED;
5877 		} else {
5878 			if (cs_debug > 10) {
5879 				printf("CODE SIGNING: cs_validate_page: "
5880 				    "mobj %p off 0x%llx size 0x%lx: "
5881 				    "SHA1 OK\n",
5882 				    pager, page_offset, size);
5883 			}
5884 		}
5885 		validated = TRUE;
5886 	}
5887 
5888 	return validated;
5889 }
5890 
5891 boolean_t
5892 cs_validate_range(
5893 	struct vnode    *vp,
5894 	memory_object_t         pager,
5895 	memory_object_offset_t  page_offset,
5896 	const void              *data,
5897 	vm_size_t               dsize,
5898 	unsigned                *tainted)
5899 {
5900 	vm_size_t offset_in_range;
5901 	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
5902 
5903 	struct cs_blob *blobs = ubc_get_cs_blobs(vp);
5904 
5905 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5906 	if (blobs == NULL && proc_is_translated(current_proc())) {
5907 		struct cs_blob *supp = ubc_get_cs_supplement(vp);
5908 
5909 		if (supp != NULL) {
5910 			blobs = supp;
5911 		} else {
5912 			return FALSE;
5913 		}
5914 	}
5915 #endif
5916 
5917 #if DEVELOPMENT || DEBUG
5918 	code_signing_config_t cs_config = 0;
5919 
5920 	/*
5921 	 * This exemption is specifically useful for systems which want to avoid paying
5922 	 * the cost of verifying the integrity of pages, since that is done by computing
5923 	 * hashes, which can take some time.
5924 	 */
5925 	code_signing_configuration(NULL, &cs_config);
5926 	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
5927 		*tainted = 0;
5928 
5929 		/* Return early to avoid paying the cost of hashing */
5930 		return true;
5931 	}
5932 #endif
5933 
5934 	*tainted = 0;
5935 
5936 	for (offset_in_range = 0;
5937 	    offset_in_range < dsize;
5938 	    /* offset_in_range updated based on bytes processed */) {
5939 		unsigned subrange_tainted = 0;
5940 		boolean_t subrange_validated;
5941 		vm_size_t bytes_processed = 0;
5942 
5943 		subrange_validated = cs_validate_hash(blobs,
5944 		    pager,
5945 		    page_offset + offset_in_range,
5946 		    (const void *)((const char *)data + offset_in_range),
5947 		    &bytes_processed,
5948 		    &subrange_tainted);
5949 
5950 		*tainted |= subrange_tainted;
5951 
5952 		if (bytes_processed == 0) {
5953 			/* Cannot make forward progress, so return an error */
5954 			all_subranges_validated = FALSE;
5955 			break;
5956 		} else if (subrange_validated == FALSE) {
5957 			all_subranges_validated = FALSE;
5958 			/* Keep going to detect other types of failures in subranges */
5959 		}
5960 
5961 		offset_in_range += bytes_processed;
5962 	}
5963 
5964 	return all_subranges_validated;
5965 }
5966 
5967 void
5968 cs_validate_page(
5969 	struct vnode            *vp,
5970 	memory_object_t         pager,
5971 	memory_object_offset_t  page_offset,
5972 	const void              *data,
5973 	int                     *validated_p,
5974 	int                     *tainted_p,
5975 	int                     *nx_p)
5976 {
5977 	vm_size_t offset_in_page;
5978 	struct cs_blob *blobs;
5979 
5980 	blobs = ubc_get_cs_blobs(vp);
5981 
5982 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5983 	if (blobs == NULL && proc_is_translated(current_proc())) {
5984 		struct cs_blob *supp = ubc_get_cs_supplement(vp);
5985 
5986 		if (supp != NULL) {
5987 			blobs = supp;
5988 		}
5989 	}
5990 #endif
5991 
5992 #if DEVELOPMENT || DEBUG
5993 	code_signing_config_t cs_config = 0;
5994 
5995 	/*
5996 	 * This exemption is specifically useful for systems which want to avoid paying
5997 	 * the cost of verifying the integrity of pages, since that is done by computing
5998 	 * hashes, which can take some time.
5999 	 */
6000 	code_signing_configuration(NULL, &cs_config);
6001 	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
6002 		*validated_p = VMP_CS_ALL_TRUE;
6003 		*tainted_p = VMP_CS_ALL_FALSE;
6004 		*nx_p = VMP_CS_ALL_FALSE;
6005 
6006 		/* Return early to avoid paying the cost of hashing */
6007 		return;
6008 	}
6009 #endif
6010 
6011 	*validated_p = VMP_CS_ALL_FALSE;
6012 	*tainted_p = VMP_CS_ALL_FALSE;
6013 	*nx_p = VMP_CS_ALL_FALSE;
6014 
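	/*
	 * Walk the page in 4K subranges. With 4K signing granularity each
	 * result is recorded as one bit per chunk (sub_bit below); a single
	 * full-page hash short-circuits to the VMP_CS_ALL_* values instead.
	 */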
6015 	for (offset_in_page = 0;
6016 	    offset_in_page < PAGE_SIZE;
6017 	    /* offset_in_page updated based on bytes processed */) {
6018 		unsigned subrange_tainted = 0;
6019 		boolean_t subrange_validated;
6020 		vm_size_t bytes_processed = 0;
6021 		int sub_bit;
6022 
6023 		subrange_validated = cs_validate_hash(blobs,
6024 		    pager,
6025 		    page_offset + offset_in_page,
6026 		    (const void *)((const char *)data + offset_in_page),
6027 		    &bytes_processed,
6028 		    &subrange_tainted);
6029 
6030 		if (bytes_processed == 0) {
6031 			/* 4k chunk not code-signed: try next one */
6032 			offset_in_page += FOURK_PAGE_SIZE;
6033 			continue;
6034 		}
6035 		if (offset_in_page == 0 &&
6036 		    bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
6037 			/* all processed: no 4k granularity */
6038 			if (subrange_validated) {
6039 				*validated_p = VMP_CS_ALL_TRUE;
6040 			}
6041 			if (subrange_tainted & CS_VALIDATE_TAINTED) {
6042 				*tainted_p = VMP_CS_ALL_TRUE;
6043 			}
6044 			if (subrange_tainted & CS_VALIDATE_NX) {
6045 				*nx_p = VMP_CS_ALL_TRUE;
6046 			}
6047 			break;
6048 		}
6049 		/* we only handle 4k or 16k code-signing granularity... */
6050 		assertf(bytes_processed <= FOURK_PAGE_SIZE,
6051 		    "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
6052 		    vp, blobs, (uint64_t)page_offset,
6053 		    (uint64_t)offset_in_page, (uint64_t)bytes_processed);
6054 		sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
6055 		if (subrange_validated) {
6056 			*validated_p |= sub_bit;
6057 		}
6058 		if (subrange_tainted & CS_VALIDATE_TAINTED) {
6059 			*tainted_p |= sub_bit;
6060 		}
6061 		if (subrange_tainted & CS_VALIDATE_NX) {
6062 			*nx_p |= sub_bit;
6063 		}
6064 		/* go to next 4k chunk */
6065 		offset_in_page += FOURK_PAGE_SIZE;
6066 	}
6067 
6068 	return;
6069 }
6070 
6071 int
6072 ubc_cs_getcdhash(
6073 	vnode_t         vp,
6074 	off_t           offset,
6075 	unsigned char   *cdhash)
6076 {
6077 	struct cs_blob  *blobs, *blob;
6078 	off_t           rel_offset;
6079 	int             ret;
6080 
6081 	vnode_lock(vp);
6082 
6083 	blobs = ubc_get_cs_blobs(vp);
6084 	for (blob = blobs;
6085 	    blob != NULL;
6086 	    blob = blob->csb_next) {
6087 		/* compute offset relative to this blob */
6088 		rel_offset = offset - blob->csb_base_offset;
6089 		if (rel_offset >= blob->csb_start_offset &&
6090 		    rel_offset < blob->csb_end_offset) {
6091 			/* this blob does cover our "offset" ! */
6092 			break;
6093 		}
6094 	}
6095 
6096 	if (blob == NULL) {
6097 		/* we didn't find a blob covering "offset" */
6098 		ret = EBADEXEC; /* XXX any better error ? */
6099 	} else {
6100 		/* copy that blob's CDHash */
6101 		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
6102 		ret = 0;
6103 	}
6104 
6105 	vnode_unlock(vp);
6106 
6107 	return ret;
6108 }
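/*
 * Illustrative use (a sketch; file_offset is a hypothetical caller
 * variable): fetch the CDHash covering a given file offset. The caller's
 * buffer must hold CS_CDHASH_LEN bytes.
 *
 *	unsigned char cdhash[CS_CDHASH_LEN];
 *	if (ubc_cs_getcdhash(vp, file_offset, cdhash) == 0) {
 *		// cdhash now holds the code directory hash for file_offset
 *	}
 */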
6109 
6110 boolean_t
6111 ubc_cs_is_range_codesigned(
6112 	vnode_t                 vp,
6113 	mach_vm_offset_t        start,
6114 	mach_vm_size_t          size)
6115 {
6116 	struct cs_blob          *csblob;
6117 	mach_vm_offset_t        blob_start;
6118 	mach_vm_offset_t        blob_end;
6119 
6120 	if (vp == NULL) {
6121 		/* no file: no code signature */
6122 		return FALSE;
6123 	}
6124 	if (size == 0) {
6125 		/* no range: no code signature */
6126 		return FALSE;
6127 	}
6128 	if (start + size < start) {
6129 		/* overflow */
6130 		return FALSE;
6131 	}
6132 
6133 	csblob = ubc_cs_blob_get(vp, -1, -1, start);
6134 	if (csblob == NULL) {
6135 		return FALSE;
6136 	}
6137 
6138 	/*
6139 	 * We currently check if the range is covered by a single blob,
6140 	 * which should always be the case for the dyld shared cache.
6141 	 * If we ever want to make this routine handle other cases, we
6142 	 * would have to iterate if the blob does not cover the full range.
6143 	 */
6144 	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
6145 	    csblob->csb_start_offset);
6146 	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
6147 	    csblob->csb_end_offset);
6148 	if (blob_start > start || blob_end < (start + size)) {
6149 		/* range not fully covered by this code-signing blob */
6150 		return FALSE;
6151 	}
6152 
6153 	return TRUE;
6154 }
6155 
6156 #if CHECK_CS_VALIDATION_BITMAP
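/*
 * stob converts a file size to the number of bitmap bytes needed: one
 * validation bit per page, so round the size up to whole pages, take the
 * page count, then add 7 and shift right by 3 to round up to whole bytes.
 */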
6157 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
6158 extern  boolean_t       root_fs_upgrade_try;
6159 
6160 /*
6161  * Should we use the code-sign bitmap to avoid repeated code-sign validation?
6162  * Depends:
6163  * a) Is the target vnode on the root filesystem?
6164  * b) Has someone tried to mount the root filesystem read-write?
6165  * If answers are (a) yes AND (b) no, then we can use the bitmap.
6166  */
6167 #define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
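/*
 * Typical flow (a sketch of the intended use): allocate the bitmap once
 * per vnode, mark a page with CS_BITMAP_SET after it validates, test with
 * CS_BITMAP_CHECK on a later fault to skip re-hashing, and clear with
 * CS_BITMAP_CLEAR on invalidation.
 */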
6168 kern_return_t
6169 ubc_cs_validation_bitmap_allocate(
6170 	vnode_t         vp)
6171 {
6172 	kern_return_t   kr = KERN_SUCCESS;
6173 	struct ubc_info *uip;
6174 	char            *target_bitmap;
6175 	vm_object_size_t        bitmap_size;
6176 
6177 	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
6178 		kr = KERN_INVALID_ARGUMENT;
6179 	} else {
6180 		uip = vp->v_ubcinfo;
6181 
6182 		if (uip->cs_valid_bitmap == NULL) {
6183 			bitmap_size = stob(uip->ui_size);
6184 			target_bitmap = (char*) kalloc_data((vm_size_t)bitmap_size, Z_WAITOK | Z_ZERO);
6185 			if (target_bitmap == 0) {
6186 				kr = KERN_NO_SPACE;
6187 			} else {
6188 				kr = KERN_SUCCESS;
6189 			}
6190 			if (kr == KERN_SUCCESS) {
6191 				uip->cs_valid_bitmap = (void*)target_bitmap;
6192 				uip->cs_valid_bitmap_size = bitmap_size;
6193 			}
6194 		}
6195 	}
6196 	return kr;
6197 }
6198 
6199 kern_return_t
6200 ubc_cs_check_validation_bitmap(
6201 	vnode_t                 vp,
6202 	memory_object_offset_t          offset,
6203 	int                     optype)
6204 {
6205 	kern_return_t   kr = KERN_SUCCESS;
6206 
6207 	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
6208 		kr = KERN_INVALID_ARGUMENT;
6209 	} else {
6210 		struct ubc_info *uip = vp->v_ubcinfo;
6211 		char            *target_bitmap = uip->cs_valid_bitmap;
6212 
6213 		if (target_bitmap == NULL) {
6214 			kr = KERN_INVALID_ARGUMENT;
6215 		} else {
6216 			uint64_t        bit, byte;
6217 			bit = atop_64( offset );
6218 			byte = bit >> 3;
6219 
6220 			if (byte > uip->cs_valid_bitmap_size) {
6221 				kr = KERN_INVALID_ARGUMENT;
6222 			} else {
6223 				if (optype == CS_BITMAP_SET) {
6224 					target_bitmap[byte] |= (1 << (bit & 07));
6225 					kr = KERN_SUCCESS;
6226 				} else if (optype == CS_BITMAP_CLEAR) {
6227 					target_bitmap[byte] &= ~(1 << (bit & 07));
6228 					kr = KERN_SUCCESS;
6229 				} else if (optype == CS_BITMAP_CHECK) {
6230 					if (target_bitmap[byte] & (1 << (bit & 07))) {
6231 						kr = KERN_SUCCESS;
6232 					} else {
6233 						kr = KERN_FAILURE;
6234 					}
6235 				}
6236 			}
6237 		}
6238 	}
6239 	return kr;
6240 }
6241 
6242 void
6243 ubc_cs_validation_bitmap_deallocate(
6244 	struct ubc_info *uip)
6245 {
6246 	if (uip->cs_valid_bitmap != NULL) {
6247 		kfree_data(uip->cs_valid_bitmap, (vm_size_t)uip->cs_valid_bitmap_size);
6248 		uip->cs_valid_bitmap = NULL;
6249 	}
6250 }
6251 #else
6252 kern_return_t
6253 ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
6254 {
6255 	return KERN_INVALID_ARGUMENT;
6256 }
6257 
6258 kern_return_t
6259 ubc_cs_check_validation_bitmap(
6260 	__unused struct vnode *vp,
6261 	__unused memory_object_offset_t offset,
6262 	__unused int optype)
6263 {
6264 	return KERN_INVALID_ARGUMENT;
6265 }
6266 
6267 void
6268 ubc_cs_validation_bitmap_deallocate(__unused struct ubc_info *uip)
6269 {
6270 	return;
6271 }
6272 #endif /* CHECK_CS_VALIDATION_BITMAP */
6273 
6274 #if CODE_SIGNING_MONITOR
6275 
6276 kern_return_t
6277 cs_associate_blob_with_mapping(
6278 	void                    *pmap,
6279 	vm_map_offset_t         start,
6280 	vm_map_size_t           size,
6281 	vm_object_offset_t      offset,
6282 	void                    *blobs_p)
6283 {
6284 	off_t                   blob_start_offset, blob_end_offset;
6285 	kern_return_t           kr;
6286 	struct cs_blob          *blobs, *blob;
6287 	vm_offset_t             kaddr;
6288 	void                    *monitor_sig_obj = NULL;
6289 
6290 	if (csm_enabled() == false) {
6291 		return KERN_NOT_SUPPORTED;
6292 	}
6293 
6294 	blobs = (struct cs_blob *)blobs_p;
6295 
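	/*
	 * Find a blob that still has its data and monitor object and whose
	 * coverage fully contains [offset, offset + size).
	 */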
6296 	for (blob = blobs;
6297 	    blob != NULL;
6298 	    blob = blob->csb_next) {
6299 		blob_start_offset = (blob->csb_base_offset +
6300 		    blob->csb_start_offset);
6301 		blob_end_offset = (blob->csb_base_offset +
6302 		    blob->csb_end_offset);
6303 		if ((off_t) offset < blob_start_offset ||
6304 		    (off_t) offset >= blob_end_offset ||
6305 		    (off_t) (offset + size) <= blob_start_offset ||
6306 		    (off_t) (offset + size) > blob_end_offset) {
6307 			continue;
6308 		}
6309 
6310 		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
6311 		if (kaddr == 0) {
6312 			/* blob data has been released */
6313 			continue;
6314 		}
6315 
6316 		monitor_sig_obj = blob->csb_csm_obj;
6317 		if (monitor_sig_obj == NULL) {
6318 			continue;
6319 		}
6320 
6321 		break;
6322 	}
6323 
6324 	if (monitor_sig_obj != NULL) {
6325 		vm_offset_t segment_offset = offset - blob_start_offset;
6326 		kr = csm_associate_code_signature(pmap, monitor_sig_obj, start, size, segment_offset);
6327 	} else {
6328 		kr = KERN_CODESIGN_ERROR;
6329 	}
6330 
6331 	return kr;
6332 }
6333 
6334 #endif /* CODE_SIGNING_MONITOR */
6335