xref: /xnu-12377.41.6/bsd/kern/ubc_subr.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *	File:	ubc_subr.c
30  *	Author:	Umesh Vaishampayan [[email protected]]
31  *		05-Aug-1999	umeshv	Created.
32  *
33  *	Functions related to Unified Buffer cache.
34  *
35  * Callers of UBC functions MUST hold a valid reference on the vnode.
36  *
37  */
38 
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/lock.h>
43 #include <sys/mman.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56 #include <sys/reboot.h>
57 #include <sys/code_signing.h>
58 
59 #include <mach/mach_types.h>
60 #include <mach/memory_object_types.h>
61 #include <mach/memory_object_control.h>
62 #include <mach/vm_map.h>
63 #include <mach/mach_vm.h>
64 #include <mach/upl.h>
65 
66 #include <kern/kern_types.h>
67 #include <kern/kalloc.h>
68 #include <kern/zalloc.h>
69 #include <kern/thread.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_upl.h>
74 #include <vm/vm_kern_xnu.h>
75 #include <vm/vm_protos.h> /* last */
76 #include <vm/vm_ubc.h>
77 
78 #include <libkern/crypto/sha1.h>
79 #include <libkern/crypto/sha2.h>
80 #include <libkern/libkern.h>
81 
82 #include <security/mac_framework.h>
83 #include <stdbool.h>
84 #include <stdatomic.h>
85 #include <libkern/amfi/amfi.h>
86 
87 extern void Debugger(const char *message);
88 
89 #if DIAGNOSTIC
90 #if defined(assert)
91 #undef assert
92 #endif
93 #define assert(cond)    \
94     ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
95 #else
96 #include <kern/assert.h>
97 #endif /* DIAGNOSTIC */
98 
99 static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
100 static int ubc_umcallback(vnode_t, void *);
101 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
102 static void ubc_cs_free(struct ubc_info *uip);
103 
104 static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
105 static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
106 
107 ZONE_DEFINE_TYPE(ubc_info_zone, "ubc_info zone", struct ubc_info,
108     ZC_ZFREE_CLEARMEM);
109 static uint32_t cs_blob_generation_count = 1;
110 
111 /*
112  * CODESIGNING
113  * Routines to navigate code signing data structures in the kernel...
114  */
115 
116 ZONE_DEFINE_ID(ZONE_ID_CS_BLOB, "cs_blob zone", struct cs_blob,
117     ZC_READONLY | ZC_ZFREE_CLEARMEM);
118 
119 extern int cs_debug;
120 
121 #define PAGE_SHIFT_4K           (12)
122 
123 static boolean_t
124 cs_valid_range(
125 	const void *start,
126 	const void *end,
127 	const void *lower_bound,
128 	const void *upper_bound)
129 {
130 	if (upper_bound < lower_bound ||
131 	    end < start) {
132 		return FALSE;
133 	}
134 
135 	if (start < lower_bound ||
136 	    end > upper_bound) {
137 		return FALSE;
138 	}
139 
140 	return TRUE;
141 }
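/*
 * Illustrative sketch: cs_valid_range() is the bounds-check primitive used
 * throughout this file.  The helper below is hypothetical (not part of the
 * original source) and shows the typical pattern of verifying that a
 * structure lies entirely inside a mapped signature blob before using it.
 */
#if 0 /* example only */
static int
cd_header_in_bounds(const CS_CodeDirectory *cd,
    const char *blob_start, const char *blob_end)
{
	/* cd + 1 is the first byte past the fixed-size header */
	if (!cs_valid_range(cd, cd + 1, blob_start, blob_end)) {
		return EBADEXEC;
	}
	return 0;
}
#endif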
142 
143 typedef void (*cs_md_init)(void *ctx);
144 typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
145 typedef void (*cs_md_final)(void *hash, void *ctx);
146 
147 struct cs_hash {
148 	uint8_t             cs_type;    /* type code as per code signing */
149 	size_t              cs_size;    /* size of effective hash (may be truncated) */
150 	size_t              cs_digest_size;/* size of native hash */
151 	cs_md_init          cs_init;
152 	cs_md_update        cs_update;
153 	cs_md_final         cs_final;
154 };
155 
156 uint8_t
157 cs_hash_type(
158 	struct cs_hash const * const cs_hash)
159 {
160 	return cs_hash->cs_type;
161 }
162 
163 static const struct cs_hash cs_hash_sha1 = {
164 	.cs_type = CS_HASHTYPE_SHA1,
165 	.cs_size = CS_SHA1_LEN,
166 	.cs_digest_size = SHA_DIGEST_LENGTH,
167 	.cs_init = (cs_md_init)SHA1Init,
168 	.cs_update = (cs_md_update)SHA1Update,
169 	.cs_final = (cs_md_final)SHA1Final,
170 };
171 #if CRYPTO_SHA2
172 static const struct cs_hash cs_hash_sha256 = {
173 	.cs_type = CS_HASHTYPE_SHA256,
174 	.cs_size = SHA256_DIGEST_LENGTH,
175 	.cs_digest_size = SHA256_DIGEST_LENGTH,
176 	.cs_init = (cs_md_init)SHA256_Init,
177 	.cs_update = (cs_md_update)SHA256_Update,
178 	.cs_final = (cs_md_final)SHA256_Final,
179 };
180 static const struct cs_hash cs_hash_sha256_truncate = {
181 	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
182 	.cs_size = CS_SHA256_TRUNCATED_LEN,
183 	.cs_digest_size = SHA256_DIGEST_LENGTH,
184 	.cs_init = (cs_md_init)SHA256_Init,
185 	.cs_update = (cs_md_update)SHA256_Update,
186 	.cs_final = (cs_md_final)SHA256_Final,
187 };
188 static const struct cs_hash cs_hash_sha384 = {
189 	.cs_type = CS_HASHTYPE_SHA384,
190 	.cs_size = SHA384_DIGEST_LENGTH,
191 	.cs_digest_size = SHA384_DIGEST_LENGTH,
192 	.cs_init = (cs_md_init)SHA384_Init,
193 	.cs_update = (cs_md_update)SHA384_Update,
194 	.cs_final = (cs_md_final)SHA384_Final,
195 };
196 #endif
197 
198 static struct cs_hash const *
199 cs_find_md(uint8_t type)
200 {
201 	if (type == CS_HASHTYPE_SHA1) {
202 		return &cs_hash_sha1;
203 #if CRYPTO_SHA2
204 	} else if (type == CS_HASHTYPE_SHA256) {
205 		return &cs_hash_sha256;
206 	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
207 		return &cs_hash_sha256_truncate;
208 	} else if (type == CS_HASHTYPE_SHA384) {
209 		return &cs_hash_sha384;
210 #endif
211 	}
212 	return NULL;
213 }
214 
215 union cs_hash_union {
216 	SHA1_CTX                sha1ctxt;
217 	SHA256_CTX              sha256ctx;
218 	SHA384_CTX              sha384ctx;
219 };
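/*
 * Illustrative sketch: the cs_hash vtable and cs_hash_union are used
 * together to digest a buffer.  This helper is hypothetical (not part of
 * the original source); it shows the init/update/final pattern that the
 * real code in csblob_get_entitlements() and friends follows.
 */
#if 0 /* example only */
static int
cs_hash_one_page(uint8_t type, const void *page, size_t page_size,
    unsigned char out[CS_HASH_MAX_SIZE])
{
	union cs_hash_union ctx;
	struct cs_hash const *hashtype = cs_find_md(type);

	if (hashtype == NULL || hashtype->cs_digest_size > CS_HASH_MAX_SIZE) {
		return EBADEXEC;
	}
	hashtype->cs_init(&ctx);
	hashtype->cs_update(&ctx, page, page_size);
	hashtype->cs_final(out, &ctx);
	/* only the first hashtype->cs_size bytes are compared (truncated hashes) */
	return 0;
}
#endif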
220 
221 
222 /*
223  * Choose among different hash algorithms.
224  * Higher is better, 0 => don't use at all.
225  */
226 static const uint32_t hashPriorities[] = {
227 	CS_HASHTYPE_SHA1,
228 	CS_HASHTYPE_SHA256_TRUNCATED,
229 	CS_HASHTYPE_SHA256,
230 	CS_HASHTYPE_SHA384,
231 };
232 
233 static unsigned int
234 hash_rank(const CS_CodeDirectory *cd)
235 {
236 	uint32_t type = cd->hashType;
237 	unsigned int n;
238 
239 	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
240 		if (hashPriorities[n] == type) {
241 			return n + 1;
242 		}
243 	}
244 	return 0;       /* not supported */
245 }
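/*
 * Worked example: with the hashPriorities table above, hash_rank() maps
 * SHA1 -> 1, SHA256_TRUNCATED -> 2, SHA256 -> 3, SHA384 -> 4, and any
 * unknown type -> 0, so cs_validate_csblob() below prefers a SHA384 code
 * directory over SHA256, which in turn beats SHA1.
 */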
246 
247 
248 /*
249  * Locating a page hash
250  */
251 static const unsigned char *
252 hashes(
253 	const CS_CodeDirectory *cd,
254 	uint32_t page,
255 	size_t hash_len,
256 	const char *lower_bound,
257 	const char *upper_bound)
258 {
259 	const unsigned char *base, *top, *hash;
260 	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
261 
262 	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
263 
264 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
265 		/* Get first scatter struct */
266 		const SC_Scatter *scatter = (const SC_Scatter*)
267 		    ((const char*)cd + ntohl(cd->scatterOffset));
268 		uint32_t hashindex = 0, scount, sbase = 0;
269 		/* iterate all scatter structs */
270 		do {
271 			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
272 				if (cs_debug) {
273 					printf("CODE SIGNING: Scatter extends past Code Directory\n");
274 				}
275 				return NULL;
276 			}
277 
278 			scount = ntohl(scatter->count);
279 			uint32_t new_base = ntohl(scatter->base);
280 
281 			/* last scatter? */
282 			if (scount == 0) {
283 				return NULL;
284 			}
285 
286 			if ((hashindex > 0) && (new_base <= sbase)) {
287 				if (cs_debug) {
288 					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
289 					    sbase, new_base);
290 				}
291 				return NULL;    /* unordered scatter array */
292 			}
293 			sbase = new_base;
294 
295 			/* this scatter beyond page we're looking for? */
296 			if (sbase > page) {
297 				return NULL;
298 			}
299 
300 			if (sbase + scount >= page) {
301 				/* Found the scatter struct that is
302 				 * referencing our page */
303 
304 				/* base = address of first hash covered by scatter */
305 				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
306 				    hashindex * hash_len;
307 				/* top = address of first hash after this scatter */
308 				top = base + scount * hash_len;
309 				if (!cs_valid_range(base, top, lower_bound,
310 				    upper_bound) ||
311 				    hashindex > nCodeSlots) {
312 					return NULL;
313 				}
314 
315 				break;
316 			}
317 
318 			/* this scatter struct is before the page we're looking
319 			 * for. Iterate. */
320 			hashindex += scount;
321 			scatter++;
322 		} while (1);
323 
324 		hash = base + (page - sbase) * hash_len;
325 	} else {
326 		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
327 		top = base + nCodeSlots * hash_len;
328 		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
329 		    page > nCodeSlots) {
330 			return NULL;
331 		}
332 		assert(page < nCodeSlots);
333 
334 		hash = base + page * hash_len;
335 	}
336 
337 	if (!cs_valid_range(hash, hash + hash_len,
338 	    lower_bound, upper_bound)) {
339 		hash = NULL;
340 	}
341 
342 	return hash;
343 }
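/*
 * Worked example (not in the original source): hashes are stored densely
 * even when scatters are sparse.  Given the scatter array
 * { base = 0, count = 2 }, { base = 10, count = 3 }, { count = 0 },
 * pages 0-1 occupy hash indices 0-1 and pages 10-12 occupy indices 2-4,
 * so page 11 resolves to hashOffset + (2 + (11 - 10)) * hash_len.
 */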
344 
345 /*
346  * cs_validate_codedirectory
347  *
348  * Validate the pointers inside the code directory to make sure that
349  * all offsets and lengths are constrained within the buffer.
350  *
351  * Parameters:	cd			Pointer to code directory buffer
352  *		length			Length of buffer
353  *
354  * Returns:	0			Success
355  *		EBADEXEC		Invalid code signature
356  */
357 
358 static int
359 cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
360 {
361 	struct cs_hash const *hashtype;
362 
363 	if (length < sizeof(*cd)) {
364 		return EBADEXEC;
365 	}
366 	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
367 		return EBADEXEC;
368 	}
369 	if ((cd->pageSize != PAGE_SHIFT_4K) && (cd->pageSize != PAGE_SHIFT)) {
370 		printf("disallowing unsupported code signature page shift: %u\n", cd->pageSize);
371 		return EBADEXEC;
372 	}
373 	hashtype = cs_find_md(cd->hashType);
374 	if (hashtype == NULL) {
375 		return EBADEXEC;
376 	}
377 
378 	if (cd->hashSize != hashtype->cs_size) {
379 		return EBADEXEC;
380 	}
381 
382 	if (length < ntohl(cd->hashOffset)) {
383 		return EBADEXEC;
384 	}
385 
386 	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
387 	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
388 		return EBADEXEC;
389 	}
390 
391 	/* check that codeslots fits in the buffer */
392 	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
393 		return EBADEXEC;
394 	}
395 
396 	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
397 		if (length < ntohl(cd->scatterOffset)) {
398 			return EBADEXEC;
399 		}
400 
401 		const SC_Scatter *scatter = (const SC_Scatter *)
402 		    (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
403 		uint32_t nPages = 0;
404 
405 		/*
406 		 * Check each scatter buffer: since we don't know the
407 		 * length of the scatter buffer array, we have to
408 		 * check each entry.
409 		 */
410 		while (1) {
411 			/* check that the end of each scatter buffer is within the length */
412 			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
413 				return EBADEXEC;
414 			}
415 			uint32_t scount = ntohl(scatter->count);
416 			if (scount == 0) {
417 				break;
418 			}
419 			if (nPages + scount < nPages) {
420 				return EBADEXEC;
421 			}
422 			nPages += scount;
423 			scatter++;
424 
425 			/* XXX check that bases don't overlap */
426 			/* XXX check that targetOffset doesn't overlap */
427 		}
428 #if 0 /* rdar://12579439 */
429 		if (nPages != ntohl(cd->nCodeSlots)) {
430 			return EBADEXEC;
431 		}
432 #endif
433 	}
434 
435 	if (length < ntohl(cd->identOffset)) {
436 		return EBADEXEC;
437 	}
438 
439 	/* identifier is a NUL-terminated string */
440 	if (cd->identOffset) {
441 		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
442 		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
443 			return EBADEXEC;
444 		}
445 	}
446 
447 	/* team identifier is a NUL-terminated string */
448 	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
449 		if (length < ntohl(cd->teamOffset)) {
450 			return EBADEXEC;
451 		}
452 
453 		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
454 		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
455 			return EBADEXEC;
456 		}
457 	}
458 
459 	/* linkage is variable length binary data */
460 	if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
461 		const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
462 		const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);
463 
464 		if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
465 			return EBADEXEC;
466 		}
467 	}
468 
469 
470 	return 0;
471 }
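/*
 * Illustrative sketch: the slot-count checks above deliberately divide
 * rather than multiply so that an attacker-controlled count cannot
 * overflow.  The contrast below is a hypothetical fragment, not part of
 * the original source (cd, hashtype and length as in the function above).
 */
#if 0 /* example only */
	/* UNSAFE: nCodeSlots * cs_size may wrap, letting the check pass */
	if (ntohl(cd->hashOffset) + ntohl(cd->nCodeSlots) * hashtype->cs_size > length) {
		return EBADEXEC;
	}
	/* SAFE (the form used above): division cannot overflow, and
	 * length >= hashOffset has already been verified */
	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
		return EBADEXEC;
	}
#endif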
472 
473 /*
474  * cs_validate_blob: check that the blob's declared length fits within the buffer.
475  */
476 
477 static int
478 cs_validate_blob(const CS_GenericBlob *blob, size_t length)
479 {
480 	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
481 		return EBADEXEC;
482 	}
483 	return 0;
484 }
485 
486 /*
487  * cs_validate_csblob
488  *
489  * Validate the superblob/embedded code directory to make sure that
490  * all internal pointers are valid.
491  *
492  * Will validate both a superblob csblob and a "raw" code directory.
493  *
494  *
495  * Parameters:	buffer			Pointer to code signature
496  *		length			Length of buffer
497  *		rcd			returns pointer to code directory
498  *
499  * Returns:	0			Success
500  *		EBADEXEC		Invalid code signature
501  */
502 
503 static int
504 cs_validate_csblob(
505 	const uint8_t *addr,
506 	const size_t blob_size,
507 	const CS_CodeDirectory **rcd,
508 	const CS_GenericBlob **rentitlements,
509 	const CS_GenericBlob **rder_entitlements)
510 {
511 	const CS_GenericBlob *blob;
512 	int error;
513 	size_t length;
514 	const CS_GenericBlob *self_constraint = NULL;
515 	const CS_GenericBlob *parent_constraint = NULL;
516 	const CS_GenericBlob *responsible_proc_constraint = NULL;
517 	const CS_GenericBlob *library_constraint = NULL;
518 
519 	*rcd = NULL;
520 	*rentitlements = NULL;
521 	*rder_entitlements = NULL;
522 
523 	blob = (const CS_GenericBlob *)(const void *)addr;
524 
525 	length = blob_size;
526 	error = cs_validate_blob(blob, length);
527 	if (error) {
528 		return error;
529 	}
530 	length = ntohl(blob->length);
531 
532 	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
533 		const CS_SuperBlob *sb;
534 		uint32_t n, count;
535 		const CS_CodeDirectory *best_cd = NULL;
536 		unsigned int best_rank = 0;
537 
538 		if (length < sizeof(CS_SuperBlob)) {
539 			return EBADEXEC;
540 		}
541 
542 		sb = (const CS_SuperBlob *)blob;
543 		count = ntohl(sb->count);
544 
545 		/* check that the array of BlobIndex fits in the rest of the data */
546 		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
547 			return EBADEXEC;
548 		}
549 
550 		/* now check each BlobIndex */
551 		for (n = 0; n < count; n++) {
552 			const CS_BlobIndex *blobIndex = &sb->index[n];
553 			uint32_t type = ntohl(blobIndex->type);
554 			uint32_t offset = ntohl(blobIndex->offset);
555 			if (length < offset) {
556 				return EBADEXEC;
557 			}
558 
559 			const CS_GenericBlob *subBlob =
560 			    (const CS_GenericBlob *)(const void *)(addr + offset);
561 
562 			size_t subLength = length - offset;
563 
564 			if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
565 				return error;
566 			}
567 			subLength = ntohl(subBlob->length);
568 
569 			/* extra validation for CDs, that is also returned */
570 			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
571 				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
572 				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
573 					return error;
574 				}
575 				unsigned int rank = hash_rank(candidate);
576 				if (cs_debug > 3) {
577 					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
578 				}
579 				if (best_cd == NULL || rank > best_rank) {
580 					best_cd = candidate;
581 					best_rank = rank;
582 
583 					if (cs_debug > 2) {
584 						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
585 					}
586 					*rcd = best_cd;
587 				} else if (best_cd != NULL && rank == best_rank) {
588 					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
589 					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
590 					return EBADEXEC;
591 				}
592 			} else if (type == CSSLOT_ENTITLEMENTS) {
593 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
594 					return EBADEXEC;
595 				}
596 				if (*rentitlements != NULL) {
597 					printf("multiple entitlements blobs\n");
598 					return EBADEXEC;
599 				}
600 				*rentitlements = subBlob;
601 			} else if (type == CSSLOT_DER_ENTITLEMENTS) {
602 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_DER_ENTITLEMENTS) {
603 					return EBADEXEC;
604 				}
605 				if (*rder_entitlements != NULL) {
606 					printf("multiple der entitlements blobs\n");
607 					return EBADEXEC;
608 				}
609 				*rder_entitlements = subBlob;
610 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_SELF) {
611 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
612 					return EBADEXEC;
613 				}
614 				if (self_constraint != NULL) {
615 					printf("multiple self constraint blobs\n");
616 					return EBADEXEC;
617 				}
618 				self_constraint = subBlob;
619 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_PARENT) {
620 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
621 					return EBADEXEC;
622 				}
623 				if (parent_constraint != NULL) {
624 					printf("multiple parent constraint blobs\n");
625 					return EBADEXEC;
626 				}
627 				parent_constraint = subBlob;
628 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE) {
629 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
630 					return EBADEXEC;
631 				}
632 				if (responsible_proc_constraint != NULL) {
633 					printf("multiple responsible process constraint blobs\n");
634 					return EBADEXEC;
635 				}
636 				responsible_proc_constraint = subBlob;
637 			} else if (type == CSSLOT_LIBRARY_CONSTRAINT) {
638 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
639 					return EBADEXEC;
640 				}
641 				if (library_constraint != NULL) {
642 					printf("multiple library constraint blobs\n");
643 					return EBADEXEC;
644 				}
645 				library_constraint = subBlob;
646 			}
647 		}
648 	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
649 		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
650 			return error;
651 		}
652 		*rcd = (const CS_CodeDirectory *)blob;
653 	} else {
654 		return EBADEXEC;
655 	}
656 
657 	if (*rcd == NULL) {
658 		return EBADEXEC;
659 	}
660 
661 	return 0;
662 }
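/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): the usual pattern is to validate once up front and then trust
 * the returned pointers for the lifetime of the mapped blob.
 */
#if 0 /* example only */
static int
validate_signature_example(const uint8_t *addr, size_t blob_size)
{
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *ents = NULL;
	const CS_GenericBlob *der_ents = NULL;
	int error;

	error = cs_validate_csblob(addr, blob_size, &cd, &ents, &der_ents);
	if (error != 0) {
		return error;   /* EBADEXEC: reject the signature */
	}
	/* cd now points at the highest-ranked (see hash_rank()) CodeDirectory */
	return 0;
}
#endif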
663 
664 /*
665  * csblob_find_blob_bytes
666  *
667  * Find a blob in the superblob/code directory. The blob must have
668  * been validated by cs_validate_csblob() before calling
669  * this. Use csblob_find_blob() instead.
670  *
671  * Will also find a "raw" code directory if it is stored that way, in
672  * addition to searching the superblob.
673  *
674  * Parameters:	buffer			Pointer to code signature
675  *		length			Length of buffer
676  *		type			type of blob to find
677  *		magic			the magic number for that blob
678  *
679  * Returns:	pointer			Success
680  *		NULL			Buffer not found
681  */
682 
683 const CS_GenericBlob *
684 csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
685 {
686 	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
687 
688 	if ((addr + length) < addr) {
689 		panic("CODE SIGNING: CS Blob length overflow for addr: %p", addr);
690 	}
691 
692 	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
693 		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
694 		size_t n, count = ntohl(sb->count);
695 
696 		for (n = 0; n < count; n++) {
697 			if (ntohl(sb->index[n].type) != type) {
698 				continue;
699 			}
700 			uint32_t offset = ntohl(sb->index[n].offset);
701 			if (length - sizeof(const CS_GenericBlob) < offset) {
702 				return NULL;
703 			}
704 			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
705 			if (ntohl(blob->magic) != magic) {
706 				continue;
707 			}
708 			if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
709 				panic("CODE SIGNING: CS Blob length overflow for blob at: %p", blob);
710 			} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
711 				continue;
712 			}
713 			return blob;
714 		}
715 	} else if (type == CSSLOT_CODEDIRECTORY && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
716 	    && magic == CSMAGIC_CODEDIRECTORY) {
717 		if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
718 			panic("CODE SIGNING: CS Blob length overflow for code directory blob at: %p", blob);
719 		} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
720 			return NULL;
721 		}
722 		return blob;
723 	}
724 	return NULL;
725 }
726 
727 
728 const CS_GenericBlob *
729 csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
730 {
731 	if ((csblob->csb_flags & CS_VALID) == 0) {
732 		return NULL;
733 	}
734 	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
735 }
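/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): callers pass the slot type together with the magic expected at
 * that slot, e.g. looking up the CMS signature wrapper.
 */
#if 0 /* example only */
static const CS_GenericBlob *
find_cms_wrapper_example(struct cs_blob *csblob)
{
	/* CSSLOT_SIGNATURESLOT holds a CSMAGIC_BLOBWRAPPER around the CMS data */
	return csblob_find_blob(csblob, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
}
#endif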
736 
737 static const uint8_t *
738 find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
739 {
740 	/* there is no zero special slot since that is the first code slot */
741 	if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
742 		return NULL;
743 	}
744 
745 	return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
746 }
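/*
 * Layout note (illustration, not in the original source): special slots
 * are stored immediately before the code slots, growing backwards from
 * hashOffset, so special slot N lives at hashOffset - N * slotsize:
 *
 *   [slot -nSpecialSlots] ... [slot -2] [slot -1] | [code slot 0] [code slot 1] ...
 *                                                 ^ cd + ntohl(cd->hashOffset)
 */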
747 
748 static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
749 
750 static int
751 csblob_find_special_slot_blob(struct cs_blob* csblob, uint32_t slot, uint32_t magic, const CS_GenericBlob **out_start, size_t *out_length)
752 {
753 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
754 	const CS_GenericBlob *blob;
755 	const CS_CodeDirectory *code_dir;
756 	const uint8_t *embedded_hash;
757 	union cs_hash_union context;
758 
759 	if (out_start) {
760 		*out_start = NULL;
761 	}
762 	if (out_length) {
763 		*out_length = 0;
764 	}
765 
766 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
767 		return EBADEXEC;
768 	}
769 
770 	code_dir = csblob->csb_cd;
771 
772 	blob = csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, slot, magic);
773 
774 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, slot);
775 
776 	if (embedded_hash == NULL) {
777 		if (blob) {
778 			return EBADEXEC;
779 		}
780 		return 0;
781 	} else if (blob == NULL) {
782 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
783 			return EBADEXEC;
784 		} else {
785 			return 0;
786 		}
787 	}
788 
789 	csblob->csb_hashtype->cs_init(&context);
790 	csblob->csb_hashtype->cs_update(&context, blob, ntohl(blob->length));
791 	csblob->csb_hashtype->cs_final(computed_hash, &context);
792 
793 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
794 		return EBADEXEC;
795 	}
796 	if (out_start) {
797 		*out_start = blob;
798 	}
799 	if (out_length) {
800 		*out_length = ntohl(blob->length);
801 	}
802 
803 	return 0;
804 }
805 
806 int
807 csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
808 {
809 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
810 	const CS_GenericBlob *entitlements;
811 	const CS_CodeDirectory *code_dir;
812 	const uint8_t *embedded_hash;
813 	union cs_hash_union context;
814 
815 	*out_start = NULL;
816 	*out_length = 0;
817 
818 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
819 		return EBADEXEC;
820 	}
821 
822 	code_dir = csblob->csb_cd;
823 
824 	if ((csblob->csb_flags & CS_VALID) == 0) {
825 		entitlements = NULL;
826 	} else {
827 		entitlements = csblob->csb_entitlements_blob;
828 	}
829 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
830 
831 	if (embedded_hash == NULL) {
832 		if (entitlements) {
833 			return EBADEXEC;
834 		}
835 		return 0;
836 	} else if (entitlements == NULL) {
837 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
838 			return EBADEXEC;
839 		} else {
840 			return 0;
841 		}
842 	}
843 
844 	csblob->csb_hashtype->cs_init(&context);
845 	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
846 	csblob->csb_hashtype->cs_final(computed_hash, &context);
847 
848 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
849 		return EBADEXEC;
850 	}
851 
852 	*out_start = __DECONST(void *, entitlements);
853 	*out_length = ntohl(entitlements->length);
854 
855 	return 0;
856 }
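/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): a non-NULL result is a CS_GenericBlob whose hash has just been
 * checked against the code directory's CSSLOT_ENTITLEMENTS special slot.
 */
#if 0 /* example only */
static int
read_entitlements_example(struct cs_blob *csblob)
{
	void *ents = NULL;
	size_t ents_len = 0;
	int error;

	error = csblob_get_entitlements(csblob, &ents, &ents_len);
	if (error != 0) {
		return error;   /* e.g. blob present but hash mismatch */
	}
	if (ents != NULL) {
		/* ents points at a hash-verified blob of ents_len bytes */
	}
	return 0;
}
#endif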
857 
858 const CS_GenericBlob*
859 csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)
860 {
861 	if ((csblob->csb_flags & CS_VALID) == 0) {
862 		return NULL;
863 	}
864 
865 	return csblob->csb_der_entitlements_blob;
866 }
867 
868 int
869 csblob_get_der_entitlements(struct cs_blob *csblob, const CS_GenericBlob **out_start, size_t *out_length)
870 {
871 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
872 	const CS_GenericBlob *der_entitlements;
873 	const CS_CodeDirectory *code_dir;
874 	const uint8_t *embedded_hash;
875 	union cs_hash_union context;
876 
877 	*out_start = NULL;
878 	*out_length = 0;
879 
880 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
881 		return EBADEXEC;
882 	}
883 
884 	code_dir = csblob->csb_cd;
885 
886 	if ((csblob->csb_flags & CS_VALID) == 0) {
887 		der_entitlements = NULL;
888 	} else {
889 		der_entitlements = csblob->csb_der_entitlements_blob;
890 	}
891 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_DER_ENTITLEMENTS);
892 
893 	if (embedded_hash == NULL) {
894 		if (der_entitlements) {
895 			return EBADEXEC;
896 		}
897 		return 0;
898 	} else if (der_entitlements == NULL) {
899 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
900 			return EBADEXEC;
901 		} else {
902 			return 0;
903 		}
904 	}
905 
906 	csblob->csb_hashtype->cs_init(&context);
907 	csblob->csb_hashtype->cs_update(&context, der_entitlements, ntohl(der_entitlements->length));
908 	csblob->csb_hashtype->cs_final(computed_hash, &context);
909 
910 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
911 		return EBADEXEC;
912 	}
913 
914 	*out_start = der_entitlements;
915 	*out_length = ntohl(der_entitlements->length);
916 
917 	return 0;
918 }
919 
920 static bool
921 ubc_cs_blob_pagewise_allocate(
922 	__unused vm_size_t size)
923 {
924 #if CODE_SIGNING_MONITOR
925 	/* If the monitor isn't enabled, then we don't need to page-align */
926 	if (csm_enabled() == false) {
927 		return false;
928 	}
929 
930 	/*
931 	 * Small allocations can be managed by the monitor itself. We only need to allocate
932 	 * page-wise when it is a sufficiently large allocation and the monitor cannot manage
933 	 * it on its own.
934 	 */
935 	if (size <= csm_signature_size_limit()) {
936 		return false;
937 	}
938 
939 	return true;
940 #else
941 	/* Without a monitor, we never need to page align */
942 	return false;
943 #endif /* CODE_SIGNING_MONITOR */
944 }
945 
946 int
947 csblob_register_profile(
948 	__unused struct cs_blob *csblob,
949 	__unused cs_profile_register_t *profile)
950 {
951 #if CODE_SIGNING_MONITOR
952 	/* Profiles only need to be registered for monitor environments */
953 	assert(profile->data != NULL);
954 	assert(profile->size != 0);
955 	assert(csblob != NULL);
956 
957 	kern_return_t kr = csm_register_provisioning_profile(
958 		profile->uuid,
959 		profile->data, profile->size);
960 
961 	if ((kr != KERN_SUCCESS) && (kr != KERN_ALREADY_IN_SET)) {
962 		if (kr == KERN_NOT_SUPPORTED) {
963 			return 0;
964 		}
965 		return EPERM;
966 	}
967 
968 	/* Attempt to trust the profile */
969 	kr = csm_trust_provisioning_profile(
970 		profile->uuid,
971 		profile->sig_data, profile->sig_size);
972 
973 	if (kr != KERN_SUCCESS) {
974 		return EPERM;
975 	}
976 
977 	/* Associate the profile with the monitor's signature object */
978 	kr = csm_associate_provisioning_profile(
979 		csblob->csb_csm_obj,
980 		profile->uuid);
981 
982 	if (kr != KERN_SUCCESS) {
983 		return EPERM;
984 	}
985 
986 	return 0;
987 #else
988 	return 0;
989 #endif /* CODE_SIGNING_MONITOR */
990 }
991 
992 int
993 csblob_register_profile_uuid(
994 	struct cs_blob *csblob,
995 	const uuid_t profile_uuid,
996 	void *profile_addr,
997 	vm_size_t profile_size)
998 {
999 	cs_profile_register_t profile = {
1000 		.sig_data = NULL,
1001 		.sig_size = 0,
1002 		.data = profile_addr,
1003 		.size = profile_size
1004 	};
1005 
1006 	/* Copy the provided UUID */
1007 	memcpy(profile.uuid, profile_uuid, sizeof(profile.uuid));
1008 
1009 	return csblob_register_profile(csblob, &profile);
1010 }
1011 
1012 /*
1013  * CODESIGNING
1014  * End of routines to navigate code signing data structures in the kernel.
1015  */
1016 
1017 
1018 
1019 /*
1020  * ubc_info_init
1021  *
1022  * Allocate and attach an empty ubc_info structure to a vnode
1023  *
1024  * Parameters:	vp			Pointer to the vnode
1025  *
1026  * Returns:	0			Success
1027  *	vnode_size:ENOMEM		Not enough space
1028  *	vnode_size:???			Other error from vnode_getattr
1029  *
1030  */
1031 int
1032 ubc_info_init(struct vnode *vp)
1033 {
1034 	return ubc_info_init_internal(vp, 0, 0);
1035 }
1036 
1037 
1038 /*
1039  * ubc_info_init_withsize
1040  *
1041  * Allocate and attach a sized ubc_info structure to a vnode
1042  *
1043  * Parameters:	vp			Pointer to the vnode
1044  *		filesize		The size of the file
1045  *
1046  * Returns:	0			Success
1047  *	vnode_size:ENOMEM		Not enough space
1048  *	vnode_size:???			Other error from vnode_getattr
1049  */
1050 int
1051 ubc_info_init_withsize(struct vnode *vp, off_t filesize)
1052 {
1053 	return ubc_info_init_internal(vp, 1, filesize);
1054 }
1055 
1056 
1057 /*
1058  * ubc_info_init_internal
1059  *
1060  * Allocate and attach a ubc_info structure to a vnode
1061  *
1062  * Parameters:	vp			Pointer to the vnode
1063  *		withfsize{0,1}		Zero if the size should be obtained
1064  *					from the vnode; otherwise, use filesize
1065  *		filesize		The size of the file, if withfsize == 1
1066  *
1067  * Returns:	0			Success
1068  *	vnode_size:ENOMEM		Not enough space
1069  *	vnode_size:???			Other error from vnode_getattr
1070  *
1071  * Notes:	We call a blocking zalloc(), and the zone was created as an
1072  *		expandable and collectable zone, so if no memory is available,
1073  *		it is possible for zalloc() to block indefinitely.  zalloc()
1074  *		may also panic if the zone of zones is exhausted, since it's
1075  *		NOT expandable.
1076  *
1077  *		We unconditionally call vnode_pager_setup(), even if this is
1078  *		a reuse of a ubc_info; in that case, we should probably assert
1079  *		that it does not already have a pager association, but do not.
1080  *
1081  *		Since memory_object_create_named() can only fail from receiving
1082  *		an invalid pager argument, the explicit check and panic is
1083  *		merely precautionary.
1084  */
1085 static int
1086 ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
1087 {
1088 	struct ubc_info *uip;
1089 	void *  pager;
1090 	int error = 0;
1091 	kern_return_t kret;
1092 	memory_object_control_t control;
1093 
1094 	uip = vp->v_ubcinfo;
1095 
1096 	/*
1097 	 * If there is not already a ubc_info attached to the vnode, we
1098 	 * attach one; otherwise, we will reuse the one that's there.
1099 	 */
1100 	if (uip == UBC_INFO_NULL) {
1101 		uip = zalloc_flags(ubc_info_zone, Z_WAITOK | Z_ZERO);
1102 
1103 		uip->ui_vnode = vp;
1104 		uip->ui_flags = UI_INITED;
1105 		uip->ui_ucred = NOCRED;
1106 	}
1107 	assert(uip->ui_flags != UI_NONE);
1108 	assert(uip->ui_vnode == vp);
1109 
1110 	/* now set this ubc_info in the vnode */
1111 	vp->v_ubcinfo = uip;
1112 
1113 	/*
1114 	 * Allocate a pager object for this vnode
1115 	 *
1116 	 * XXX The value of the pager parameter is currently ignored.
1117 	 * XXX Presumably, this API changed to avoid the race between
1118 	 * XXX setting the pager and the UI_HASPAGER flag.
1119 	 */
1120 	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
1121 	assert(pager);
1122 
1123 	/*
1124 	 * Explicitly set the pager into the ubc_info, after setting the
1125 	 * UI_HASPAGER flag.
1126 	 */
1127 	SET(uip->ui_flags, UI_HASPAGER);
1128 	uip->ui_pager = pager;
1129 
1130 	/*
1131 	 * Note: We cannot use VNOP_GETATTR() to get an accurate
1132 	 * value of ui_size because this may be an NFS vnode, and
1133 	 * nfs_getattr() can call vinvalbuf(); if this happens,
1134 	 * ubc_info is not set up to deal with that event.
1135 	 * So use bogus size.
1136 	 * So we use a bogus size.
1137 
1138 	/*
1139 	 * create a vnode - vm_object association
1140 	 * memory_object_create_named() creates a "named" reference on the
1141 	 * memory object we hold this reference as long as the vnode is
1142 	 * memory object; we hold this reference as long as the vnode is
1143 	 * on the vnode pager we passed it, we can drop the reference
1144 	 * vnode_pager_setup() returned here.
1145 	 */
1146 	kret = memory_object_create_named(pager,
1147 	    (memory_object_size_t)uip->ui_size, &control);
1148 	vnode_pager_deallocate(pager);
1149 	if (kret != KERN_SUCCESS) {
1150 		panic("ubc_info_init: memory_object_create_named returned %d", kret);
1151 	}
1152 
1153 	assert(control);
1154 	uip->ui_control = control;      /* cache the value of the mo control */
1155 	SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */
1156 
1157 	if (withfsize == 0) {
1158 		/* initialize the size */
1159 		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
1160 		if (error) {
1161 			uip->ui_size = 0;
1162 		}
1163 	} else {
1164 		uip->ui_size = filesize;
1165 	}
1166 	vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */
1167 
1168 	return error;
1169 }
1170 
1171 
1172 /*
1173  * ubc_info_free
1174  *
1175  * Free a ubc_info structure
1176  *
1177  * Parameters:	uip			A pointer to the ubc_info to free
1178  *
1179  * Returns:	(void)
1180  *
1181  * Notes:	If there is a credential that has subsequently been associated
1182  *		with the ubc_info, the reference to the credential is dropped.
1183  *
1184  *		It's actually impossible for a ubc_info.ui_control to take the
1185  *		value MEMORY_OBJECT_CONTROL_NULL.
1186  */
1187 static void
1188 ubc_info_free(struct ubc_info *uip)
1189 {
1190 	if (IS_VALID_CRED(uip->ui_ucred)) {
1191 		kauth_cred_unref(&uip->ui_ucred);
1192 	}
1193 
1194 	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
1195 		memory_object_control_deallocate(uip->ui_control);
1196 	}
1197 
1198 	cluster_release(uip);
1199 	ubc_cs_free(uip);
1200 
1201 	zfree(ubc_info_zone, uip);
1202 	return;
1203 }
1204 
1205 
1206 void
1207 ubc_info_deallocate(struct ubc_info *uip)
1208 {
1209 	ubc_info_free(uip);
1210 }
1211 
1212 /*
1213  * ubc_setsize_ex
1214  *
1215  * Tell the VM that the size of the file represented by the vnode has
1216  * changed
1217  *
1218  * Parameters:	vp	   The vp whose backing file size is
1219  *					   being changed
1220  *				nsize  The new size of the backing file
1221  *				opts   Options
1222  *
1223  * Returns:	EINVAL for new size < 0
1224  *			ENOENT if no UBC info exists
1225  *          EAGAIN if the UBC_SETSIZE_NO_FS_REENTRY option is set and nsize < osize
1226  *          Other errors (mapped to errno_t) returned by VM functions
1227  *
1228  * Notes:   This function will indicate success if the new size is the
1229  *		    same or larger than the old size (in this case, the
1230  *		    remainder of the file will require modification or use of
1231  *		    an existing upl to access successfully).
1232  *
1233  *		    This function will fail if the new file size is smaller,
1234  *		    and the memory region being invalidated was unable to
1235  *		    actually be invalidated and/or the last page could not be
1236  *		    flushed, if the new size is not aligned to a page
1237  *		    boundary.  This is usually indicative of an I/O error.
1238  */
1239 errno_t
1240 ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1241 {
1242 	off_t osize;    /* ui_size before change */
1243 	off_t lastpg, olastpgend, lastoff;
1244 	struct ubc_info *uip;
1245 	memory_object_control_t control;
1246 	kern_return_t kret = KERN_SUCCESS;
1247 
1248 	if (nsize < (off_t)0) {
1249 		return EINVAL;
1250 	}
1251 
1252 	if (!UBCINFOEXISTS(vp)) {
1253 		return ENOENT;
1254 	}
1255 
1256 	uip = vp->v_ubcinfo;
1257 	osize = uip->ui_size;
1258 
1259 	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
1260 		return EAGAIN;
1261 	}
1262 
1263 	/*
1264 	 * Update the size before flushing the VM
1265 	 */
1266 	uip->ui_size = nsize;
1267 
1268 	if (nsize >= osize) {   /* Nothing more to do */
1269 		if (nsize > osize) {
1270 			lock_vnode_and_post(vp, NOTE_EXTEND);
1271 		}
1272 
1273 		return 0;
1274 	}
1275 
1276 	/*
1277 	 * When the file shrinks, invalidate the pages beyond the
1278 	 * new size. Also get rid of garbage beyond nsize on the
1279 	 * last page. The ui_size already has the nsize, so any
1280 	 * subsequent page-in will zero-fill the tail properly
1281 	 */
1282 	lastpg = trunc_page_64(nsize);
1283 	olastpgend = round_page_64(osize);
1284 	control = uip->ui_control;
1285 	assert(control);
1286 	lastoff = (nsize & PAGE_MASK_64);
1287 
1288 	if (lastoff) {
1289 		upl_t           upl;
1290 		upl_page_info_t *pl;
1291 
1292 		/*
1293 		 * new EOF ends up in the middle of a page
1294 		 * zero the tail of this page if it's currently
1295 		 * present in the cache
1296 		 */
1297 		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
1298 
1299 		if (kret != KERN_SUCCESS) {
1300 			panic("ubc_setsize: ubc_create_upl (error = %d)", kret);
1301 		}
1302 
1303 		if (upl_valid_page(pl, 0)) {
1304 			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1305 		}
1306 
1307 		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1308 
1309 		lastpg += PAGE_SIZE_64;
1310 	}
1311 	if (olastpgend > lastpg) {
1312 		int     flags;
1313 
1314 		if (lastpg == 0) {
1315 			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
1316 		} else {
1317 			flags = MEMORY_OBJECT_DATA_FLUSH;
1318 		}
1319 		/*
1320 		 * invalidate the pages beyond the new EOF page
1321 		 *
1322 		 */
1323 		kret = memory_object_lock_request(control,
1324 		    (memory_object_offset_t)lastpg,
1325 		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1326 		    MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1327 		if (kret != KERN_SUCCESS) {
1328 			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1329 		}
1330 	}
1331 	return mach_to_bsd_errno(kret);
1332 }
1333 
1334 // Returns true for success
1335 int
1336 ubc_setsize(vnode_t vp, off_t nsize)
1337 {
1338 	return ubc_setsize_ex(vp, nsize, 0) == 0;
1339 }
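/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): note the inverted conventions of the two entry points.
 */
#if 0 /* example only */
static errno_t
shrink_file_example(vnode_t vp, off_t new_size)
{
	errno_t err = ubc_setsize_ex(vp, new_size, 0);  /* 0 == success */

	if (err != 0) {
		return err;
	}
	/* equivalently, via the boolean wrapper: nonzero == success */
	/* if (!ubc_setsize(vp, new_size)) { return EIO; } */
	return 0;
}
#endif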
1340 
1341 /*
1342  * ubc_getsize
1343  *
1344  * Get the size of the file associated with the specified vnode
1345  *
1346  * Parameters:	vp			The vnode whose size is of interest
1347  *
1348  * Returns:	0			There is no ubc_info associated with
1349  *					this vnode, or the size is zero
1350  *		!0			The size of the file
1351  *
1352  * Notes:	Using this routine, it is not possible for a caller to
1353  *		successfully distinguish between a vnode associated with a zero
1354  *		length file, and a vnode with no associated ubc_info.  The
1355  *		caller therefore needs to not care, or needs to ensure that
1356  *		they have previously successfully called ubc_info_init() or
1357  *		ubc_info_init_withsize().
1358  */
1359 off_t
1360 ubc_getsize(struct vnode *vp)
1361 {
1362 	/* people depend on the side effect of this working this way
1363 	 * as they call this for directories
1364 	 */
1365 	if (!UBCINFOEXISTS(vp)) {
1366 		return (off_t)0;
1367 	}
1368 	return vp->v_ubcinfo->ui_size;
1369 }
1370 
1371 
1372 /*
1373  * ubc_umount
1374  *
1375  * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1376  * mount point
1377  *
1378  * Parameters:	mp			The mount point
1379  *
1380  * Returns:	0			Success
1381  *
1382  * Notes:	There is no failure indication for this function.
1383  *
1384  *		This function is used in the unmount path; since it may block
1385  *		I/O indefinitely, it should not be used in the forced unmount
1386  *		path, since a device unavailability could also block that
1387  *		indefinitely.
1388  *
1389  *		Because there is no device ejection interlock on USB, FireWire,
1390  *		or similar devices, it's possible that an ejection that begins
1391  *		subsequent to the vnode_iterate() completing, either on one of
1392  *		those devices, or a network mount for which the server quits
1393  *		responding, etc., may cause the caller to block indefinitely.
1394  */
1395 __private_extern__ int
1396 ubc_umount(struct mount *mp)
1397 {
1398 	vnode_iterate(mp, 0, ubc_umcallback, 0);
1399 	return 0;
1400 }
1401 
1402 
1403 /*
1404  * ubc_umcallback
1405  *
1406  * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1407  * and vnode_iterate() for details of implementation.
1408  */
1409 static int
1410 ubc_umcallback(vnode_t vp, __unused void * args)
1411 {
1412 	if (UBCINFOEXISTS(vp)) {
1413 		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1414 	}
1415 	return VNODE_RETURNED;
1416 }
1417 
1418 
1419 /*
1420  * ubc_getcred
1421  *
1422  * Get the credentials currently active for the ubc_info associated with the
1423  * vnode.
1424  *
1425  * Parameters:	vp			The vnode whose ubc_info credentials
1426  *					are to be retrieved
1427  *
1428  * Returns:	!NOCRED			The credentials
1429  *		NOCRED			If there is no ubc_info for the vnode,
1430  *					or if there is one, but it has not had
1431  *					any credentials associated with it.
1432  */
1433 kauth_cred_t
1434 ubc_getcred(struct vnode *vp)
1435 {
1436 	if (UBCINFOEXISTS(vp)) {
1437 		return vp->v_ubcinfo->ui_ucred;
1438 	}
1439 
1440 	return NOCRED;
1441 }
1442 
1443 
1444 /*
1445  * ubc_setthreadcred
1446  *
1447  * If they are not already set, set the credentials of the ubc_info structure
1448  * associated with the vnode to those of the supplied thread; otherwise leave
1449  * them alone.
1450  *
1451  * Parameters:	vp			The vnode whose ubc_info creds are to
1452  *					be set
1453  *		p			The process whose credentials are to
1454  *					be used, if not running on an assumed
1455  *					credential
1456  *		thread			The thread whose credentials are to
1457  *					be used
1458  *
1459  * Returns:	0			This vnode has no associated ubc_info
1460  *		1			Success
1461  *
1462  * Notes:	This function is generally used only in the following cases:
1463  *
1464  *		o	a memory mapped file via the mmap() system call
1465  *		o	a swap store backing file
1466  *		o	subsequent to a successful write via vn_write()
1467  *
1468  *		The information is then used by the NFS client in order to
1469  *		cons up a wire message in either the page-in or page-out path.
1470  *
1471  *		There are two potential problems with the use of this API:
1472  *
1473  *		o	Because the write path only set it on a successful
1474  *			write, there is a race window between setting the
1475  *			credential and its use to evict the pages to the
1476  *			remote file server
1477  *
1478  *		o	Because a page-in may occur prior to a write, the
1479  *			credential may not be set at this time, if the page-in
1480  *			is not the result of a mapping established via mmap().
1481  *
1482  *		In both these cases, this will be triggered from the paging
1483  *		path, which will instead use the credential of the current
1484  *		process, which in this case is either the dynamic_pager or
1485  *		the kernel task, both of which utilize "root" credentials.
1486  *
1487  *		This may potentially permit operations to occur which should
1488  *		be denied, or it may cause to be denied operations which
1489  *		should be permitted, depending on the configuration of the NFS
1490  *		server.
1491  */
1492 int
1493 ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
1494 {
1495 #pragma unused(p, thread)
1496 	assert(p == current_proc());
1497 	assert(thread == current_thread());
1498 
1499 	return ubc_setcred(vp, kauth_cred_get());
1500 }
1501 
1502 
1503 /*
1504  * ubc_setcred
1505  *
1506  * If they are not already set, set the credentials of the ubc_info structure
1507  * associated with the vnode to those specified; otherwise leave them
1508  * alone.
1509  *
1510  * Parameters:	vp			The vnode whose ubc_info creds are to
1511  *					be set
1512  *		ucred			The credentials to use
1513  *
1514  * Returns:	0			This vnode has no associated ubc_info
1515  *		1			Success
1516  *
1517  * Notes:	The return values for this function are inverted from nearly
1518  *		all other uses in the kernel.
1519  *
1520  *		See also ubc_setthreadcred(), above.
1521  */
1522 int
1523 ubc_setcred(struct vnode *vp, kauth_cred_t ucred)
1524 {
1525 	struct ubc_info *uip;
1526 
1527 	/* If there is no ubc_info, deny the operation */
1528 	if (!UBCINFOEXISTS(vp)) {
1529 		return 0;
1530 	}
1531 
1532 	/*
1533 	 * Check to see if there is already a credential reference in the
1534 	 * ubc_info; if there is not, take one on the supplied credential.
1535 	 */
1536 	vnode_lock(vp);
1537 	uip = vp->v_ubcinfo;
1538 	if (!IS_VALID_CRED(uip->ui_ucred)) {
1539 		kauth_cred_ref(ucred);
1540 		uip->ui_ucred = ucred;
1541 	}
1542 	vnode_unlock(vp);
1543 
1544 	return 1;
1545 }
1546 
1547 /*
1548  * ubc_getpager
1549  *
1550  * Get the pager associated with the ubc_info associated with the vnode.
1551  *
1552  * Parameters:	vp			The vnode to obtain the pager from
1553  *
1554  * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
1555  *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
1556  *
1557  * Notes:	For each vnode that has a ubc_info associated with it, that
1558  *		ubc_info SHALL have a pager associated with it, so in the
1559  *		normal case, it's impossible to return VNODE_PAGER_NULL for
1560  *		a vnode with an associated ubc_info.
1561  */
1562 __private_extern__ memory_object_t
1563 ubc_getpager(struct vnode *vp)
1564 {
1565 	if (UBCINFOEXISTS(vp)) {
1566 		return vp->v_ubcinfo->ui_pager;
1567 	}
1568 
1569 	return 0;
1570 }
1571 
1572 
1573 /*
1574  * ubc_getobject
1575  *
1576  * Get the memory object control associated with the ubc_info associated with
1577  * the vnode
1578  *
1579  * Parameters:	vp			The vnode to obtain the memory object
1580  *					from
1581  *		flags			DEPRECATED
1582  *
1583  * Returns:	!MEMORY_OBJECT_CONTROL_NULL	The memory object control
1584  *		MEMORY_OBJECT_CONTROL_NULL	There is no ubc_info for this vnode
1585  *
1586  * Notes:	Historically, if the flags were not "do not reactivate", this
1587  *		function would look up the memory object using the pager if
1588  *		it did not exist (this could be the case if the vnode had
1589  *		been previously reactivated).  The flags would also permit a
1590  *		hold to be requested, which would have created an object
1591  *		reference, if one had not already existed.  This usage is
1592  *		deprecated, as it would permit a race between finding and
1593  *		taking the reference vs. a single reference being dropped in
1594  *		another thread.
1595  */
1596 memory_object_control_t
1597 ubc_getobject(struct vnode *vp, __unused int flags)
1598 {
1599 	if (UBCINFOEXISTS(vp)) {
1600 		return vp->v_ubcinfo->ui_control;
1601 	}
1602 
1603 	return MEMORY_OBJECT_CONTROL_NULL;
1604 }
1605 
1606 /*
1607  * ubc_blktooff
1608  *
1609  * Convert a given block number to a memory backing object (file) offset for a
1610  * given vnode
1611  *
1612  * Parameters:	vp			The vnode in which the block is located
1613  *		blkno			The block number to convert
1614  *
1615  * Returns:	!-1			The offset into the backing object
1616  *		-1			There is no ubc_info associated with
1617  *					the vnode
1618  *		-1			An error occurred in the underlying VFS
1619  *					while translating the block to an
1620  *					offset; the most likely cause is that
1621  *					the caller specified a block past the
1622  *					end of the file, but this could also be
1623  *					any other error from VNOP_BLKTOOFF().
1624  *
1625  * Note:	Representing the error in band loses some information, but does
1626  *		not occlude a valid offset, since an off_t of -1 is normally
1627  *		used to represent EOF.  If we had a more reliable constant in
1628  *		our header files for it (i.e. explicitly cast to an off_t), we
1629  *		would use it here instead.
1630  */
1631 off_t
1632 ubc_blktooff(vnode_t vp, daddr64_t blkno)
1633 {
1634 	off_t file_offset = -1;
1635 	int error;
1636 
1637 	if (UBCINFOEXISTS(vp)) {
1638 		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1639 		if (error) {
1640 			file_offset = -1;
1641 		}
1642 	}
1643 
1644 	return file_offset;
1645 }
1646 
1647 
1648 /*
1649  * ubc_offtoblk
1650  *
1651  * Convert a given offset in a memory backing object into a block number for a
1652  * given vnode
1653  *
1654  * Parameters:	vp			The vnode in which the offset is
1655  *					located
1656  *		offset			The offset into the backing object
1657  *
1658  * Returns:	!-1			The returned block number
1659  *		-1			There is no ubc_info associated with
1660  *					the vnode
1661  *		-1			An error occurred in the underlying VFS
1662  *					while translating the block to an
1663  *					offset; the most likely cause is that
1664  *					the caller specified a block past the
1665  *					end of the file, but this could also be
1666  *					any other error from VNOP_OFFTOBLK().
1667  *
1668  * Note:	Representing the error in band loses some information, but does
1669  *		not occlude a valid block number, since block numbers exceed
1670  *		the valid range for offsets, due to their relative sizes.  If
1671  *		we had a more reliable constant than -1 in our header files
1672  *		for it (i.e. explicitly cast to an daddr64_t), we would use it
1673  *		here instead.
1674  */
1675 daddr64_t
1676 ubc_offtoblk(vnode_t vp, off_t offset)
1677 {
1678 	daddr64_t blkno = -1;
1679 	int error = 0;
1680 
1681 	if (UBCINFOEXISTS(vp)) {
1682 		error = VNOP_OFFTOBLK(vp, offset, &blkno);
1683 		if (error) {
1684 			blkno = -1;
1685 		}
1686 	}
1687 
1688 	return blkno;
1689 }
1690 
1691 
1692 /*
1693  * ubc_pages_resident
1694  *
1695  * Determine whether or not a given vnode has pages resident via the memory
1696  * object control associated with the ubc_info associated with the vnode
1697  *
1698  * Parameters:	vp			The vnode we want to know about
1699  *
1700  * Returns:	1			Yes
1701  *		0			No
1702  */
1703 int
1704 ubc_pages_resident(vnode_t vp)
1705 {
1706 	kern_return_t           kret;
1707 	boolean_t                       has_pages_resident;
1708 
1709 	if (!UBCINFOEXISTS(vp)) {
1710 		return 0;
1711 	}
1712 
1713 	/*
1714 	 * The following call may fail if an invalid ui_control is specified,
1715 	 * or if there is no VM object associated with the control object.  In
1716 	 * either case, reacting to it as if there were no pages resident will
1717 	 * result in correct behavior.
1718 	 */
1719 	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1720 
1721 	if (kret != KERN_SUCCESS) {
1722 		return 0;
1723 	}
1724 
1725 	if (has_pages_resident == TRUE) {
1726 		return 1;
1727 	}
1728 
1729 	return 0;
1730 }
1731 
1732 /*
1733  * ubc_msync
1734  *
1735  * Clean and/or invalidate a range in the memory object that backs this vnode
1736  *
1737  * Parameters:	vp			The vnode whose associated ubc_info's
1738  *					associated memory object is to have a
1739  *					range invalidated within it
1740  *		beg_off			The start of the range, as an offset
1741  *		end_off			The end of the range, as an offset
1742  *		resid_off		The address of an off_t supplied by the
1743  *					caller; may be set to NULL to ignore
1744  *		flags			See ubc_msync_internal()
1745  *
1746  * Returns:	0			Success
1747  *		!0			Failure; an errno is returned
1748  *
1749  * Implicit Returns:
1750  *		*resid_off, modified	If non-NULL, the contents are ALWAYS
1751  *					modified; they are initialized to the
1752  *					beg_off, and in case of an I/O error,
1753  *					the difference between beg_off and the
1754  *					current value will reflect what was
1755  *					able to be written before the error
1756  *					occurred.  If no error is returned, the
1757  *					value of the resid_off is undefined; do
1758  *					NOT use it in place of end_off if you
1759  *					intend to increment from the end of the
1760  *					last call and call iteratively.
1761  *
1762  * Notes:	see ubc_msync_internal() for more detailed information.
1763  *
1764  */
1765 errno_t
1766 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1767 {
1768 	int retval;
1769 	int io_errno = 0;
1770 
1771 	if (resid_off) {
1772 		*resid_off = beg_off;
1773 	}
1774 
1775 	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1776 
1777 	if (retval == 0 && io_errno == 0) {
1778 		return EINVAL;
1779 	}
1780 	return io_errno;
1781 }
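
/*
 * Example: a sketch of synchronously pushing every dirty page of a file,
 * assuming an iocount is held on vp:
 *
 *	off_t resid = 0;
 *	errno_t err;
 *
 *	err = ubc_msync(vp, 0, ubc_getsize(vp), &resid,
 *	    UBC_PUSHDIRTY | UBC_SYNC);
 *	if (err) {
 *		// bytes in [0, resid) were pushed before the I/O error
 *	}
 */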
1782 
1783 
1784 /*
1785  * ubc_msync_internal
1786  *
1787  * Clean and/or invalidate a range in the memory object that backs this vnode
1788  *
1789  * Parameters:	vp			The vnode whose associated ubc_info's
1790  *					associated memory object is to have a
1791  *					range invalidated within it
1792  *		beg_off			The start of the range, as an offset
1793  *		end_off			The end of the range, as an offset
1794  *		resid_off		The address of an off_t supplied by the
1795  *					caller; may be set to NULL to ignore
1796  *		flags			MUST contain at least one of the flags
1797  *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
1798  *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1799  *					UBC_SYNC may also be specified to cause
1800  *					this function to block until the
1801  *					operation is complete.  The behavior
1802  *					of UBC_SYNC is otherwise undefined.
1803  *		io_errno		The address of an int to contain the
1804  *					errno from a failed I/O operation, if
1805  *					one occurs; may be set to NULL to
1806  *					ignore
1807  *
1808  * Returns:	1			Success
1809  *		0			Failure
1810  *
1811  * Implicit Returns:
1812  *		*resid_off, modified	The contents of this offset MAY be
1813  *					modified; in case of an I/O error, the
1814  *					difference between beg_off and the
1815  *					current value will reflect what was
1816  *					able to be written before the error
1817  *					occurred.
1818  *		*io_errno, modified	The contents of this offset are set to
1819  *					an errno, if an error occurs; if the
1820  *					caller supplies an io_errno parameter,
1821  *					they should be careful to initialize it
1822  *					to 0 before calling this function to
1823  *					enable them to distinguish an error
1824  *					with a valid *resid_off from an invalid
1825  *					one, and to avoid potentially falsely
1826  *					reporting an error, depending on use.
1827  *
1828  * Notes:	If there is no ubc_info associated with the vnode supplied,
1829  *		this function immediately returns success.
1830  *
1831  *		If the value of end_off is less than or equal to beg_off, this
1832  *		function immediately returns success; that is, end_off is NOT
1833  *		inclusive.
1834  *
1835  *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1836  *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1837  *		attempt to block on in-progress I/O by calling this function
1838  *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1839  *		in order to block pending on the I/O already in progress.
1840  *
1841  *		The start offset is truncated to the page boundary and the
1842  *		size is adjusted to include the last page in the range; that
1843  *		is, end_off on exactly a page boundary will not change if it
1844  *		is rounded, and the range of bytes written will be from the
1845  *		truncated beg_off to the rounded (end_off - 1).
1846  */
1847 static int
1848 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1849 {
1850 	memory_object_size_t    tsize;
1851 	kern_return_t           kret;
1852 	int request_flags = 0;
1853 	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;
1854 
1855 	if (!UBCINFOEXISTS(vp)) {
1856 		return 0;
1857 	}
1858 	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1859 		return 0;
1860 	}
1861 	if (end_off <= beg_off) {
1862 		return 1;
1863 	}
1864 
1865 	if (flags & UBC_INVALIDATE) {
1866 		/*
1867 		 * discard the resident pages
1868 		 */
1869 		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1870 	}
1871 
1872 	if (flags & UBC_SYNC) {
1873 		/*
1874 		 * wait for all the I/O to complete before returning
1875 		 */
1876 		request_flags |= MEMORY_OBJECT_IO_SYNC;
1877 	}
1878 
1879 	if (flags & UBC_PUSHDIRTY) {
1880 		/*
1881 		 * we only return the dirty pages in the range
1882 		 */
1883 		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1884 	}
1885 
1886 	if (flags & UBC_PUSHALL) {
1887 		/*
1888 		 * then return all the interesting pages in the range (both
1889 		 * dirty and precious) to the pager
1890 		 */
1891 		flush_flags = MEMORY_OBJECT_RETURN_ALL;
1892 	}
1893 
1894 	beg_off = trunc_page_64(beg_off);
1895 	end_off = round_page_64(end_off);
1896 	tsize   = (memory_object_size_t)end_off - beg_off;
1897 
1898 	/* flush and/or invalidate pages in the range requested */
1899 	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
1900 	    beg_off, tsize,
1901 	    (memory_object_offset_t *)resid_off,
1902 	    io_errno, flush_flags, request_flags,
1903 	    VM_PROT_NO_CHANGE);
1904 
1905 	return (kret == KERN_SUCCESS) ? 1 : 0;
1906 }
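
/*
 * Summary of the flag translation performed above:
 *
 *	UBC_INVALIDATE	-> request_flags |= MEMORY_OBJECT_DATA_FLUSH |
 *					    MEMORY_OBJECT_DATA_NO_CHANGE
 *	UBC_SYNC	-> request_flags |= MEMORY_OBJECT_IO_SYNC
 *	UBC_PUSHDIRTY	-> flush_flags   =  MEMORY_OBJECT_RETURN_DIRTY
 *	UBC_PUSHALL	-> flush_flags   =  MEMORY_OBJECT_RETURN_ALL
 *
 * For example, UBC_PUSHDIRTY | UBC_SYNC becomes a synchronous
 * memory_object_lock_request() that returns only the dirty pages.
 */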
1907 
1908 
1909 /*
1910  * ubc_map
1911  *
1912  * Explicitly map a vnode that has an associated ubc_info, and add a reference
1913  * to it for the ubc system, if there isn't one already, so it will not be
1914  * recycled while it's in use, and set flags on the ubc_info to indicate that
1915  * we have done this
1916  *
1917  * Parameters:	vp			The vnode to map
1918  *		flags			The mapping flags for the vnode; this
1919  *					will be a combination of one or more of
1920  *					PROT_READ, PROT_WRITE, and PROT_EXEC
1921  *
1922  * Returns:	0			Success
1923  *		EPERM			Permission was denied
1924  *
1925  * Notes:	An I/O reference on the vnode must already be held on entry
1926  *
1927  *		If there is no ubc_info associated with the vnode, this function
1928  *		will return success.
1929  *
1930  *		If a permission error occurs, this function will return
1931  *		failure; all other failures will cause this function to return
1932  *		success.
1933  *
1934  *		IMPORTANT: This is an internal use function, and its symbols
1935  *		are not exported, hence its error checking is not very robust.
1936  *		It is primarily used by:
1937  *
1938  *		o	mmap(), when mapping a file
1939  *		o	When mapping a shared file (a shared library in the
1940  *			shared segment region)
1941  *		o	When loading a program image during the exec process
1942  *
1943  *		...all of these uses ignore the return code, and any fault that
1944  *		results later because of a failure is handled in the fix-up path
1945  *		of the fault handler.  The interface exists primarily as a
1946  *		performance hint.
1947  *
1948  *		Given that third party implementations of the type of interfaces
1949  *		that would use this function, such as alternative executable
1950  *		formats, etc., are unsupported, this function is not exported
1951  *		for general use.
1952  *
1953  *		The extra reference is held until the VM system unmaps the
1954  *		vnode from its own context to maintain a vnode reference in
1955  *		cases like open()/mmap()/close(), which leave the backing
1956  *		object referenced by a mapped memory region in a process
1957  *		address space.
1958  */
1959 __private_extern__ int
1960 ubc_map(vnode_t vp, int flags)
1961 {
1962 	struct ubc_info *uip;
1963 	int error = 0;
1964 	int need_ref = 0;
1965 	int need_wakeup = 0;
1966 
1967 	/*
1968 	 * This call is non-blocking and does not ever fail but it can
1969 	 * only be made when there is other explicit synchronization
1970 	 * with reclaiming of the vnode which, in this path, is provided
1971 	 * by the "mapping in progress" counter.
1972 	 */
1973 	error = vnode_getalways_from_pager(vp);
1974 	if (error != 0) {
1975 		/* This can't happen */
1976 		panic("vnode_getalways returned %d for vp %p", error, vp);
1977 	}
1978 
1979 	if (UBCINFOEXISTS(vp) == 0) {
1980 		/*
1981 		 * The vnode might have started being reclaimed (forced unmount?) while
1982 		 * this call was in progress.
1983 		 * The caller is not expecting an error but is expected to figure out that
1984 		 * the "pager" it used for this vnode is now gone.
1985 		 */
1986 		error = 0;
1987 	} else {
1988 		vnode_lock(vp);
1989 		uip = vp->v_ubcinfo;
1990 
1991 		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
1992 			SET(uip->ui_flags, UI_MAPWAITING);
1993 			(void) msleep(&uip->ui_flags, &vp->v_lock,
1994 			    PRIBIO, "ubc_map", NULL);
1995 		}
1996 		SET(uip->ui_flags, UI_MAPBUSY);
1997 		vnode_unlock(vp);
1998 
1999 		error = VNOP_MMAP(vp, flags, vfs_context_current());
2000 
2001 		/*
2002 		 * rdar://problem/22587101 required that we stop propagating
2003 		 * EPERM up the stack. Otherwise, we would have to funnel up
2004 		 * the error at all the call sites for memory_object_map().
2005 		 * The risk is in having to undo the map/object/entry state at
2006 		 * all these call sites. It would also affect more than just mmap()
2007 		 * e.g. vm_remap().
2008 		 *
2009 		 *	if (error != EPERM)
2010 		 *              error = 0;
2011 		 */
2012 
2013 		error = 0;
2014 
2015 		vnode_lock_spin(vp);
2016 
2017 		if (error == 0) {
2018 			if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
2019 				need_ref = 1;
2020 			}
2021 			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
2022 			if (flags & PROT_WRITE) {
2023 				SET(uip->ui_flags, (UI_WASMAPPEDWRITE | UI_MAPPEDWRITE));
2024 			}
2025 		}
2026 		CLR(uip->ui_flags, UI_MAPBUSY);
2027 
2028 		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2029 			CLR(uip->ui_flags, UI_MAPWAITING);
2030 			need_wakeup = 1;
2031 		}
2032 		vnode_unlock(vp);
2033 
2034 		if (need_wakeup) {
2035 			wakeup(&uip->ui_flags);
2036 		}
2037 
2038 		if (need_ref) {
2039 			/*
2040 			 * Make sure we get a ref as we can't unwind from here
2041 			 */
2042 			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
2043 				panic("%s : VNODE_REF_FORCE failed", __FUNCTION__);
2044 			}
2045 			/*
2046 			 * Vnodes that are on "unreliable" media (like disk
2047 			 * images, network filesystems, 3rd-party filesystems,
2048 			 * and possibly external devices) could see their
2049 			 * contents be changed via the backing store without
2050 			 * triggering copy-on-write, so we can't fully rely
2051 			 * on copy-on-write and might have to resort to
2052 			 * copy-on-read to protect "privileged" processes and
2053 			 * prevent privilege escalation.
2054 			 *
2055 			 * The root filesystem is considered "reliable" because
2056 			 * there's not much point in trying to protect
2057 			 * ourselves from such a vulnerability and the extra
2058 			 * cost of copy-on-read (CPU time and memory pressure)
2059 			 * could result in some serious regressions.
2060 			 */
2061 			if (vp->v_mount != NULL &&
2062 			    ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
2063 			    vnode_on_reliable_media(vp))) {
2064 				/*
2065 				 * This vnode is deemed "reliable" so mark
2066 				 * its VM object as "trusted".
2067 				 */
2068 				memory_object_mark_trusted(uip->ui_control);
2069 			} else {
2070 //				printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
2071 			}
2072 		}
2073 	}
2074 	vnode_put_from_pager(vp);
2075 
2076 	return error;
2077 }
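
/*
 * Example: a sketch of how an exec- or mmap-style path typically uses
 * ubc_map(), assuming vp has an iocount and is being mapped read/execute:
 *
 *	(void) ubc_map(vp, PROT_READ | PROT_EXEC);
 *
 * The return value is deliberately ignored, matching the callers listed
 * above; any real failure surfaces later in the fault path.
 */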
2078 
2079 
2080 /*
2081  * ubc_destroy_named
2082  *
2083  * Destroy the named memory object associated with the ubc_info control object
2084  * associated with the designated vnode, if there is a ubc_info associated
2085  * with the vnode, and a control object is associated with it
2086  *
2087  * Parameters:	vp			The designated vnode
2088  *
2089  * Returns:	(void)
2090  *
2091  * Notes:	This function is called on vnode termination for all vnodes,
2092  *		and must therefore not assume that there is a ubc_info that is
2093  *		associated with the vnode, nor that there is a control object
2094  *		associated with the ubc_info.
2095  *
2096  *		If all the conditions necessary are present, this function
2097  *		calls memory_object_destroy(), which will in turn end up
2098  *		calling ubc_unmap() to release any vnode references that were
2099  *		established via ubc_map().
2100  *
2101  *		IMPORTANT: This is an internal use function that is used
2102  *		exclusively by the internal use function vclean().
2103  */
2104 __private_extern__ void
2105 ubc_destroy_named(vnode_t vp, vm_object_destroy_reason_t reason)
2106 {
2107 	memory_object_control_t control;
2108 	struct ubc_info *uip;
2109 	kern_return_t kret;
2110 
2111 	if (UBCINFOEXISTS(vp)) {
2112 		uip = vp->v_ubcinfo;
2113 
2114 		/* Terminate the memory object  */
2115 		control = ubc_getobject(vp, UBC_HOLDOBJECT);
2116 		if (control != MEMORY_OBJECT_CONTROL_NULL) {
2117 			kret = memory_object_destroy(control, reason);
2118 			if (kret != KERN_SUCCESS) {
2119 				panic("ubc_destroy_named: memory_object_destroy failed");
2120 			}
2121 		}
2122 	}
2123 }
2124 
2125 
2126 /*
2127  * ubc_isinuse
2128  *
2129  * Determine whether or not a vnode is currently in use by ubc at a level in
2130  * excess of the requested busycount
2131  *
2132  * Parameters:	vp			The vnode to check
2133  *		busycount		The threshold busy count, used to bias
2134  *					the count usually already held by the
2135  *					caller to avoid races
2136  *
2137  * Returns:	1			The vnode is in use over the threshold
2138  *		0			The vnode is not in use over the
2139  *					threshold
2140  *
2141  * Notes:	Because the vnode is only held locked while actually checking
2142  *		the use count, this function only represents a snapshot of the
2143  *		current state of the vnode.  If more accurate information is
2144  *		required, an additional busycount should be held by the caller
2145  *		and a non-zero busycount used.
2146  *
2147  *		If there is no ubc_info associated with the vnode, this
2148  *		function will report that the vnode is not in use by ubc.
2149  */
2150 int
2151 ubc_isinuse(struct vnode *vp, int busycount)
2152 {
2153 	if (!UBCINFOEXISTS(vp)) {
2154 		return 0;
2155 	}
2156 	return ubc_isinuse_locked(vp, busycount, 0);
2157 }
2158 
2159 
2160 /*
2161  * ubc_isinuse_locked
2162  *
2163  * Determine whether or not a vnode is currently in use by ubc at a level in
2164  * excess of the requested busycount
2165  *
2166  * Parameters:	vp			The vnode to check
2167  *		busycount		The threshold busy count, used to bias
2168  *					the count usually already held by the
2169  *					caller to avoid races
2170  *		locked			True if the vnode is already locked by
2171  *					the caller
2172  *
2173  * Returns:	1			The vnode is in use over the threshold
2174  *		0			The vnode is not in use over the
2175  *					threshold
2176  *
2177  * Notes:	If the vnode is not locked on entry, it is locked while
2178  *		actually checking the use count.  If this is the case, this
2179  *		function only represents a snapshot of the current state of
2180  *		the vnode.  If more accurate information is required, the
2181  *		vnode lock should be held by the caller, otherwise an
2182  *		additional busycount should be held by the caller and a
2183  *		non-zero busycount used.
2184  *
2185  *		If there is no ubc_info associated with the vnode, this
2186  *		function will report that the vnode is not in use by ubc.
2187  */
2188 int
2189 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2190 {
2191 	int retval = 0;
2192 
2193 
2194 	if (!locked) {
2195 		vnode_lock_spin(vp);
2196 	}
2197 
2198 	if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2199 		retval = 1;
2200 	}
2201 
2202 	if (!locked) {
2203 		vnode_unlock(vp);
2204 	}
2205 	return retval;
2206 }
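
/*
 * Example: a sketch of a filesystem checking whether anyone other than
 * the current opener still has the file in use; the caller's own use
 * count is accounted for by passing a busycount bias of 1:
 *
 *	if (ubc_isinuse(vp, 1)) {
 *		// some other client (e.g. a mapping) still has the file busy
 *	}
 */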
2207 
2208 
2209 /*
2210  * ubc_unmap
2211  *
2212  * Reverse the effects of a ubc_map() call for a given vnode
2213  *
2214  * Parameters:	vp			vnode to unmap from ubc
2215  *
2216  * Returns:	(void)
2217  *
2218  * Notes:	This is an internal use function used by vnode_pager_unmap().
2219  *		It will attempt to obtain a reference on the supplied vnode,
2220  *		and if it can do so, and there is an associated ubc_info, and
2221  *		the flags indicate that it was mapped via ubc_map(), then the
2222  *		flag is cleared, the mapping removed, and the reference taken
2223  *		by ubc_map() is released.
2224  *
2225  *		IMPORTANT: This MUST only be called by the VM
2226  *		to prevent race conditions.
2227  */
2228 __private_extern__ void
2229 ubc_unmap(struct vnode *vp)
2230 {
2231 	struct ubc_info *uip;
2232 	int     need_rele = 0;
2233 	int     need_wakeup = 0;
2234 	int     error = 0;
2235 
2236 	/*
2237 	 * This call is non-blocking and does not ever fail but it can
2238 	 * only be made when there is other explicit synchronization
2239 	 * with reclaiming of the vnode which, in this path, is provided
2240 	 * by the "mapping in progress" counter.
2241 	 */
2242 	error = vnode_getalways_from_pager(vp);
2243 	if (error != 0) {
2244 		/* This can't happen */
2245 		panic("vnode_getalways returned %d for vp %p", error, vp);
2246 	}
2247 
2248 	if (UBCINFOEXISTS(vp) == 0) {
2249 		/*
2250 		 * The vnode might have started being reclaimed (forced unmount?) while
2251 		 * this call was in progress.
2252 		 * The caller is not expecting an error but is expected to figure out that
2253 		 * the "pager" it used for this vnode is now gone and take appropriate
2254 		 * action.
2255 		 */
2256 	} else {
2257 		bool want_fsevent = false;
2258 
2259 		vnode_lock(vp);
2260 		uip = vp->v_ubcinfo;
2261 
2262 		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2263 			SET(uip->ui_flags, UI_MAPWAITING);
2264 			(void) msleep(&uip->ui_flags, &vp->v_lock,
2265 			    PRIBIO, "ubc_unmap", NULL);
2266 		}
2267 		SET(uip->ui_flags, UI_MAPBUSY);
2268 
2269 		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
2270 			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
2271 				want_fsevent = true;
2272 			}
2273 
2274 			need_rele = 1;
2275 
2276 			/*
2277 			 * We want to clear the mapped flags after we've called
2278 			 * VNOP_MNOMAP to avoid certain races and allow
2279 			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2280 			 */
2281 		}
2282 		vnode_unlock(vp);
2283 
2284 		if (need_rele) {
2285 			vfs_context_t ctx = vfs_context_current();
2286 
2287 			(void)VNOP_MNOMAP(vp, ctx);
2288 
2289 #if CONFIG_FSE
2290 			/*
2291 			 * Why do we want an fsevent here?  Normally the
2292 			 * content modified fsevent is posted when a file is
2293 			 * closed and only if it's written to via conventional
2294 			 * means.  It's perfectly legal to close a file and
2295 			 * keep your mappings and we don't currently track
2296 			 * whether it was written to via a mapping.
2297 			 * Therefore, we need to post an fsevent here if the
2298 			 * file was mapped writable.  This may result in false
2299 			 * events, i.e. we post a notification when nothing
2300 			 * has really changed.
2301 			 */
2302 			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2303 				add_fsevent(FSE_CONTENT_MODIFIED_NO_HLINK, ctx,
2304 				    FSE_ARG_VNODE, vp,
2305 				    FSE_ARG_DONE);
2306 			}
2307 #endif
2308 
2309 			vnode_rele(vp);
2310 		}
2311 
2312 		vnode_lock_spin(vp);
2313 
2314 		if (need_rele) {
2315 			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
2316 		}
2317 
2318 		CLR(uip->ui_flags, UI_MAPBUSY);
2319 
2320 		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2321 			CLR(uip->ui_flags, UI_MAPWAITING);
2322 			need_wakeup = 1;
2323 		}
2324 		vnode_unlock(vp);
2325 
2326 		if (need_wakeup) {
2327 			wakeup(&uip->ui_flags);
2328 		}
2329 	}
2330 	/*
2331 	 * the drop of the vnode ref will cleanup
2332 	 */
2333 	vnode_put_from_pager(vp);
2334 }
2335 
2336 
2337 /*
2338  * ubc_page_op
2339  *
2340  * Manipulate individual page state for a vnode with an associated ubc_info
2341  * with an associated memory object control.
2342  *
2343  * Parameters:	vp			The vnode backing the page
2344  *		f_offset		A file offset interior to the page
2345  *		ops			The operations to perform, as a bitmap
2346  *					(see below for more information)
2347  *		phys_entryp		The address of a ppnum_t; may be NULL
2348  *					to ignore
2349  *		flagsp			A pointer to an int to contain flags;
2350  *					may be NULL to ignore
2351  *
2352  * Returns:	KERN_SUCCESS		Success
2353  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2354  *					object associated
2355  *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
2356  *					not physically contiguous
2357  *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
2358  *					physically contiguous
2359  *		KERN_FAILURE		If the page cannot be looked up
2360  *
2361  * Implicit Returns:
2362  *		*phys_entryp (modified)	If phys_entryp is non-NULL and
2363  *					UPL_POP_PHYSICAL
2364  *		*flagsp (modified)	If flagsp is non-NULL and there was
2365  *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
2366  *
2367  * Notes:	For object boundaries, it is considerably more efficient to
2368  *		ensure that f_offset is in fact on a page boundary, as this
2369  *		will avoid internal use of the hash table to identify the
2370  *		page, and would therefore skip a number of early optimizations.
2371  *		Since this is a page operation anyway, the caller should try
2372  *		to pass only a page aligned offset because of this.
2373  *
2374  *		*flagsp may be modified even if this function fails.  If it is
2375  *		modified, it will contain the condition of the page before the
2376  *		requested operation was attempted; these will only include the
2377  *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2378  *		UPL_POP_SET, or UPL_POP_CLR bits.
2379  *
2380  *		The flags field may contain a specific operation, such as
2381  *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
2382  *
2383  *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
2384  *						*phys_entryp and successful, set
2385  *						*phys_entryp
2386  *		o	UPL_POP_DUMP		Dump the specified page
2387  *
2388  *		Otherwise, it is treated as a bitmap of one or more page
2389  *		operations to perform on the final memory object; allowable
2390  *		bit values are:
2391  *
2392  *		o	UPL_POP_DIRTY		The page is dirty
2393  *		o	UPL_POP_PAGEOUT		The page is paged out
2394  *		o	UPL_POP_PRECIOUS	The page is precious
2395  *		o	UPL_POP_ABSENT		The page is absent
2396  *		o	UPL_POP_BUSY		The page is busy
2397  *
2398  *		If the page status is only being queried and not modified, then
2399  *		no other bits should be specified.  However, if it is being
2400  *		modified, exactly ONE of the following bits should be set:
2401  *
2402  *		o	UPL_POP_SET		Set the current bitmap bits
2403  *		o	UPL_POP_CLR		Clear the current bitmap bits
2404  *
2405  *		Thus to effect a combination of setting and clearing, it may be
2406  *		necessary to call this function twice.  If this is done, the
2407  *		set should be used before the clear, since clearing may trigger
2408  *		a wakeup on the destination page, and if the page is backed by
2409  *		an encrypted swap file, setting will trigger the decryption
2410  *		needed before the wakeup occurs.
2411  */
2412 kern_return_t
2413 ubc_page_op(
2414 	struct vnode    *vp,
2415 	off_t           f_offset,
2416 	int             ops,
2417 	ppnum_t *phys_entryp,
2418 	int             *flagsp)
2419 {
2420 	memory_object_control_t         control;
2421 
2422 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2423 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2424 		return KERN_INVALID_ARGUMENT;
2425 	}
2426 
2427 	return memory_object_page_op(control,
2428 	           (memory_object_offset_t)f_offset,
2429 	           ops,
2430 	           phys_entryp,
2431 	           flagsp);
2432 }
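
/*
 * Example: a sketch of querying, then clearing, the dirty state of the
 * page at f_offset, assuming f_offset is page aligned as recommended:
 *
 *	int flags = 0;
 *
 *	if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS &&
 *	    (flags & UPL_POP_DIRTY)) {
 *		(void) ubc_page_op(vp, f_offset,
 *		    UPL_POP_DIRTY | UPL_POP_CLR, NULL, NULL);
 *	}
 */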
2433 
2434 
2435 /*
2436  * ubc_range_op
2437  *
2438  * Manipulate page state for a range of memory for a vnode with an associated
2439  * ubc_info with an associated memory object control, when page level state is
2440  * not required to be returned from the call (i.e. there are no phys_entryp or
2441  * flagsp parameters to this call, and it takes a range which may contain
2442  * multiple pages, rather than an offset interior to a single page).
2443  *
2444  * Parameters:	vp			The vnode backing the page
2445  *		f_offset_beg		A file offset interior to the start page
2446  *		f_offset_end		A file offset interior to the end page
2447  *		ops			The operations to perform, as a bitmap
2448  *					(see below for more information)
2449  *		range			The address of an int; may be NULL to
2450  *					ignore
2451  *
2452  * Returns:	KERN_SUCCESS		Success
2453  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2454  *					object associated
2455  *		KERN_INVALID_OBJECT	If the object is physically contiguous
2456  *
2457  * Implicit Returns:
2458  *		*range (modified)	If range is non-NULL, its contents will
2459  *					be modified to contain the number of
2460  *					bytes successfully operated upon.
2461  *
2462  * Notes:	IMPORTANT: This function cannot be used on a range that
2463  *		consists of physically contiguous pages.
2464  *
2465  *		For object boundaries, it is considerably more efficient to
2466  *		ensure that f_offset_beg and f_offset_end are in fact on page
2467  *		boundaries, as this will avoid internal use of the hash table
2468  *		to identify the page, and would therefore skip a number of
2469  *		early optimizations.  Since this is an operation on a set of
2470  *		pages anyway, the caller should try to pass only page aligned
2471  *		offsets because of this.
2472  *
2473  *		*range will be modified only if this function succeeds.
2474  *
2475  *		The flags field MUST contain a specific operation; allowable
2476  *		values are:
2477  *
2478  *		o	UPL_ROP_ABSENT	Returns the extent of the range
2479  *					presented which is absent, starting
2480  *					with the start address presented
2481  *
2482  *		o	UPL_ROP_PRESENT	Returns the extent of the range
2483  *					presented which is present (resident),
2484  *					starting with the start address
2485  *					presented
2486  *		o	UPL_ROP_DUMP	Dump the pages which are found in the
2487  *					target object for the target range.
2488  *
2489  *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2490  *		multiple regions in the range, only the first matching region
2491  *		is returned.
2492  */
2493 kern_return_t
2494 ubc_range_op(
2495 	struct vnode    *vp,
2496 	off_t           f_offset_beg,
2497 	off_t           f_offset_end,
2498 	int             ops,
2499 	int             *range)
2500 {
2501 	memory_object_control_t         control;
2502 
2503 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2504 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2505 		return KERN_INVALID_ARGUMENT;
2506 	}
2507 
2508 	return memory_object_range_op(control,
2509 	           (memory_object_offset_t)f_offset_beg,
2510 	           (memory_object_offset_t)f_offset_end,
2511 	           ops,
2512 	           range);
2513 }
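
/*
 * Example: a sketch of measuring how many bytes at the start of a range
 * are resident, using page aligned offsets as recommended:
 *
 *	int resident_bytes = 0;
 *
 *	if (ubc_range_op(vp, start, start + length, UPL_ROP_PRESENT,
 *	    &resident_bytes) == KERN_SUCCESS) {
 *		// the first resident_bytes bytes from start are cached
 *	}
 */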
2514 
2515 
2516 /*
2517  * ubc_create_upl
2518  *
2519  * Given a vnode, cause the population of a portion of the vm_object; based on
2520  * the nature of the request, the pages returned may contain valid data, or
2521  * they may be uninitialized.
2522  *
2523  * Parameters:	vp			The vnode from which to create the upl
2524  *		f_offset		The start offset into the backing store
2525  *					represented by the vnode
2526  *		bufsize			The size of the upl to create
2527  *		uplp			Pointer to the upl_t to receive the
2528  *					created upl; MUST NOT be NULL
2529  *		plp			Pointer to receive the internal page
2530  *					list for the created upl; MAY be NULL
2531  *					to ignore
2532  *
2533  * Returns:	KERN_SUCCESS		The requested upl has been created
2534  *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
2535  *					multiple of the page size
2536  *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
2537  *					the vnode, or there is no memory object
2538  *					control associated with the ubc_info
2539  *	memory_object_upl_request:KERN_INVALID_VALUE
2540  *					The supplied upl_flags argument is
2541  *					invalid
2542  * Implicit Returns:
2543  *		*uplp (modified)
2544  *		*plp (modified)		If non-NULL, the value of *plp will be
2545  *					modified to point to the internal page
2546  *					list; this modification may occur even
2547  *					if this function is unsuccessful, in
2548  *					which case the contents may be invalid
2549  *
2550  * Note:	If successful, the returned *uplp MUST subsequently be freed
2551  *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2552  *		ubc_upl_abort(), or ubc_upl_abort_range().
2553  */
2554 kern_return_t
2555 ubc_create_upl_external(
2556 	struct vnode    *vp,
2557 	off_t           f_offset,
2558 	int             bufsize,
2559 	upl_t           *uplp,
2560 	upl_page_info_t **plp,
2561 	int             uplflags)
2562 {
2563 	return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2564 }
2565 
2566 kern_return_t
2567 ubc_create_upl_kernel(
2568 	struct vnode    *vp,
2569 	off_t           f_offset,
2570 	int             bufsize,
2571 	upl_t           *uplp,
2572 	upl_page_info_t **plp,
2573 	int             uplflags,
2574 	vm_tag_t tag)
2575 {
2576 	memory_object_control_t         control;
2577 	kern_return_t                   kr;
2578 
2579 	if (plp != NULL) {
2580 		*plp = NULL;
2581 	}
2582 	*uplp = NULL;
2583 
2584 	if (bufsize & 0xfff) {
2585 		return KERN_INVALID_ARGUMENT;
2586 	}
2587 
2588 	if (bufsize > MAX_UPL_SIZE_BYTES) {
2589 		return KERN_INVALID_ARGUMENT;
2590 	}
2591 
2592 	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
2593 		if (uplflags & UPL_UBC_MSYNC) {
2594 			uplflags &= UPL_RET_ONLY_DIRTY;
2595 
2596 			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
2597 			    UPL_SET_INTERNAL | UPL_SET_LITE;
2598 		} else if (uplflags & UPL_UBC_PAGEOUT) {
2599 			uplflags &= UPL_RET_ONLY_DIRTY;
2600 
2601 			if (uplflags & UPL_RET_ONLY_DIRTY) {
2602 				uplflags |= UPL_NOBLOCK;
2603 			}
2604 
2605 			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
2606 			    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
2607 		} else {
2608 			uplflags |= UPL_RET_ONLY_ABSENT |
2609 			    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2610 			    UPL_SET_INTERNAL | UPL_SET_LITE;
2611 
2612 			/*
2613 			 * if the requested size == PAGE_SIZE, we don't want to set
2614 			 * the UPL_NOBLOCK since we may be trying to recover from a
2615 			 * previous partial pagein I/O that occurred because we were low
2616 			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2617 			 * since we're only asking for a single page, we can block w/o fear
2618 			 * of tying up pages while waiting for more to become available
2619 			 */
2620 			if (bufsize > PAGE_SIZE) {
2621 				uplflags |= UPL_NOBLOCK;
2622 			}
2623 		}
2624 	} else {
2625 		uplflags &= ~UPL_FOR_PAGEOUT;
2626 
2627 		if (uplflags & UPL_WILL_BE_DUMPED) {
2628 			uplflags &= ~UPL_WILL_BE_DUMPED;
2629 			uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2630 		} else {
2631 			uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2632 		}
2633 	}
2634 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2635 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2636 		return KERN_INVALID_ARGUMENT;
2637 	}
2638 
2639 	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
2640 	if (kr == KERN_SUCCESS && plp != NULL) {
2641 		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
2642 	}
2643 	return kr;
2644 }
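
/*
 * Example: a sketch of the typical create/map/unmap/commit life cycle of
 * a upl, with VM_KERN_MEMORY_FILE as an assumed accounting tag:
 *
 *	upl_t upl = NULL;
 *	upl_page_info_t *pl = NULL;
 *	vm_offset_t kaddr;
 *
 *	if (ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
 *	    UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY,
 *	    VM_KERN_MEMORY_FILE) != KERN_SUCCESS) {
 *		return;
 *	}
 *	if (ubc_upl_map(upl, &kaddr) == KERN_SUCCESS) {
 *		// ... operate on the pages mapped at kaddr ...
 *		(void) ubc_upl_unmap(upl);
 *	}
 *	(void) ubc_upl_commit(upl);	// the upl must always be freed
 */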
2645 
2646 
2647 /*
2648  * ubc_upl_maxbufsize
2649  *
2650  * Return the maximum bufsize ubc_create_upl( ) will take.
2651  *
2652  * Parameters:	none
2653  *
2654  * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
2655  */
2656 upl_size_t
2657 ubc_upl_maxbufsize(
2658 	void)
2659 {
2660 	return MAX_UPL_SIZE_BYTES;
2661 }
2662 
2663 /*
2664  * ubc_upl_map
2665  *
2666  * Map the page list associated with the supplied upl into the kernel virtual
2667  * address space at the virtual address indicated by the dst_addr argument;
2668  * the entire upl is mapped
2669  *
2670  * Parameters:	upl			The upl to map
2671  *		dst_addr		The address at which to map the upl
2672  *
2673  * Returns:	KERN_SUCCESS		The upl has been mapped
2674  *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
2675  *		KERN_FAILURE		The upl is already mapped
2676  *	vm_map_enter:KERN_INVALID_ARGUMENT
2677  *					A failure code from vm_map_enter() due
2678  *					to an invalid argument
2679  */
2680 kern_return_t
2681 ubc_upl_map(
2682 	upl_t           upl,
2683 	vm_offset_t     *dst_addr)
2684 {
2685 	return vm_upl_map(kernel_map, upl, dst_addr);
2686 }
2687 
2688 /*
2689  * ubc_upl_map_range:- similar to ubc_upl_map but the focus is on a range
2690  * of the UPL. Takes an offset, size, and protection so that only a  part
2691  * of the UPL can be mapped with the right protections.
2692  */
2693 kern_return_t
2694 ubc_upl_map_range(
2695 	upl_t           upl,
2696 	vm_offset_t     offset_to_map,
2697 	vm_size_t       size_to_map,
2698 	vm_prot_t       prot_to_map,
2699 	vm_offset_t     *dst_addr)
2700 {
2701 	return vm_upl_map_range(kernel_map, upl, offset_to_map, size_to_map, prot_to_map, dst_addr);
2702 }
2703 
2704 
2705 /*
2706  * ubc_upl_unmap
2707  *
2708  * Unmap the page list associated with the supplied upl from the kernel virtual
2709  * address space; the entire upl is unmapped.
2710  *
2711  * Parameters:	upl			The upl to unmap
2712  *
2713  * Returns:	KERN_SUCCESS		The upl has been unmapped
2714  *		KERN_FAILURE		The upl is not currently mapped
2715  *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
2716  */
2717 kern_return_t
2718 ubc_upl_unmap(
2719 	upl_t   upl)
2720 {
2721 	return vm_upl_unmap(kernel_map, upl);
2722 }
2723 
2724 /*
2725  * ubc_upl_unmap_range: similar to ubc_upl_unmap but the focus is
2726  * on the part of the UPL that is mapped. The offset and size parameters
2727  * specify what part of the UPL needs to be unmapped.
2728  *
2729  * Note: Currently offset & size are unused as we always initiate the unmap from the
2730  * very beginning of the UPL's mapping and track the mapped size in the UPL. But we
2731  * might want to allow unmapping a UPL in the middle, for example, and we can use the
2732  * offset + size parameters for that purpose.
2733  */
2734 kern_return_t
2735 ubc_upl_unmap_range(
2736 	upl_t   upl,
2737 	vm_offset_t     offset_to_unmap,
2738 	vm_size_t       size_to_unmap)
2739 {
2740 	return vm_upl_unmap_range(kernel_map, upl, offset_to_unmap, size_to_unmap);
2741 }
2742 
2743 
2744 /*
2745  * ubc_upl_commit
2746  *
2747  * Commit the contents of the upl to the backing store
2748  *
2749  * Parameters:	upl			The upl to commit
2750  *
2751  * Returns:	KERN_SUCCESS		The upl has been committed
2752  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2753  *		KERN_FAILURE		The supplied upl does not represent
2754  *					device memory, and the offset plus the
2755  *					size would exceed the actual size of
2756  *					the upl
2757  *
2758  * Notes:	In practice, the only return value for this function should be
2759  *		KERN_SUCCESS, unless there has been data structure corruption;
2760  *		since the upl is deallocated regardless of success or failure,
2761  *		there's really nothing to do about this other than panic.
2762  *
2763  *		IMPORTANT: Use of this function should not be mixed with use of
2764  *		ubc_upl_commit_range(), due to the unconditional deallocation
2765  *		by this function.
2766  */
2767 kern_return_t
2768 ubc_upl_commit(
2769 	upl_t                   upl)
2770 {
2771 	upl_page_info_t *pl;
2772 	kern_return_t   kr;
2773 
2774 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2775 	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2776 	upl_deallocate(upl);
2777 	return kr;
2778 }
2779 
2780 
2781 /*
2782  * ubc_upl_commit_range
2783  *
2784  * Commit the contents of the specified range of the upl to the backing store
2785  *
2786  * Parameters:	upl			The upl to commit
2787  *		offset			The offset into the upl
2788  *		size			The size of the region to be committed,
2789  *					starting at the specified offset
2790  *		flags			commit type (see below)
2791  *
2792  * Returns:	KERN_SUCCESS		The range has been committed
2793  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2794  *		KERN_FAILURE		The supplied upl does not represent
2795  *					device memory, and the offset plus the
2796  *					size would exceed the actual size of
2797  *					the upl
2798  *
2799  * Notes:	IMPORTANT: If the commit is successful, and the object is now
2800  *		empty, the upl will be deallocated.  Since the caller cannot
2801  *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2802  *		should generally only be used when the offset is 0 and the size
2803  *		is equal to the upl size.
2804  *
2805  *		The flags argument is a bitmap of flags on the range of pages in
2806  *		the upl to be committed; allowable flags are:
2807  *
2808  *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
2809  *							both empty and has been
2810  *							successfully committed
2811  *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each page's dirty
2812  *							bit; will prevent a
2813  *							later pageout
2814  *		o	UPL_COMMIT_SET_DIRTY		Set each page's dirty
2815  *							bit; will cause a later
2816  *							pageout
2817  *		o	UPL_COMMIT_INACTIVATE		Clear each page's
2818  *							reference bit; the page
2819  *							will not be accessed
2820  *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
2821  *							become busy when an
2822  *							IOMemoryDescriptor is
2823  *							mapped or redirected,
2824  *							and we have to wait for
2825  *							an IOKit driver
2826  *
2827  *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2828  *		not be specified by the caller.
2829  *
2830  *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2831  *		mutually exclusive, and should not be combined.
2832  */
2833 kern_return_t
2834 ubc_upl_commit_range(
2835 	upl_t                   upl,
2836 	upl_offset_t            offset,
2837 	upl_size_t              size,
2838 	int                             flags)
2839 {
2840 	upl_page_info_t *pl;
2841 	boolean_t               empty;
2842 	kern_return_t   kr;
2843 
2844 	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2845 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
2846 	}
2847 
2848 	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2849 		return KERN_INVALID_ARGUMENT;
2850 	}
2851 
2852 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2853 
2854 	kr = upl_commit_range(upl, offset, size, flags,
2855 	    pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2856 
2857 	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2858 		upl_deallocate(upl);
2859 	}
2860 
2861 	return kr;
2862 }
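
/*
 * Example: a sketch of committing an entire upl of upl_size bytes in one
 * call, letting the commit free it once it is empty:
 *
 *	(void) ubc_upl_commit_range(upl, 0, upl_size,
 *	    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
 *
 * After this call the upl must not be touched again, since
 * UPL_COMMIT_FREE_ON_EMPTY may have deallocated it.
 */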
2863 
2864 
2865 /*
2866  * ubc_upl_abort_range
2867  *
2868  * Abort the contents of the specified range of the specified upl
2869  *
2870  * Parameters:	upl			The upl to abort
2871  *		offset			The offset into the upl
2872  *		size			The size of the region to be aborted,
2873  *					starting at the specified offset
2874  *		abort_flags		abort type (see below)
2875  *
2876  * Returns:	KERN_SUCCESS		The range has been aborted
2877  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2878  *		KERN_FAILURE		The supplied upl does not represent
2879  *					device memory, and the offset plus the
2880  *					size would exceed the actual size of
2881  *					the upl
2882  *
2883  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2884  *		empty, the upl will be deallocated.  Since the caller cannot
2885  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2886  *		should generally only be used when the offset is 0 and the size
2887  *		is equal to the upl size.
2888  *
2889  *		The abort_flags argument is a bitmap of flags on the range of
2890  *		pages in the upl to be aborted; allowable flags are:
2891  *
2892  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2893  *						empty and has been successfully
2894  *						aborted
2895  *		o	UPL_ABORT_RESTART	The operation must be restarted
2896  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2897  *		o	UPL_ABORT_ERROR		An I/O error occurred
2898  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2899  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2900  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2901  *
2902  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2903  *		not be specified by the caller.  It is intended to fulfill the
2904  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2905  *		ubc_upl_commit_range(), but is never referenced internally.
2906  *
2907  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2908  *		referenced; do not use it.
2909  */
2910 kern_return_t
2911 ubc_upl_abort_range(
2912 	upl_t                   upl,
2913 	upl_offset_t            offset,
2914 	upl_size_t              size,
2915 	int                             abort_flags)
2916 {
2917 	kern_return_t   kr;
2918 	boolean_t               empty = FALSE;
2919 
2920 	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2921 		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2922 	}
2923 
2924 	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2925 
2926 	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2927 		upl_deallocate(upl);
2928 	}
2929 
2930 	return kr;
2931 }
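
/*
 * Example: a sketch of a pagein error path aborting the same range
 * instead of committing it, mirroring the commit example above:
 *
 *	(void) ubc_upl_abort_range(upl, 0, upl_size,
 *	    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
 */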
2932 
2933 
2934 /*
2935  * ubc_upl_abort
2936  *
2937  * Abort the contents of the specified upl
2938  *
2939  * Parameters:	upl			The upl to abort
2940  *		abort_type		abort type (see below)
2941  *
2942  * Returns:	KERN_SUCCESS		The range has been aborted
2943  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2944  *		KERN_FAILURE		The supplied upl does not represent
2945  *					device memory, and the offset plus the
2946  *					size would exceed the actual size of
2947  *					the upl
2948  *
2949  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2950  *		empty, the upl will be deallocated.  Since the caller cannot
2951  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2952  *		should generally only be used when the offset is 0 and the size
2953  *		is equal to the upl size.
2954  *
2955  *		The abort_type is a bitmap of flags on the range of
2956  *		pages in the upl to be aborted; allowable flags are:
2957  *
2958  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2959  *						empty and has been successfully
2960  *						aborted
2961  *		o	UPL_ABORT_RESTART	The operation must be restarted
2962  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2963  *		o	UPL_ABORT_ERROR		An I/O error occurred
2964  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2965  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2966  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2967  *
2968  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2969  *		not be specified by the caller.  It is intended to fulfill the
2970  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2971  *		ubc_upl_commit_range(), but is never referenced internally.
2972  *
2973  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2974  *		referenced; do not use it.
2975  */
2976 kern_return_t
2977 ubc_upl_abort(
2978 	upl_t                   upl,
2979 	int                             abort_type)
2980 {
2981 	kern_return_t   kr;
2982 
2983 	kr = upl_abort(upl, abort_type);
2984 	upl_deallocate(upl);
2985 	return kr;
2986 }
2987 
2988 
2989 /*
2990  * ubc_upl_pageinfo
2991  *
2992  *  Retrieve the internal page list for the specified upl
2993  *
2994  * Parameters:	upl			The upl to obtain the page list from
2995  *
2996  * Returns:	!NULL			The (upl_page_info_t *) for the page
2997  *					list internal to the upl
2998  *		NULL			Error/no page list associated
2999  *
3000  * Notes:	IMPORTANT: The function is only valid on internal objects
3001  *		where the list request was made with the UPL_INTERNAL flag.
3002  *
3003  *		This function is a utility helper function, since some callers
3004  *		may not have direct access to the header defining the macro,
3005  *		due to abstraction layering constraints.
3006  */
3007 upl_page_info_t *
3008 ubc_upl_pageinfo(
3009 	upl_t                   upl)
3010 {
3011 	return UPL_GET_INTERNAL_PAGE_LIST(upl);
3012 }
3013 
3014 
3015 int
3016 UBCINFOEXISTS(const struct vnode * vp)
3017 {
3018 	return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
3019 }
3020 
3021 
3022 void
3023 ubc_upl_range_needed(
3024 	upl_t           upl,
3025 	int             index,
3026 	int             count)
3027 {
3028 	upl_range_needed(upl, index, count);
3029 }
3030 
3031 boolean_t
3032 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
3033 {
3034 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
3035 		return FALSE;
3036 	}
3037 	if (writable) {
3038 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
3039 	}
3040 	return TRUE;
3041 }
3042 
3043 boolean_t
3044 ubc_is_mapped_writable(const struct vnode *vp)
3045 {
3046 	boolean_t writable;
3047 	return ubc_is_mapped(vp, &writable) && writable;
3048 }
3049 
3050 boolean_t
3051 ubc_was_mapped(const struct vnode *vp, boolean_t *writable)
3052 {
3053 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPED)) {
3054 		return FALSE;
3055 	}
3056 	if (writable) {
3057 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPEDWRITE);
3058 	}
3059 	return TRUE;
3060 }
3061 
3062 boolean_t
3063 ubc_was_mapped_writable(const struct vnode *vp)
3064 {
3065 	boolean_t writable;
3066 	return ubc_was_mapped(vp, &writable) && writable;
3067 }
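
/*
 * Example: a sketch of how a filesystem's VNOP_MNOMAP handler might
 * consult these predicates while ubc_unmap() still has the mapped bits
 * set (see the note in ubc_unmap() above):
 *
 *	if (ubc_is_mapped_writable(vp)) {
 *		// the file may have been modified through a writable mapping
 *	}
 */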
3068 
3069 
3070 /*
3071  * CODE SIGNING
3072  */
3073 static atomic_size_t cs_blob_size = 0;
3074 static atomic_uint_fast32_t cs_blob_count = 0;
3075 static atomic_size_t cs_blob_size_peak = 0;
3076 static atomic_size_t cs_blob_size_max = 0;
3077 static atomic_uint_fast32_t cs_blob_count_peak = 0;
3078 
3079 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
3080 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
3081 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
3082 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
3083 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
3084 
3085 /*
3086  * Function: csblob_parse_teamid
3087  *
3088  * Description: This function returns a pointer to the team id
3089  *               stored within the codedirectory of the csblob.
3090  *               If the codedirectory predates team-ids, it returns
3091  *               NULL.
3092  *               This does not copy the name but returns a pointer to
3093  *               it within the CD. Consequently, the CD must be
3094  *               available when this is used.
3095  */
3096 
3097 static const char *
3098 csblob_parse_teamid(struct cs_blob *csblob)
3099 {
3100 	const CS_CodeDirectory *cd;
3101 
3102 	cd = csblob->csb_cd;
3103 
3104 	if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3105 		return NULL;
3106 	}
3107 
3108 	if (cd->teamOffset == 0) {
3109 		return NULL;
3110 	}
3111 
3112 	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
3113 	if (cs_debug > 1) {
3114 		printf("found team-id %s in cdblob\n", name);
3115 	}
3116 
3117 	return name;
3118 }
3119 
3120 kern_return_t
3121 ubc_cs_blob_allocate(
3122 	vm_offset_t     *blob_addr_p,
3123 	vm_size_t       *blob_size_p)
3124 {
3125 	kern_return_t   kr = KERN_FAILURE;
3126 	vm_size_t       allocation_size = 0;
3127 
3128 	if (!blob_addr_p || !blob_size_p) {
3129 		return KERN_INVALID_ARGUMENT;
3130 	}
3131 	allocation_size = *blob_size_p;
3132 
3133 	if (ubc_cs_blob_pagewise_allocate(allocation_size) == true) {
3134 		/* Round up to page size */
3135 		allocation_size = round_page(allocation_size);
3136 
3137 		/* Allocate page-wise */
3138 		kr = kmem_alloc(
3139 			kernel_map,
3140 			blob_addr_p,
3141 			allocation_size,
3142 			KMA_KOBJECT | KMA_DATA | KMA_ZERO,
3143 			VM_KERN_MEMORY_SECURITY);
3144 	} else {
3145 		*blob_addr_p = (vm_offset_t)kalloc_data_tag(
3146 			allocation_size,
3147 			Z_WAITOK | Z_ZERO,
3148 			VM_KERN_MEMORY_SECURITY);
3149 
3150 		assert(*blob_addr_p != 0);
3151 		kr = KERN_SUCCESS;
3152 	}
3153 
3154 	if (kr == KERN_SUCCESS) {
3155 		*blob_size_p = allocation_size;
3156 	}
3157 
3158 	return kr;
3159 }
3160 
3161 void
3162 ubc_cs_blob_deallocate(
3163 	vm_offset_t     blob_addr,
3164 	vm_size_t       blob_size)
3165 {
3166 	if (ubc_cs_blob_pagewise_allocate(blob_size) == true) {
3167 		kmem_free(kernel_map, blob_addr, blob_size);
3168 	} else {
3169 		kfree_data(blob_addr, blob_size);
3170 	}
3171 }
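
/*
 * Example: a sketch of the allocate/deallocate pairing; note that
 * ubc_cs_blob_allocate() may round the size up and reports the actual
 * allocation size back through blob_size_p:
 *
 *	vm_offset_t addr = 0;
 *	vm_size_t size = signature_size;	// requested size (assumed)
 *
 *	if (ubc_cs_blob_allocate(&addr, &size) == KERN_SUCCESS) {
 *		// ... copy and validate the signature at addr ...
 *		ubc_cs_blob_deallocate(addr, size);	// pass the updated size
 *	}
 */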
3172 
3173 /*
3174  * Some codesigned files use a lowest common denominator page size of
3175  * 4KiB, but can be used on systems that have a runtime page size of
3176  * 16KiB. Since faults will only occur on 16KiB ranges in
3177  * cs_validate_range(), we can convert the original Code Directory to
3178  * a multi-level scheme where groups of 4 hashes are combined to form
3179  * a new hash, which represents 16KiB in the on-disk file.  This can
3180  * reduce the wired memory requirement for the Code Directory by
3181  * 75%.
3182  */
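
/*
 * Worked example of the saving: with a 16KiB runtime page and 4KiB
 * code-signing pages, the 4 per-4KiB hashes (e.g. 4 x 32 bytes for
 * SHA-256) collapse into a single 32-byte hash per 16KiB page, leaving
 * 1/4 of the original hash storage wired: the 75% reduction noted above.
 */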
3183 static boolean_t
3184 ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
3185 {
3186 	const CS_CodeDirectory *cd;
3187 
3188 #if CODE_SIGNING_MONITOR
3189 	// TODO: <rdar://problem/30954826>
3190 	if (csm_enabled() == true) {
3191 		return FALSE;
3192 	}
3193 #endif
3194 
3195 	/*
3196 	 * Only applies to binaries that ship as part of the OS,
3197 	 * primarily the shared cache.
3198 	 */
3199 	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3200 		return FALSE;
3201 	}
3202 
3203 	/*
3204 	 * If the runtime page size matches the code signing page
3205 	 * size, there is no work to do.
3206 	 */
3207 	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3208 		return FALSE;
3209 	}
3210 
3211 	cd = blob->csb_cd;
3212 
3213 	/*
3214 	 * The hashes must cover an integral number of runtime pages
3215 	 */
3216 	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3217 		return FALSE;
3218 	}
3219 
3220 	/*
3221 	 * Scatter lists must also have ranges that have an integral number of hashes
3222 	 */
3223 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3224 		const SC_Scatter *scatter = (const SC_Scatter*)
3225 		    ((const char*)cd + ntohl(cd->scatterOffset));
3226 		/* iterate all scatter structs to make sure they are all aligned */
3227 		do {
3228 			uint32_t sbase = ntohl(scatter->base);
3229 			uint32_t scount = ntohl(scatter->count);
3230 
3231 			/* last scatter? */
3232 			if (scount == 0) {
3233 				break;
3234 			}
3235 
3236 			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3237 				return FALSE;
3238 			}
3239 
3240 			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3241 				return FALSE;
3242 			}
3243 
3244 			scatter++;
3245 		} while (1);
3246 	}
3247 
3248 	/* Covered range must be a multiple of the new page size */
3249 	if (ntohl(cd->codeLimit) & PAGE_MASK) {
3250 		return FALSE;
3251 	}
3252 
3253 	/* All checks pass */
3254 	return TRUE;
3255 }
3256 
3257 /*
3258  * Reconstruct a cs_blob with the code signature fields. This helper function
3259  * is useful because a lot of things often change the base address of the code
3260  * signature blob, which requires reconstructing some of the other pointers
3261  * within.
3262  */
3263 static errno_t
3264 ubc_cs_blob_reconstruct(
3265 	struct cs_blob *cs_blob,
3266 	const vm_address_t signature_addr,
3267 	const vm_address_t signature_size,
3268 	const vm_offset_t code_directory_offset)
3269 {
3270 	const CS_CodeDirectory *code_directory = NULL;
3271 
3272 	/* Setup the signature blob address */
3273 	cs_blob->csb_mem_kaddr = (void*)signature_addr;
3274 	cs_blob->csb_mem_size = signature_size;
3275 
3276 	/* Setup the code directory in the blob */
3277 	code_directory = (const CS_CodeDirectory*)(signature_addr + code_directory_offset);
3278 	cs_blob->csb_cd = code_directory;
3279 
3280 	/* Setup the XML entitlements */
3281 	cs_blob->csb_entitlements_blob = csblob_find_blob_bytes(
3282 		(uint8_t*)signature_addr,
3283 		signature_size,
3284 		CSSLOT_ENTITLEMENTS,
3285 		CSMAGIC_EMBEDDED_ENTITLEMENTS);
3286 
3287 	/* Setup the DER entitlements */
3288 	cs_blob->csb_der_entitlements_blob = csblob_find_blob_bytes(
3289 		(uint8_t*)signature_addr,
3290 		signature_size,
3291 		CSSLOT_DER_ENTITLEMENTS,
3292 		CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3293 
3294 	return 0;
3295 }
3296 
3297 /*
3298  * Given a validated cs_blob, we reformat the structure to only include
3299  * the blobs which are required by the kernel for our current platform.
3300  * This saves significant memory with agile signatures.
3301  *
3302  * To support rewriting the code directory, potentially through
3303  * multilevel hashes, we provide a mechanism to allocate a code directory
3304  * of a specified size and zero it out --> caller can fill it in.
3305  *
3306  * We don't need to perform a lot of overflow checks as the assumption
3307  * here is that the cs_blob has already been validated.
3308  */
3309 static errno_t
3310 ubc_cs_reconstitute_code_signature(
3311 	const struct cs_blob * const blob,
3312 	vm_address_t * const ret_mem_kaddr,
3313 	vm_size_t * const ret_mem_size,
3314 	vm_size_t code_directory_size,
3315 	CS_CodeDirectory ** const code_directory
3316 	)
3317 {
3318 	vm_address_t new_blob_addr = 0;
3319 	vm_size_t new_blob_size = 0;
3320 	vm_size_t new_code_directory_size = 0;
3321 	const CS_GenericBlob *best_code_directory = NULL;
3322 	const CS_GenericBlob *first_code_directory = NULL;
3323 	const CS_GenericBlob *der_entitlements_blob = NULL;
3324 	const CS_GenericBlob *entitlements_blob = NULL;
3325 	const CS_GenericBlob *cms_blob = NULL;
3326 	const CS_GenericBlob *launch_constraint_self = NULL;
3327 	const CS_GenericBlob *launch_constraint_parent = NULL;
3328 	const CS_GenericBlob *launch_constraint_responsible = NULL;
3329 	const CS_GenericBlob *library_constraint = NULL;
3330 	CS_SuperBlob *superblob = NULL;
3331 	uint32_t num_blobs = 0;
3332 	uint32_t blob_index = 0;
3333 	uint32_t blob_offset = 0;
3334 	kern_return_t ret;
3335 	int err;
3336 
3337 	if (!blob) {
3338 		if (cs_debug > 1) {
3339 			printf("CODE SIGNING: CS Blob passed in is NULL\n");
3340 		}
3341 		return EINVAL;
3342 	}
3343 
3344 	best_code_directory = (const CS_GenericBlob*)blob->csb_cd;
3345 	if (!best_code_directory) {
3346 		/* This case can never happen, and it is a sign of bad things */
3347 		panic("CODE SIGNING: Validated CS Blob has no code directory");
3348 	}
3349 
3350 	new_code_directory_size = code_directory_size;
3351 	if (new_code_directory_size == 0) {
3352 		new_code_directory_size = ntohl(best_code_directory->length);
3353 	}
3354 
3355 	/*
3356 	 * A code signature can contain multiple code directories, each of which contains hashes
3357 	 * of pages based on a hashing algorithm. The kernel selects which hashing algorithm is
3358 	 * the strongest, and consequently, marks one of these code directories as the best
3359 	 * matched one. More often than not, the best matched one is _not_ the first one.
3360 	 *
3361 	 * However, the CMS blob which cryptographically verifies the code signature is only
3362 	 * signed against the first code directory. Therefore, if the CMS blob is present, we also
3363 	 * need the first code directory to be able to verify it. Given this, we organize the
3364 	 * new cs_blob as following order:
3365 	 *
3366 	 * 1. best code directory
3367 	 * 2. DER encoded entitlements blob (if present)
3368 	 * 3. launch constraint self (if present)
3369 	 * 4. launch constraint parent (if present)
3370 	 * 5. launch constraint responsible (if present)
3371 	 * 6. library constraint (if present)
3372 	 * 7. entitlements blob (if present)
3373 	 * 8. cms blob (if present)
3374 	 * 9. first code directory (if not already the best match, and if cms blob is present)
3375 	 *
3376 	 * This order is chosen deliberately, as later on, we expect to get rid of the CMS blob
3377 	 * and the first code directory once their verification is complete.
3378 	 */
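
	/*
	 * Editorial sketch (not in the original source): the resulting layout is
	 * a single embedded-signature superblob, e.g. when a CMS blob and a
	 * distinct first code directory are both present:
	 *
	 *	CS_SuperBlob          { magic, length, count }
	 *	CS_BlobIndex[count]   { type, offset } per contained blob
	 *	best CD | DER ents | constraints... | XML ents | CMS | first CD
	 *
	 * Each CS_BlobIndex.offset is a byte offset from the start of the
	 * superblob, which is why blob_offset below starts at
	 * sizeof(CS_SuperBlob) + num_blobs * sizeof(CS_BlobIndex).
	 */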
3379 
3380 	/* Storage for the super blob header */
3381 	new_blob_size += sizeof(CS_SuperBlob);
3382 
3383 	/* Guaranteed storage for the best code directory */
3384 	new_blob_size += sizeof(CS_BlobIndex);
3385 	new_blob_size += new_code_directory_size;
3386 	num_blobs += 1;
3387 
3388 	/* Conditional storage for the DER entitlements blob */
3389 	der_entitlements_blob = blob->csb_der_entitlements_blob;
3390 	if (der_entitlements_blob) {
3391 		new_blob_size += sizeof(CS_BlobIndex);
3392 		new_blob_size += ntohl(der_entitlements_blob->length);
3393 		num_blobs += 1;
3394 	}
3395 
3396 	/* Conditional storage for the launch constraints self blob */
3397 	launch_constraint_self = csblob_find_blob_bytes(
3398 		(const uint8_t *)blob->csb_mem_kaddr,
3399 		blob->csb_mem_size,
3400 		CSSLOT_LAUNCH_CONSTRAINT_SELF,
3401 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3402 	if (launch_constraint_self) {
3403 		new_blob_size += sizeof(CS_BlobIndex);
3404 		new_blob_size += ntohl(launch_constraint_self->length);
3405 		num_blobs += 1;
3406 	}
3407 
3408 	/* Conditional storage for the launch constraints parent blob */
3409 	launch_constraint_parent = csblob_find_blob_bytes(
3410 		(const uint8_t *)blob->csb_mem_kaddr,
3411 		blob->csb_mem_size,
3412 		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3413 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3414 	if (launch_constraint_parent) {
3415 		new_blob_size += sizeof(CS_BlobIndex);
3416 		new_blob_size += ntohl(launch_constraint_parent->length);
3417 		num_blobs += 1;
3418 	}
3419 
3420 	/* Conditional storage for the launch constraints responsible blob */
3421 	launch_constraint_responsible = csblob_find_blob_bytes(
3422 		(const uint8_t *)blob->csb_mem_kaddr,
3423 		blob->csb_mem_size,
3424 		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3425 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3426 	if (launch_constraint_responsible) {
3427 		new_blob_size += sizeof(CS_BlobIndex);
3428 		new_blob_size += ntohl(launch_constraint_responsible->length);
3429 		num_blobs += 1;
3430 	}
3431 
3432 	/* Conditional storage for the library constraints blob */
3433 	library_constraint = csblob_find_blob_bytes(
3434 		(const uint8_t *)blob->csb_mem_kaddr,
3435 		blob->csb_mem_size,
3436 		CSSLOT_LIBRARY_CONSTRAINT,
3437 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3438 	if (library_constraint) {
3439 		new_blob_size += sizeof(CS_BlobIndex);
3440 		new_blob_size += ntohl(library_constraint->length);
3441 		num_blobs += 1;
3442 	}
3443 
3444 	/* Conditional storage for the entitlements blob */
3445 	entitlements_blob = blob->csb_entitlements_blob;
3446 	if (entitlements_blob) {
3447 		new_blob_size += sizeof(CS_BlobIndex);
3448 		new_blob_size += ntohl(entitlements_blob->length);
3449 		num_blobs += 1;
3450 	}
3451 
3452 	/* Conditional storage for the CMS blob */
3453 	cms_blob = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
3454 	if (cms_blob) {
3455 		new_blob_size += sizeof(CS_BlobIndex);
3456 		new_blob_size += ntohl(cms_blob->length);
3457 		num_blobs += 1;
3458 	}
3459 
3460 	/*
3461 	 * Conditional storage for the first code directory.
3462 	 * This is only needed if a CMS blob exists and the best code directory isn't already
3463 	 * the first one. It is an error if we find a CMS blob but do not find a first code directory.
3464 	 */
3465 	if (cms_blob) {
3466 		first_code_directory = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);
3467 		if (first_code_directory == best_code_directory) {
3468 			/* We don't need the first code directory anymore, since the best one is already it */
3469 			first_code_directory = NULL;
3470 		} else if (first_code_directory) {
3471 			new_blob_size += sizeof(CS_BlobIndex);
3472 			new_blob_size += ntohl(first_code_directory->length);
3473 			num_blobs += 1;
3474 		} else {
3475 			printf("CODE SIGNING: Invalid CS Blob: found CMS blob but not a first code directory\n");
3476 			return EINVAL;
3477 		}
3478 	}
3479 
3480 	/*
3481 	 * The blob size could be rounded up to the page size here, so we keep a copy
3482 	 * of the actual superblob length as well.
3483 	 */
3484 	vm_size_t new_blob_allocation_size = new_blob_size;
3485 	ret = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_allocation_size);
3486 	if (ret != KERN_SUCCESS) {
3487 		printf("CODE SIGNING: Failed to allocate memory for new code signing blob: %d\n", ret);
3488 		return ENOMEM;
3489 	}
3490 
3491 	/*
3492 	 * Fill out the superblob header and then all the blobs in the order listed
3493 	 * above.
3494 	 */
3495 	superblob = (CS_SuperBlob*)new_blob_addr;
3496 	superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3497 	superblob->length = htonl((uint32_t)new_blob_size);
3498 	superblob->count = htonl(num_blobs);
3499 
3500 	blob_index = 0;
3501 	blob_offset = sizeof(CS_SuperBlob) + (num_blobs * sizeof(CS_BlobIndex));
3502 
3503 	/* Best code directory */
3504 	superblob->index[blob_index].offset = htonl(blob_offset);
3505 	if (first_code_directory) {
3506 		superblob->index[blob_index].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES);
3507 	} else {
3508 		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3509 	}
3510 
3511 	if (code_directory_size > 0) {
3512 		/* We zero out the code directory, as we expect the caller to fill it in */
3513 		memset((void*)(new_blob_addr + blob_offset), 0, new_code_directory_size);
3514 	} else {
3515 		memcpy((void*)(new_blob_addr + blob_offset), best_code_directory, new_code_directory_size);
3516 	}
3517 
3518 	if (code_directory) {
3519 		*code_directory = (CS_CodeDirectory*)(new_blob_addr + blob_offset);
3520 	}
3521 	blob_offset += new_code_directory_size;
3522 
3523 	/* DER entitlements blob */
3524 	if (der_entitlements_blob) {
3525 		blob_index += 1;
3526 		superblob->index[blob_index].offset = htonl(blob_offset);
3527 		superblob->index[blob_index].type = htonl(CSSLOT_DER_ENTITLEMENTS);
3528 
3529 		memcpy((void*)(new_blob_addr + blob_offset), der_entitlements_blob, ntohl(der_entitlements_blob->length));
3530 		blob_offset += ntohl(der_entitlements_blob->length);
3531 	}
3532 
3533 	/* Launch constraints self blob */
3534 	if (launch_constraint_self) {
3535 		blob_index += 1;
3536 		superblob->index[blob_index].offset = htonl(blob_offset);
3537 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_SELF);
3538 
3539 		memcpy(
3540 			(void*)(new_blob_addr + blob_offset),
3541 			launch_constraint_self,
3542 			ntohl(launch_constraint_self->length));
3543 
3544 		blob_offset += ntohl(launch_constraint_self->length);
3545 	}
3546 
3547 	/* Launch constraints parent blob */
3548 	if (launch_constraint_parent) {
3549 		blob_index += 1;
3550 		superblob->index[blob_index].offset = htonl(blob_offset);
3551 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_PARENT);
3552 
3553 		memcpy(
3554 			(void*)(new_blob_addr + blob_offset),
3555 			launch_constraint_parent,
3556 			ntohl(launch_constraint_parent->length));
3557 
3558 		blob_offset += ntohl(launch_constraint_parent->length);
3559 	}
3560 
3561 	/* Launch constraints responsible blob */
3562 	if (launch_constraint_responsible) {
3563 		blob_index += 1;
3564 		superblob->index[blob_index].offset = htonl(blob_offset);
3565 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE);
3566 
3567 		memcpy(
3568 			(void*)(new_blob_addr + blob_offset),
3569 			launch_constraint_responsible,
3570 			ntohl(launch_constraint_responsible->length));
3571 
3572 		blob_offset += ntohl(launch_constraint_responsible->length);
3573 	}
3574 
3575 	/* library constraints blob */
3576 	if (library_constraint) {
3577 		blob_index += 1;
3578 		superblob->index[blob_index].offset = htonl(blob_offset);
3579 		superblob->index[blob_index].type = htonl(CSSLOT_LIBRARY_CONSTRAINT);
3580 
3581 		memcpy(
3582 			(void*)(new_blob_addr + blob_offset),
3583 			library_constraint,
3584 			ntohl(library_constraint->length));
3585 
3586 		blob_offset += ntohl(library_constraint->length);
3587 	}
3588 
3589 	/* Entitlements blob */
3590 	if (entitlements_blob) {
3591 		blob_index += 1;
3592 		superblob->index[blob_index].offset = htonl(blob_offset);
3593 		superblob->index[blob_index].type = htonl(CSSLOT_ENTITLEMENTS);
3594 
3595 		memcpy((void*)(new_blob_addr + blob_offset), entitlements_blob, ntohl(entitlements_blob->length));
3596 		blob_offset += ntohl(entitlements_blob->length);
3597 	}
3598 
3599 	/* CMS blob */
3600 	if (cms_blob) {
3601 		blob_index += 1;
3602 		superblob->index[blob_index].offset = htonl(blob_offset);
3603 		superblob->index[blob_index].type = htonl(CSSLOT_SIGNATURESLOT);
3604 		memcpy((void*)(new_blob_addr + blob_offset), cms_blob, ntohl(cms_blob->length));
3605 		blob_offset += ntohl(cms_blob->length);
3606 	}
3607 
3608 	/* First code directory */
3609 	if (first_code_directory) {
3610 		blob_index += 1;
3611 		superblob->index[blob_index].offset = htonl(blob_offset);
3612 		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3613 		memcpy((void*)(new_blob_addr + blob_offset), first_code_directory, ntohl(first_code_directory->length));
3614 		blob_offset += ntohl(first_code_directory->length);
3615 	}
3616 
3617 	/*
3618 	 * We only validate the blob if we copied in the best code directory.
3619 	 * If the code directory size we were passed wasn't 0, we memset the best
3620 	 * code directory to 0 and expect the caller to fill it in. In the same spirit, we
3621 	 * expect the caller to validate the code signature after they fill in the code
3622 	 * directory.
3623 	 */
3624 	if (code_directory_size == 0) {
3625 		const CS_CodeDirectory *validated_code_directory = NULL;
3626 		const CS_GenericBlob *validated_entitlements_blob = NULL;
3627 		const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3628 
3629 		ret = cs_validate_csblob(
3630 			(const uint8_t *)superblob,
3631 			new_blob_size,
3632 			&validated_code_directory,
3633 			&validated_entitlements_blob,
3634 			&validated_der_entitlements_blob);
3635 
3636 		if (ret) {
3637 			printf("unable to validate reconstituted cs_blob: %d\n", ret);
3638 			err = EINVAL;
3639 			goto fail;
3640 		}
3641 	}
3642 
3643 	if (ret_mem_kaddr) {
3644 		*ret_mem_kaddr = new_blob_addr;
3645 	}
3646 	if (ret_mem_size) {
3647 		*ret_mem_size = new_blob_allocation_size;
3648 	}
3649 
3650 	return 0;
3651 
3652 fail:
3653 	ubc_cs_blob_deallocate(new_blob_addr, new_blob_allocation_size);
3654 	return err;
3655 }
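
/*
 * Editorial note: the code_directory_size parameter above selects between two
 * modes. Passing 0 copies the best code directory as-is and re-validates the
 * rebuilt superblob before returning; passing a nonzero size leaves a zeroed
 * code directory of that size for the caller to fill in (as
 * ubc_cs_convert_to_multilevel_hash() does below), in which case the caller is
 * expected to run cs_validate_csblob() itself once done.
 */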
3656 
3657 /*
3658  * We use this function to clear out parts of the code signature blob which
3659  * are no longer needed, freeing that memory back to the kernel. This matters
3660  * because reconstitution keeps extra data which is needed only for
3661  * verification and is pointless to keep afterwards.
3662  *
3663  * This results in significant memory reduction, especially for 3rd party apps
3664  * since we also get rid of the CMS blob.
3665  */
3666 static errno_t
3667 ubc_cs_reconstitute_code_signature_2nd_stage(
3668 	struct cs_blob *blob
3669 	)
3670 {
3671 	kern_return_t ret = KERN_FAILURE;
3672 	const CS_GenericBlob *launch_constraint_self = NULL;
3673 	const CS_GenericBlob *launch_constraint_parent = NULL;
3674 	const CS_GenericBlob *launch_constraint_responsible = NULL;
3675 	const CS_GenericBlob *library_constraint = NULL;
3676 	CS_SuperBlob *superblob = NULL;
3677 	uint32_t num_blobs = 0;
3678 	vm_size_t last_needed_blob_offset = 0;
3679 	vm_offset_t code_directory_offset = 0;
3680 
3681 	/*
3682 	 * Ordering of blobs we need to keep:
3683 	 * 1. Code directory
3684 	 * 2. DER encoded entitlements (if present)
3685 	 * 3. Launch constraints self (if present)
3686 	 * 4. Launch constraints parent (if present)
3687 	 * 5. Launch constraints responsible (if present)
3688 	 * 6. Library constraints (if present)
3689 	 *
3690 	 * We need to clear out the remaining page after these blobs end, and fix up
3691 	 * the superblob for the changes. Things get a little more complicated for
3692 	 * blobs which may not have been kmem_allocated. For those, we simply
3693 	 * allocate the new required space and copy into it.
3694 	 */
3695 
3696 	if (blob == NULL) {
3697 		printf("NULL blob passed in for 2nd stage reconstitution\n");
3698 		return EINVAL;
3699 	}
3700 	assert(blob->csb_reconstituted == true);
3701 
3702 	/* Ensure we're not page-wise allocated when in this function */
3703 	assert(ubc_cs_blob_pagewise_allocate(blob->csb_mem_size) == false);
3704 
3705 	if (!blob->csb_cd) {
3706 		/* This case can never happen, and it is a sign of bad things */
3707 		panic("validated cs_blob has no code directory");
3708 	}
3709 	superblob = (CS_SuperBlob*)blob->csb_mem_kaddr;
3710 
3711 	num_blobs = 1;
3712 	last_needed_blob_offset = ntohl(superblob->index[0].offset) + ntohl(blob->csb_cd->length);
3713 
3714 	/* Check for DER entitlements */
3715 	if (blob->csb_der_entitlements_blob) {
3716 		num_blobs += 1;
3717 		last_needed_blob_offset += ntohl(blob->csb_der_entitlements_blob->length);
3718 	}
3719 
3720 	/* Check for launch constraints self */
3721 	launch_constraint_self = csblob_find_blob_bytes(
3722 		(const uint8_t *)blob->csb_mem_kaddr,
3723 		blob->csb_mem_size,
3724 		CSSLOT_LAUNCH_CONSTRAINT_SELF,
3725 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3726 	if (launch_constraint_self) {
3727 		num_blobs += 1;
3728 		last_needed_blob_offset += ntohl(launch_constraint_self->length);
3729 	}
3730 
3731 	/* Check for launch constraints parent */
3732 	launch_constraint_parent = csblob_find_blob_bytes(
3733 		(const uint8_t *)blob->csb_mem_kaddr,
3734 		blob->csb_mem_size,
3735 		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3736 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3737 	if (launch_constraint_parent) {
3738 		num_blobs += 1;
3739 		last_needed_blob_offset += ntohl(launch_constraint_parent->length);
3740 	}
3741 
3742 	/* Check for launch constraints responsible */
3743 	launch_constraint_responsible = csblob_find_blob_bytes(
3744 		(const uint8_t *)blob->csb_mem_kaddr,
3745 		blob->csb_mem_size,
3746 		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3747 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3748 	if (launch_constraint_responsible) {
3749 		num_blobs += 1;
3750 		last_needed_blob_offset += ntohl(launch_constraint_responsible->length);
3751 	}
3752 
3753 	/* Check for library constraint */
3754 	library_constraint = csblob_find_blob_bytes(
3755 		(const uint8_t *)blob->csb_mem_kaddr,
3756 		blob->csb_mem_size,
3757 		CSSLOT_LIBRARY_CONSTRAINT,
3758 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3759 	if (library_constraint) {
3760 		num_blobs += 1;
3761 		last_needed_blob_offset += ntohl(library_constraint->length);
3762 	}
3763 
3764 	superblob->count = htonl(num_blobs);
3765 	superblob->length = htonl((uint32_t)last_needed_blob_offset);
3766 
3767 	/*
3768 	 * There is a chance that the code directory is marked within the superblob as an
3769 	 * alternate code directory. This happens when the first code directory isn't the
3770 	 * best one chosen by the kernel, so to be able to access both the first and the best,
3771 	 * we save the best one as an alternate one. Since we're getting rid of the first one
3772 	 * here, we mark the best one as the first one.
3773 	 */
3774 	superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3775 
3776 	vm_address_t new_superblob = 0;
3777 	vm_size_t new_superblob_size = last_needed_blob_offset;
3778 
3779 	ret = ubc_cs_blob_allocate(&new_superblob, &new_superblob_size);
3780 	if (ret != KERN_SUCCESS) {
3781 		printf("unable to allocate memory for 2nd stage reconstitution: %d\n", ret);
3782 		return ENOMEM;
3783 	}
3784 	assert(new_superblob_size == last_needed_blob_offset);
3785 
3786 	/* Calculate the code directory offset */
3787 	code_directory_offset = (vm_offset_t)blob->csb_cd - (vm_offset_t)blob->csb_mem_kaddr;
3788 
3789 	/* Copy in the updated superblob into the new memory */
3790 	memcpy((void*)new_superblob, superblob, new_superblob_size);
3791 
3792 	/* Free the old code signature and old memory */
3793 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3794 
3795 	/* Reconstruct critical fields in the blob object */
3796 	ubc_cs_blob_reconstruct(
3797 		blob,
3798 		new_superblob,
3799 		new_superblob_size,
3800 		code_directory_offset);
3801 
3802 	/* XML entitlements should've been removed */
3803 	assert(blob->csb_entitlements_blob == NULL);
3804 
3805 	const CS_CodeDirectory *validated_code_directory = NULL;
3806 	const CS_GenericBlob *validated_entitlements_blob = NULL;
3807 	const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3808 
3809 	ret = cs_validate_csblob(
3810 		(const uint8_t*)blob->csb_mem_kaddr,
3811 		blob->csb_mem_size,
3812 		&validated_code_directory,
3813 		&validated_entitlements_blob,
3814 		&validated_der_entitlements_blob);
3815 	if (ret) {
3816 		printf("unable to validate code signature after 2nd stage reconstitution: %d\n", ret);
3817 		return EINVAL;
3818 	}
3819 
3820 	return 0;
3821 }
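
/*
 * Editorial sketch (not in the original source): 2nd stage reconstitution
 * truncates the superblob to the prefix ending with the last constraint blob,
 * roughly:
 *
 *	before: [SuperBlob][CD][DER ents][constraints...][XML ents][CMS][first CD]
 *	after:  [SuperBlob][CD][DER ents][constraints...]
 *
 * This relies on the 1st stage layout described earlier, which deliberately
 * placed the discardable blobs (XML entitlements, CMS blob, first code
 * directory) at the end so that a simple length cut removes them.
 */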
3822 
3823 static int
3824 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3825 {
3826 	const CS_CodeDirectory  *old_cd, *cd;
3827 	CS_CodeDirectory        *new_cd;
3828 	const CS_GenericBlob *entitlements;
3829 	const CS_GenericBlob *der_entitlements;
3830 	vm_offset_t     new_blob_addr;
3831 	vm_size_t       new_blob_size;
3832 	vm_size_t       new_cdsize;
3833 	int                             error;
3834 
3835 	uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3836 
3837 	if (cs_debug > 1) {
3838 		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3839 		    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3840 	}
3841 
3842 	old_cd = blob->csb_cd;
3843 
3844 	/* Up to the hashes, we can copy all data */
3845 	new_cdsize  = ntohl(old_cd->hashOffset);
3846 	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
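
	/*
	 * Editorial note (not in the original source): e.g. converting 4KiB hash
	 * pages (csb_hash_pageshift == 12) to 16KiB kernel pages (PAGE_SHIFT == 14)
	 * gives hashes_per_new_hash_shift == 2, so every 2^2 == 4 source hashes are
	 * condensed into one new slot (new_hash = H(h0 || h1 || h2 || h3), computed
	 * in the loop further down) and the hash area shrinks by roughly 75%.
	 */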
3847 
3848 	error = ubc_cs_reconstitute_code_signature(blob, &new_blob_addr, &new_blob_size, new_cdsize, &new_cd);
3849 	if (error != 0) {
3850 		printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3851 		return error;
3852 	}
3853 	entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3854 	der_entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3855 
3856 	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3857 
3858 	/* Update fields in the Code Directory structure */
3859 	new_cd->length = htonl((uint32_t)new_cdsize);
3860 
3861 	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3862 	nCodeSlots >>= hashes_per_new_hash_shift;
3863 	new_cd->nCodeSlots = htonl(nCodeSlots);
3864 
3865 	new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
3866 
3867 	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3868 		SC_Scatter *scatter = (SC_Scatter*)
3869 		    ((char *)new_cd + ntohl(new_cd->scatterOffset));
3870 		/* iterate all scatter structs to scale their counts */
3871 		do {
3872 			uint32_t scount = ntohl(scatter->count);
3873 			uint32_t sbase  = ntohl(scatter->base);
3874 
3875 			/* last scatter? */
3876 			if (scount == 0) {
3877 				break;
3878 			}
3879 
3880 			scount >>= hashes_per_new_hash_shift;
3881 			scatter->count = htonl(scount);
3882 
3883 			sbase >>= hashes_per_new_hash_shift;
3884 			scatter->base = htonl(sbase);
3885 
3886 			scatter++;
3887 		} while (1);
3888 	}
3889 
3890 	/* For each group of hashes, hash them together */
3891 	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3892 	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3893 
3894 	uint32_t hash_index;
3895 	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3896 		union cs_hash_union     mdctx;
3897 
3898 		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3899 		const unsigned char *src = src_base + hash_index * source_hash_len;
3900 		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3901 
3902 		blob->csb_hashtype->cs_init(&mdctx);
3903 		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3904 		blob->csb_hashtype->cs_final(dst, &mdctx);
3905 	}
3906 
3907 	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements, &der_entitlements);
3908 	if (error != 0) {
3909 		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3910 		    error);
3911 
3912 		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3913 		return error;
3914 	}
3915 
3916 	/* New Code Directory is ready for use, swap it out in the blob structure */
3917 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3918 
3919 	blob->csb_mem_size = new_blob_size;
3920 	blob->csb_mem_kaddr = (void *)new_blob_addr;
3921 	blob->csb_cd = cd;
3922 	blob->csb_entitlements_blob = NULL;
3923 
3924 	blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
3925 	blob->csb_reconstituted = true;
3926 
3927 	/* The blob has some cached attributes of the Code Directory, so update those */
3928 
3929 	blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
3930 
3931 	blob->csb_hash_pageshift = PAGE_SHIFT;
3932 	blob->csb_end_offset = ntohl(cd->codeLimit);
3933 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3934 		const SC_Scatter *scatter = (const SC_Scatter*)
3935 		    ((const char*)cd + ntohl(cd->scatterOffset));
3936 		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3937 	} else {
3938 		blob->csb_start_offset = 0;
3939 	}
3940 
3941 	return 0;
3942 }
3943 
3944 static void
3945 cs_blob_cleanup(struct cs_blob *blob)
3946 {
3947 	if (blob->csb_entitlements != NULL) {
3948 		amfi->OSEntitlements_invalidate(blob->csb_entitlements);
3949 		osobject_release(blob->csb_entitlements);
3950 		blob->csb_entitlements = NULL;
3951 	}
3952 
3953 #if CODE_SIGNING_MONITOR
3954 	if (blob->csb_csm_obj != NULL) {
3955 		/* Unconditionally remove any profiles we may have associated */
3956 		csm_disassociate_provisioning_profile(blob->csb_csm_obj);
3957 
3958 		kern_return_t kr = csm_unregister_code_signature(blob->csb_csm_obj);
3959 		if (kr == KERN_SUCCESS) {
3960 			/*
3961 			 * If the code signature was monitor managed, the monitor will have freed it
3962 			 * itself in the unregistration call. It means we do not need to free the data
3963 			 * over here.
3964 			 */
3965 			if (blob->csb_csm_managed) {
3966 				blob->csb_mem_kaddr = NULL;
3967 				blob->csb_mem_size = 0;
3968 			}
3969 		} else if (kr == KERN_ABORTED) {
3970 			/*
3971 			 * The code-signing-monitor refused to unregister the code signature. It means
3972 			 * whatever memory was backing the code signature may not have been released, and
3973 			 * attempting to free it down below will not be successful. As a result, all we
3974 			 * can do is prevent the kernel from touching the data.
3975 			 */
3976 			blob->csb_mem_kaddr = NULL;
3977 			blob->csb_mem_size = 0;
3978 		}
3979 	}
3980 
3981 	/* Unconditionally remove references to the monitor */
3982 	blob->csb_csm_obj = NULL;
3983 	blob->csb_csm_managed = false;
3984 #endif
3985 
3986 	if (blob->csb_mem_kaddr) {
3987 		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3988 	}
3989 	blob->csb_mem_kaddr = NULL;
3990 	blob->csb_mem_size = 0;
3991 }
3992 
3993 static void
3994 cs_blob_ro_free(struct cs_blob *blob)
3995 {
3996 	struct cs_blob tmp;
3997 
3998 	if (blob != NULL) {
3999 		/*
4000 		 * cs_blob_cleanup clears fields, so we need to pass it a
4001 		 * mutable copy.
4002 		 */
4003 		tmp = *blob;
4004 		cs_blob_cleanup(&tmp);
4005 
4006 		zfree_ro(ZONE_ID_CS_BLOB, blob);
4007 	}
4008 }
4009 
4010 /*
4011  * Free a cs_blob previously created by cs_blob_create_validated.
4012  */
4013 void
4014 cs_blob_free(
4015 	struct cs_blob *blob)
4016 {
4017 	cs_blob_ro_free(blob);
4018 }
4019 
4020 static int
4021 cs_blob_init_validated(
4022 	vm_address_t * const addr,
4023 	vm_size_t size,
4024 	struct cs_blob *blob,
4025 	CS_CodeDirectory const ** const ret_cd)
4026 {
4027 	int error = EINVAL;
4028 	const CS_CodeDirectory *cd = NULL;
4029 	const CS_GenericBlob *entitlements = NULL;
4030 	const CS_GenericBlob *der_entitlements = NULL;
4031 	union cs_hash_union mdctx;
4032 	size_t length;
4033 
4034 	bzero(blob, sizeof(*blob));
4035 
4036 	/* fill in the new blob */
4037 	blob->csb_mem_size = size;
4038 	blob->csb_mem_offset = 0;
4039 	blob->csb_mem_kaddr = (void *)*addr;
4040 	blob->csb_flags = 0;
4041 	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
4042 	blob->csb_platform_binary = 0;
4043 	blob->csb_platform_path = 0;
4044 	blob->csb_teamid = NULL;
4045 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4046 	blob->csb_supplement_teamid = NULL;
4047 #endif
4048 	blob->csb_entitlements_blob = NULL;
4049 	blob->csb_der_entitlements_blob = NULL;
4050 	blob->csb_entitlements = NULL;
4051 #if CODE_SIGNING_MONITOR
4052 	blob->csb_csm_obj = NULL;
4053 	blob->csb_csm_managed = false;
4054 #endif
4055 	blob->csb_reconstituted = false;
4056 	blob->csb_validation_category = CS_VALIDATION_CATEGORY_INVALID;
4057 
4058 	/* Transfer ownership. Even on error, this function will deallocate */
4059 	*addr = 0;
4060 
4061 	/*
4062 	 * Validate the blob's contents
4063 	 */
4064 	length = (size_t) size;
4065 	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
4066 	    length, &cd, &entitlements, &der_entitlements);
4067 	if (error) {
4068 		if (cs_debug) {
4069 			printf("CODESIGNING: csblob invalid: %d\n", error);
4070 		}
4071 		/*
4072 		 * The vnode checker can't make the rest of this function
4073 		 * succeed if csblob validation failed, so bail */
4074 		goto out;
4075 	} else {
4076 		const unsigned char *md_base;
4077 		uint8_t hash[CS_HASH_MAX_SIZE];
4078 		int md_size;
4079 		vm_offset_t hash_pagemask;
4080 
4081 		blob->csb_cd = cd;
4082 		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
4083 		blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
4084 		blob->csb_hashtype = cs_find_md(cd->hashType);
4085 		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
4086 			panic("validated CodeDirectory but unsupported type");
4087 		}
4088 
4089 		blob->csb_hash_pageshift = cd->pageSize;
4090 		hash_pagemask = (1U << cd->pageSize) - 1;
4091 		blob->csb_hash_firstlevel_pageshift = 0;
4092 		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
4093 		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
4094 		if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
4095 			const SC_Scatter *scatter = (const SC_Scatter*)
4096 			    ((const char*)cd + ntohl(cd->scatterOffset));
4097 			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
4098 		} else {
4099 			blob->csb_start_offset = 0;
4100 		}
4101 		/* compute the blob's cdhash */
4102 		md_base = (const unsigned char *) cd;
4103 		md_size = ntohl(cd->length);
4104 
4105 		blob->csb_hashtype->cs_init(&mdctx);
4106 		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
4107 		blob->csb_hashtype->cs_final(hash, &mdctx);
4108 
4109 		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
4110 
4111 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4112 		blob->csb_linkage_hashtype = NULL;
4113 		if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
4114 		    ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
4115 			blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
4116 
4117 			if (blob->csb_linkage_hashtype != NULL) {
4118 				memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
4119 				    CS_CDHASH_LEN);
4120 			}
4121 		}
4122 #endif
4123 	}
4124 
4125 	error = 0;
4126 
4127 out:
4128 	if (error != 0) {
4129 		cs_blob_cleanup(blob);
4130 		blob = NULL;
4131 		cd = NULL;
4132 	}
4133 
4134 	if (ret_cd != NULL) {
4135 		*ret_cd = cd;
4136 	}
4137 
4138 	return error;
4139 }
4140 
4141 /*
4142  * Validate the code signature blob, create a struct cs_blob wrapper
4143  * and return it together with a pointer to the chosen code directory
4144  * and entitlements blob.
4145  *
4146  * Note that this takes ownership of the memory as addr, mainly because
4147  * this function can actually replace the passed in blob with another
4148  * one, e.g. when performing multilevel hashing optimization.
4149  */
4150 int
4151 cs_blob_create_validated(
4152 	vm_address_t * const            addr,
4153 	vm_size_t                       size,
4154 	struct cs_blob ** const         ret_blob,
4155 	CS_CodeDirectory const ** const     ret_cd)
4156 {
4157 	struct cs_blob blob = {};
4158 	struct cs_blob *ro_blob;
4159 	int error;
4160 
4161 	if (ret_blob) {
4162 		*ret_blob = NULL;
4163 	}
4164 
4165 	if ((error = cs_blob_init_validated(addr, size, &blob, ret_cd)) != 0) {
4166 		return error;
4167 	}
4168 
4169 	if (ret_blob != NULL) {
4170 		ro_blob = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4171 		zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, &blob);
4172 		*ret_blob = ro_blob;
4173 	}
4174 
4175 	return error;
4176 }
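
/*
 * Editorial sketch (not in the original source): a typical lifecycle for a
 * standalone validated blob, assuming `addr`/`size` describe a signature
 * allocated with ubc_cs_blob_allocate():
 *
 *	struct cs_blob *blob = NULL;
 *	const CS_CodeDirectory *cd = NULL;
 *	if (cs_blob_create_validated(&addr, size, &blob, &cd) == 0) {
 *		... use blob->csb_cdhash, cd ...
 *		cs_blob_free(blob);
 *	}
 *
 * Ownership of the memory transfers on the call even on failure, so the
 * caller must not free `addr` afterwards.
 */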
4177 
4178 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4179 static void
4180 cs_blob_supplement_free(struct cs_blob * const blob)
4181 {
4182 	void *teamid;
4183 
4184 	if (blob != NULL) {
4185 		if (blob->csb_supplement_teamid != NULL) {
4186 			teamid = blob->csb_supplement_teamid;
4187 			vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
4188 			kfree_data(teamid, teamid_size);
4189 		}
4190 		cs_blob_ro_free(blob);
4191 	}
4192 }
4193 #endif
4194 
4195 static void
4196 ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
4197 {
4198 	/* Note that the atomic ops are not enough to guarantee
4199 	 * correctness: If a blob with an intermediate size is inserted
4200 	 * concurrently, we can lose a peak value assignment. But these
4201 	 * statistics are only advisory anyway, so we're not going to
4202 	 * employ full locking here. (Consequently, we are also okay with
4203 	 * relaxed ordering of those accesses.)
4204 	 */
4205 
4206 	unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
4207 	if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
4208 		os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
4209 	}
4210 
4211 	size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
4212 
4213 	if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
4214 		os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
4215 	}
4216 	if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
4217 		os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
4218 	}
4219 }
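
/*
 * Editorial sketch of the benign race noted above (not in the original
 * source): with cs_blob_size_peak at 0, thread A raises cs_blob_size to 25
 * while thread B, whose add landed first, observed a total of 20. If A stores
 * peak = 25 and B then stores peak = 20, the recorded peak regresses; since
 * the counters are advisory, this is tolerated rather than locked against.
 */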
4220 
4221 static void
4222 cs_blob_set_cpu_type(struct cs_blob *blob, cpu_type_t cputype)
4223 {
4224 	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_type, &cputype);
4225 }
4226 
4227 __abortlike
4228 static void
4229 panic_cs_blob_backref_mismatch(struct cs_blob *blob, struct vnode *vp)
4230 {
4231 	panic("cs_blob vnode backref mismatch: blob=%p, vp=%p, "
4232 	    "blob->csb_vnode=%p", blob, vp, blob->csb_vnode);
4233 }
4234 
4235 void
4236 cs_blob_require(struct cs_blob *blob, vnode_t vp)
4237 {
4238 	zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), blob);
4239 
4240 	if (vp != NULL && __improbable(blob->csb_vnode != vp)) {
4241 		panic_cs_blob_backref_mismatch(blob, vp);
4242 	}
4243 }
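
/*
 * Editorial sketch (not in the original source): callers use cs_blob_require()
 * as a cheap integrity gate before trusting a cs_blob pointer, e.g.:
 *
 *	cs_blob_require(blob, vp);      // panics on zone or back-ref mismatch
 *	use(blob->csb_cdhash);          // `use` is a hypothetical consumer
 *
 * Passing vp == NULL skips the vnode back-reference check.
 */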
4244 
4245 #if CODE_SIGNING_MONITOR
4246 
4247 /**
4248  * Independently verify the authenticity of the code signature through the monitor
4249  * environment. This is required as otherwise the monitor won't allow associations
4250  * of the code signature with address spaces.
4251  *
4252  * Once we've verified the code signature, we no longer need to keep around any
4253  * provisioning profiles we may have registered with it. AMFI associates profiles
4254  * with the monitor during its validation (which happens before the monitor's).
4255  */
4256 static errno_t
4257 verify_code_signature_monitor(
4258 	struct cs_blob *cs_blob)
4259 {
4260 	kern_return_t ret = KERN_DENIED;
4261 
4262 	ret = csm_verify_code_signature(cs_blob->csb_csm_obj, &cs_blob->csb_csm_trust_level);
4263 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4264 		printf("unable to verify code signature with monitor: %d\n", ret);
4265 		return EPERM;
4266 	}
4267 
4268 	ret = csm_disassociate_provisioning_profile(cs_blob->csb_csm_obj);
4269 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND) && (ret != KERN_NOT_SUPPORTED)) {
4270 		printf("unable to disassociate profile from code signature: %d\n", ret);
4271 		return EPERM;
4272 	}
4273 
4274 	/* Associate the OSEntitlements kernel object with the monitor */
4275 	ret = csm_associate_os_entitlements(cs_blob->csb_csm_obj, cs_blob->csb_entitlements);
4276 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4277 		printf("unable to associate OSEntitlements with monitor: %d\n", ret);
4278 		return EPERM;
4279 	}
4280 
4281 	return 0;
4282 }
4283 
4284 /**
4285  * Register the code signature with the code signing monitor environment. This
4286  * will effectively make the blob data immutable, either because the blob memory
4287  * will be allocated and managed directly by the monitor, or because the monitor
4288  * will lock down the memory associated with the blob.
4289  */
4290 static errno_t
4291 register_code_signature_monitor(
4292 	struct vnode *vnode,
4293 	struct cs_blob *cs_blob,
4294 	vm_offset_t code_directory_offset)
4295 {
4296 	kern_return_t ret = KERN_DENIED;
4297 	vm_address_t monitor_signature_addr = 0;
4298 	void *monitor_sig_object = NULL;
4299 	const char *vnode_path_ptr = NULL;
4300 
4301 	/*
4302 	 * Attempt to resolve the path for this vnode and pass it in to the code
4303 	 * signing monitor during registration.
4304 	 */
4305 	int vnode_path_len = MAXPATHLEN;
4306 	char *vnode_path = kalloc_data(vnode_path_len, Z_WAITOK);
4307 
4308 	/*
4309 	 * Taking a reference on the vnode recursively can sometimes lead to a
4310 	 * deadlock on the system. Since we already have a vnode pointer, it means
4311 	 * the caller performed a vnode lookup, which implicitly takes a reference
4312 	 * on the vnode. However, there is more than just having a reference on a
4313 	 * vnode which is important. vnode's also have an iocount, and we must only
4314 	 * access a vnode which has an iocount of greater than 0. Thankfully, all
4315 	 * the conditions which lead to calling this function ensure that this
4316 	 * vnode is safe to access here.
4317 	 *
4318 	 * For more details: rdar://105819068.
4319 	 */
4320 	errno_t error = vn_getpath(vnode, vnode_path, &vnode_path_len);
4321 	if (error == 0) {
4322 		vnode_path_ptr = vnode_path;
4323 	}
4324 
4325 	ret = csm_register_code_signature(
4326 		(vm_address_t)cs_blob->csb_mem_kaddr,
4327 		cs_blob->csb_mem_size,
4328 		code_directory_offset,
4329 		vnode_path_ptr,
4330 		&monitor_sig_object,
4331 		&monitor_signature_addr);
4332 
4333 	kfree_data(vnode_path, MAXPATHLEN);
4334 	vnode_path_ptr = NULL;
4335 
4336 	if (ret == KERN_SUCCESS) {
4337 		/* Reconstruct the cs_blob if the monitor used its own allocation */
4338 		if (monitor_signature_addr != (vm_address_t)cs_blob->csb_mem_kaddr) {
4339 			vm_address_t monitor_signature_size = cs_blob->csb_mem_size;
4340 
4341 			/* Free the old memory for the blob */
4342 			ubc_cs_blob_deallocate(
4343 				(vm_address_t)cs_blob->csb_mem_kaddr,
4344 				cs_blob->csb_mem_size);
4345 
4346 			/* Reconstruct critical fields in the blob object */
4347 			ubc_cs_blob_reconstruct(
4348 				cs_blob,
4349 				monitor_signature_addr,
4350 				monitor_signature_size,
4351 				code_directory_offset);
4352 
4353 			/* Mark the signature as monitor managed */
4354 			cs_blob->csb_csm_managed = true;
4355 		}
4356 	} else if (ret != KERN_NOT_SUPPORTED) {
4357 		printf("unable to register code signature with monitor: %d\n", ret);
4358 		return EPERM;
4359 	}
4360 
4361 	/* Save the monitor handle for the signature object -- may be NULL */
4362 	cs_blob->csb_csm_obj = monitor_sig_object;
4363 
4364 	return 0;
4365 }
4366 
4367 #endif /* CODE_SIGNING_MONITOR */
4368 
4369 static errno_t
4370 validate_main_binary_check(
4371 	struct cs_blob *csblob,
4372 	cs_blob_add_flags_t csblob_add_flags)
4373 {
4374 #if XNU_TARGET_OS_OSX
4375 	(void)csblob;
4376 	(void)csblob_add_flags;
4377 	return 0;
4378 #else
4379 	const CS_CodeDirectory *first_cd = NULL;
4380 	const CS_CodeDirectory *alt_cd = NULL;
4381 	uint64_t exec_seg_flags = 0;
4382 	uint32_t slot = CSSLOT_CODEDIRECTORY;
4383 
4384 	/* Nothing to enforce if we're allowing main binaries */
4385 	if ((csblob_add_flags & CS_BLOB_ADD_ALLOW_MAIN_BINARY) != 0) {
4386 		return 0;
4387 	}
4388 
4389 	first_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4390 	if ((first_cd != NULL) && (ntohl(first_cd->version) >= CS_SUPPORTSEXECSEG)) {
4391 		exec_seg_flags |= ntohll(first_cd->execSegFlags);
4392 	}
4393 
4394 	for (uint32_t i = 0; i < CSSLOT_ALTERNATE_CODEDIRECTORY_MAX; i++) {
4395 		slot = CSSLOT_ALTERNATE_CODEDIRECTORIES + i;
4396 		alt_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4397 		if ((alt_cd == NULL) || (ntohl(alt_cd->version) < CS_SUPPORTSEXECSEG)) {
4398 			continue;
4399 		}
4400 		exec_seg_flags |= ntohll(alt_cd->execSegFlags);
4401 	}
4402 
4403 	if ((exec_seg_flags & CS_EXECSEG_MAIN_BINARY) != 0) {
4404 		return EBADEXEC;
4405 	}
4406 	return 0;
4407 #endif /* XNU_TARGET_OS_OSX */
4408 }
4409 
4410 /**
4411  * Accelerate entitlements for a code signature object. When we have a code
4412  * signing monitor, this acceleration is done within the monitor which then
4413  * passes back a CoreEntitlements query context the kernel can use. When we
4414  * don't have a code signing monitor, we accelerate the queries within the
4415  * kernel memory itself.
4416  *
4417  * This function must be called when the storage for the code signature can
4418  * no longer change.
4419  */
4420 static errno_t
4421 accelerate_entitlement_queries(
4422 	struct cs_blob *cs_blob)
4423 {
4424 	kern_return_t ret = KERN_NOT_SUPPORTED;
4425 
4426 #if CODE_SIGNING_MONITOR
4427 	CEQueryContext_t ce_ctx = NULL;
4428 	const char *signing_id = NULL;
4429 
4430 	ret = csm_accelerate_entitlements(cs_blob->csb_csm_obj, &ce_ctx);
4431 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4432 		printf("unable to accelerate entitlements through the monitor: %d\n", ret);
4433 		return EPERM;
4434 	}
4435 
4436 	if (ret == KERN_SUCCESS) {
4437 		/* This call cannot fail at this stage */
4438 		ret = csm_acquire_signing_identifier(cs_blob->csb_csm_obj, &signing_id);
4439 		assert(ret == KERN_SUCCESS);
4440 
4441 		/* Adjust the OSEntitlements context with AMFI */
4442 		ret = amfi->OSEntitlements.adjustContextWithMonitor(
4443 			cs_blob->csb_entitlements,
4444 			ce_ctx,
4445 			cs_blob->csb_csm_obj,
4446 			signing_id,
4447 			cs_blob->csb_flags);
4448 		if (ret != KERN_SUCCESS) {
4449 			printf("unable to adjust OSEntitlements context with monitor: %d\n", ret);
4450 			return EPERM;
4451 		}
4452 
4453 		return 0;
4454 	}
4455 #endif
4456 
4457 	/*
4458 	 * If we reach here, then either we don't have a code signing monitor, or
4459 	 * the code signing monitor isn't enabled for code signing, in which case,
4460 	 * AMFI is going to accelerate the entitlements context and adjust its
4461 	 * context on its own.
4462 	 */
4463 	assert(ret == KERN_NOT_SUPPORTED);
4464 
4465 	ret = amfi->OSEntitlements.adjustContextWithoutMonitor(
4466 		cs_blob->csb_entitlements,
4467 		cs_blob);
4468 
4469 	if (ret != KERN_SUCCESS) {
4470 		printf("unable to adjust OSEntitlements context without monitor: %d\n", ret);
4471 		return EPERM;
4472 	}
4473 
4474 	return 0;
4475 }
4476 
4477 /**
4478  * Ensure and validate that some security critical code signing blobs haven't
4479  * been stripped off from the code signature. This can happen if an attacker
4480  * chose to load a code signature sans these critical blobs, or if there is a
4481  * bug in reconstitution logic which remove these blobs from the code signature.
4482  */
4483 static errno_t
4484 validate_auxiliary_signed_blobs(
4485 	struct cs_blob *cs_blob)
4486 {
4487 	struct cs_blob_identifier {
4488 		uint32_t cs_slot;
4489 		uint32_t cs_magic;
4490 	};
4491 
4492 	const struct cs_blob_identifier identifiers[] = {
4493 		{CSSLOT_LAUNCH_CONSTRAINT_SELF, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4494 		{CSSLOT_LAUNCH_CONSTRAINT_PARENT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4495 		{CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4496 		{CSSLOT_LIBRARY_CONSTRAINT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT}
4497 	};
4498 	const uint32_t num_identifiers = sizeof(identifiers) / sizeof(identifiers[0]);
4499 
4500 	for (uint32_t i = 0; i < num_identifiers; i++) {
4501 		errno_t err = csblob_find_special_slot_blob(
4502 			cs_blob,
4503 			identifiers[i].cs_slot,
4504 			identifiers[i].cs_magic,
4505 			NULL,
4506 			NULL);
4507 
4508 		if (err != 0) {
4509 			printf("unable to validate security-critical blob: %d [%u|%u]\n",
4510 			    err, identifiers[i].cs_slot, identifiers[i].cs_magic);
4511 
4512 			return EPERM;
4513 		}
4514 	}
4515 
4516 	return 0;
4517 }
4518 
4519 /**
4520  * Setup multi-level hashing for the code signature. This isn't supported on most
4521  * shipping devices, but on ones where it is, it can result in significant savings
4522  * of memory from the code signature standpoint.
4523  *
4524  * Multi-level hashing is used to condense the code directory hashes in order to
4525  * improve memory consumption. We take four 4K page hashes, and condense them into
4526  * a single 16K hash, hence reducing the space consumed by the code directory by
4527  * about ~75%.
4528  */
4529 static errno_t
4530 setup_multilevel_hashing(
4531 	struct cs_blob *cs_blob)
4532 {
4533 	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
4534 	errno_t err = -1;
4535 
4536 	/*
4537 	 * When we have a code signing monitor, we do not support multi-level hashing
4538 	 * since the code signature data is expected to be locked within memory which
4539 	 * cannot be written to by the kernel.
4540 	 *
4541 	 * Even when the code signing monitor isn't explicitly enabled, there are other
4542 	 * reasons for not performing multi-level hashing. For instance, Rosetta creates
4543 	 * issues with multi-level hashing on Apple Silicon Macs.
4544 	 */
4545 	code_signing_configuration(&monitor_type, NULL);
4546 	if (monitor_type != CS_MONITOR_TYPE_NONE) {
4547 		return 0;
4548 	}
4549 
4550 	/* We need to check if multi-level hashing is supported for this blob */
4551 	if (ubc_cs_supports_multilevel_hash(cs_blob) == false) {
4552 		return 0;
4553 	}
4554 
4555 	err = ubc_cs_convert_to_multilevel_hash(cs_blob);
4556 	if (err != 0) {
4557 		printf("unable to setup multi-level hashing: %d\n", err);
4558 		return err;
4559 	}
4560 
4561 	assert(cs_blob->csb_reconstituted == true);
4562 	return 0;
4563 }
4564 
4565 /**
4566  * Once code signature validation is complete, we can remove even more blobs from the
4567  * code signature as they are no longer needed, conserving even more
4568  * system memory.
4569  */
4570 static errno_t
4571 reconstitute_code_signature_2nd_stage(
4572 	struct cs_blob *cs_blob)
4573 {
4574 	kern_return_t ret = KERN_NOT_SUPPORTED;
4575 	errno_t err = EPERM;
4576 
4577 	/* If we never reconstituted before, we won't be reconstituting again */
4578 	if (cs_blob->csb_reconstituted == false) {
4579 		return 0;
4580 	}
4581 
4582 #if CODE_SIGNING_MONITOR
4583 	/*
4584 	 * When we have a code signing monitor, the code signature is immutable until the
4585 	 * monitor decides to unlock parts of it. Therefore, 2nd stage reconstitution takes
4586 	 * place in the monitor when we have a monitor available.
4587 	 *
4588 	 * If the monitor isn't enforcing code signing (in which case the code signature is
4589 	 * NOT immutable), then we perform 2nd stage reconstitution within the kernel itself.
4590 	 */
4591 	vm_address_t unneeded_addr = 0;
4592 	vm_size_t unneeded_size = 0;
4593 
4594 	ret = csm_reconstitute_code_signature(
4595 		cs_blob->csb_csm_obj,
4596 		&unneeded_addr,
4597 		&unneeded_size);
4598 
4599 	if ((ret == KERN_SUCCESS) && unneeded_addr && unneeded_size) {
4600 		/* Free the unneeded part of the blob */
4601 		kmem_free(kernel_map, unneeded_addr, unneeded_size);
4602 
4603 		/* Adjust the size in the blob object */
4604 		cs_blob->csb_mem_size -= unneeded_size;
4605 	}
4606 #endif
4607 
4608 	if (ret == KERN_SUCCESS) {
4609 		goto success;
4610 	} else if (ret != KERN_NOT_SUPPORTED) {
4611 		/*
4612 		 * A monitor environment is available, and it failed in performing 2nd stage
4613 		 * reconstitution. This is a fatal issue for code signing validation.
4614 		 */
4615 		printf("unable to reconstitute code signature through monitor: %d\n", ret);
4616 		return EPERM;
4617 	}
4618 
4619 	/* No monitor available if we reached here */
4620 	err = ubc_cs_reconstitute_code_signature_2nd_stage(cs_blob);
4621 	if (err != 0) {
4622 		return err;
4623 	}
4624 
4625 success:
4626 	/*
4627 	 * Regardless of whether we are performing 2nd stage reconstitution in the monitor
4628 	 * or in the kernel, we remove references to XML entitlements from the blob here.
4629 	 * None of the 2nd stage reconstitution code ever keeps these around, and they have
4630 	 * been explicitly deprecated and disallowed.
4631 	 */
4632 	cs_blob->csb_entitlements_blob = NULL;
4633 
4634 	return 0;
4635 }
4636 
4637 /**
4638  * A code signature blob often contains blobs which aren't needed in the kernel. Since
4639  * the code signature is wired into kernel memory for as long as it is used, it behooves
4640  * us to remove any blobs we have no need for in order to conserve memory.
4641  *
4642  * Some platforms support copying the entire SuperBlob stored in kernel memory into
4643  * userspace memory through the "csops" system call. There is an expectation that when
4644  * this happens, all the blobs which were a part of the code signature are copied
4645  * into userspace memory. As a result, these platforms cannot reconstitute the code
4646  * signature; more precisely, they cannot remove blobs from the signature, which
4647  * makes reconstitution useless.
4648  */
4649 static errno_t
4650 reconstitute_code_signature(
4651 	struct cs_blob *cs_blob)
4652 {
4653 	CS_CodeDirectory *code_directory = NULL;
4654 	vm_address_t signature_addr = 0;
4655 	vm_size_t signature_size = 0;
4656 	vm_offset_t code_directory_offset = 0;
4657 	bool platform_supports_reconstitution = false;
4658 
4659 #if CONFIG_CODE_SIGNATURE_RECONSTITUTION
4660 	platform_supports_reconstitution = true;
4661 #endif
4662 
4663 	/*
4664 	 * We can skip reconstitution if the code signing monitor isn't available or not
4665 	 * enabled. But if we do have a monitor, then reconstitution becomes required, as
4666 	 * there is an expectation of performing 2nd stage reconstitution through the
4667 	 * monitor itself.
4668 	 */
4669 	if (platform_supports_reconstitution == false) {
4670 #if CODE_SIGNING_MONITOR
4671 		if (csm_enabled() == true) {
4672 			printf("reconstitution required when code signing monitor is enabled\n");
4673 			return EPERM;
4674 		}
4675 #endif
4676 		return 0;
4677 	}
4678 
4679 	errno_t err = ubc_cs_reconstitute_code_signature(
4680 		cs_blob,
4681 		&signature_addr,
4682 		&signature_size,
4683 		0,
4684 		&code_directory);
4685 
4686 	if (err != 0) {
4687 		printf("unable to reconstitute code signature: %d\n", err);
4688 		return err;
4689 	}
4690 
4691 	/* Calculate the code directory offset */
4692 	code_directory_offset = (vm_offset_t)code_directory - signature_addr;
4693 
4694 	/* Reconstitution allocates new memory -- free the old one */
4695 	ubc_cs_blob_deallocate((vm_address_t)cs_blob->csb_mem_kaddr, cs_blob->csb_mem_size);
4696 
4697 	/* Reconstruct critical fields in the blob object */
4698 	ubc_cs_blob_reconstruct(
4699 		cs_blob,
4700 		signature_addr,
4701 		signature_size,
4702 		code_directory_offset);
4703 
4704 	/* Mark the object as reconstituted */
4705 	cs_blob->csb_reconstituted = true;
4706 
4707 	return 0;
4708 }
4709 
4710 int
4711 ubc_cs_blob_add(
4712 	struct vnode    *vp,
4713 	uint32_t        platform,
4714 	cpu_type_t      cputype,
4715 	cpu_subtype_t   cpusubtype,
4716 	off_t           base_offset,
4717 	vm_address_t    *addr,
4718 	vm_size_t       size,
4719 	struct image_params *imgp,
4720 	__unused int    flags,
4721 	struct cs_blob  **ret_blob,
4722 	cs_blob_add_flags_t csblob_add_flags)
4723 {
4724 	ptrauth_generic_signature_t cs_blob_sig = {0};
4725 	struct ubc_info *uip = NULL;
4726 	struct cs_blob tmp_blob = {0};
4727 	struct cs_blob *blob_ro = NULL;
4728 	struct cs_blob *oblob = NULL;
4729 	CS_CodeDirectory const *cd = NULL;
4730 	off_t blob_start_offset = 0;
4731 	off_t blob_end_offset = 0;
4732 	boolean_t record_mtime = false;
4733 	kern_return_t kr = KERN_DENIED;
4734 	errno_t error = -1;
4735 
4736 #if HAS_APPLE_PAC
4737 	void *signed_entitlements = NULL;
4738 #if CODE_SIGNING_MONITOR
4739 	void *signed_monitor_obj = NULL;
4740 #endif
4741 #endif
4742 
4743 	if (ret_blob) {
4744 		*ret_blob = NULL;
4745 	}
4746 
4747 	/*
4748 	 * Create the struct cs_blob abstract data type which will get attached to
4749 	 * the vnode object. This function also validates the structural integrity
4750 	 * of the code signature blob being passed in.
4751 	 *
4752 	 * We initialize a temporary blob whose contents are then copied into an RO
4753 	 * blob which we allocate from the read-only allocator.
4754 	 */
4755 	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
4756 	if (error != 0) {
4757 		printf("unable to create a validated cs_blob object: %d\n", error);
4758 		return error;
4759 	}
4760 
4761 	tmp_blob.csb_cpu_type = cputype;
4762 	tmp_blob.csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
4763 	tmp_blob.csb_base_offset = base_offset;
4764 
4765 	/* Perform 1st stage reconstitution */
4766 	error = reconstitute_code_signature(&tmp_blob);
4767 	if (error != 0) {
4768 		goto out;
4769 	}
4770 
4771 	/*
4772 	 * There is a strong design pattern we have to follow carefully within this
4773 	 * function. Since we're storing the struct cs_blob within RO-allocated
4774 	 * memory, it is immutable to modifications from within the kernel itself.
4775 	 *
4776 	 * However, before the contents of the blob are transferred to the immutable
4777 	 * cs_blob, they are kept on the stack. In order to protect against a kernel
4778 	 * R/W attacker, we must protect this stack variable. Most importantly, any
4779 	 * code paths which can block for a while must compute a PAC signature over
4780 	 * the stack variable, then perform the blocking operation, and then ensure
4781 	 * that the PAC signature over the stack variable is still valid to ensure
4782 	 * that an attacker did not overwrite contents of the blob by introducing a
4783 	 * maliciously long blocking operation, giving them the time required to go
4784 	 * and overwrite the contents of the blob.
4785 	 *
4786 	 * The most important fields to protect here are the OSEntitlements and the
4787 	 * code signing monitor object references. For these ones, we keep around
4788 	 * extra signed pointers diversified against the read-only blobs' memory
4789 	 * and then update the stack variable with these before updating the full
4790 	 * read-only blob.
4791 	 */
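
	/*
	 * Editorial sketch (assumed helper, not shown in this excerpt): the
	 * pattern described above amounts to bracketing each blocking call with
	 * a generic PAC signature over the stack copy, roughly:
	 *
	 *	cs_blob_sig = ptrauth_utils_sign_blob_generic(&tmp_blob,
	 *	    sizeof(tmp_blob), OS_PTRAUTH_DISCRIMINATOR("cs_blob"), 0);
	 *	... blocking operation, e.g. the MACF hook below ...
	 *	if (cs_blob_sig != ptrauth_utils_sign_blob_generic(&tmp_blob,
	 *	    sizeof(tmp_blob), OS_PTRAUTH_DISCRIMINATOR("cs_blob"), 0)) {
	 *		panic("cs_blob modified while blocked");
	 *	}
	 *
	 * which is why cs_blob_sig above is a ptrauth_generic_signature_t.
	 */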
4792 
4793 	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4794 	assert(blob_ro != NULL);
4795 
4796 	tmp_blob.csb_ro_addr = blob_ro;
4797 	tmp_blob.csb_vnode = vp;
4798 
4799 	/* AMFI needs to see the current blob state at the RO address */
4800 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
4801 
4802 #if CODE_SIGNING_MONITOR
4803 	error = register_code_signature_monitor(
4804 		vp,
4805 		&tmp_blob,
4806 		(vm_offset_t)tmp_blob.csb_cd - (vm_offset_t)tmp_blob.csb_mem_kaddr);
4807 
4808 	if (error != 0) {
4809 		goto out;
4810 	}
4811 
4812 #if HAS_APPLE_PAC
4813 	signed_monitor_obj = ptrauth_sign_unauthenticated(
4814 		tmp_blob.csb_csm_obj,
4815 		ptrauth_key_process_independent_data,
4816 		ptrauth_blend_discriminator(&blob_ro->csb_csm_obj,
4817 		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_csm_obj")));
4818 #endif /* HAS_APPLE_PAC */
4819 
4820 #endif /* CODE_SIGNING_MONITOR */
4821 
4822 	/*
4823 	 * Ensure that we're honoring the main binary policy check on platforms which
4824 	 * require it. We perform this check at this stage to ensure the blob we're
4825 	 * looking at has been locked down by a code signing monitor if the system
4826 	 * has one.
4827 	 */
4828 	error = validate_main_binary_check(&tmp_blob, csblob_add_flags);
4829 	if (error != 0) {
4830 		printf("failed to verify main binary policy: %d\n", error);
4831 		goto out;
4832 	}
4833 
4834 #if CONFIG_MACF
4835 	unsigned int cs_flags = tmp_blob.csb_flags;
4836 	unsigned int signer_type = tmp_blob.csb_signer_type;
4837 
4838 	error = mac_vnode_check_signature(
4839 		vp,
4840 		&tmp_blob,
4841 		imgp,
4842 		&cs_flags,
4843 		&signer_type,
4844 		flags,
4845 		platform);
4846 
4847 	if (error != 0) {
4848 		printf("validation of code signature failed through MACF policy: %d\n", error);
4849 		goto out;
4850 	}
4851 
4852 #if HAS_APPLE_PAC
4853 	signed_entitlements = ptrauth_sign_unauthenticated(
4854 		tmp_blob.csb_entitlements,
4855 		ptrauth_key_process_independent_data,
4856 		ptrauth_blend_discriminator(&blob_ro->csb_entitlements,
4857 		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements")));
4858 #endif
4859 
4860 	tmp_blob.csb_flags = cs_flags;
4861 	tmp_blob.csb_signer_type = signer_type;
4862 
4863 	if (tmp_blob.csb_flags & CS_PLATFORM_BINARY) {
4864 		tmp_blob.csb_platform_binary = 1;
4865 		tmp_blob.csb_platform_path = !!(tmp_blob.csb_flags & CS_PLATFORM_PATH);
4866 		tmp_blob.csb_teamid = NULL;
4867 	} else {
4868 		tmp_blob.csb_platform_binary = 0;
4869 		tmp_blob.csb_platform_path = 0;
4870 	}
4871 
4872 	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !tmp_blob.csb_platform_binary) {
4873 		printf("dyld simulator runtime is not apple signed: proc: %d\n",
4874 		    proc_getpid(current_proc()));
4875 
4876 		error = EPERM;
4877 		goto out;
4878 	}
4879 #endif /* CONFIG_MACF */
4880 
4881 #if CODE_SIGNING_MONITOR
4882 	error = verify_code_signature_monitor(&tmp_blob);
4883 	if (error != 0) {
4884 		goto out;
4885 	}
4886 #endif
4887 
4888 	/* Perform 2nd stage reconstitution */
4889 	error = reconstitute_code_signature_2nd_stage(&tmp_blob);
4890 	if (error != 0) {
4891 		goto out;
4892 	}
4893 
4894 	/* Setup any multi-level hashing for the code signature */
4895 	error = setup_multilevel_hashing(&tmp_blob);
4896 	if (error != 0) {
4897 		goto out;
4898 	}
4899 
4900 	/* Ensure security critical auxiliary blobs still exist */
4901 	error = validate_auxiliary_signed_blobs(&tmp_blob);
4902 	if (error != 0) {
4903 		goto out;
4904 	}
4905 
4906 	/*
4907 	 * Accelerate the entitlement queries for this code signature. This must
4908 	 * be done only after we know that the code signature pointers within the
4909 	 * struct cs_blob aren't going to be shifted around anymore, which is why
4910 	 * this acceleration is done after setting up multilevel hashing, since
4911 	 * that is the last part of signature validation which can shift the code
4912 	 * signature around.
4913 	 */
4914 	error = accelerate_entitlement_queries(&tmp_blob);
4915 	if (error != 0) {
4916 		goto out;
4917 	}
4918 
4919 	/*
4920 	 * Parse and set the Team ID for this code signature. This only needs to
4921 	 * happen when the signature isn't marked as platform. Like above, this
4922 	 * has to happen after we know the pointers within struct cs_blob aren't
4923 	 * going to be shifted anymore.
4924 	 */
4925 	if ((tmp_blob.csb_flags & CS_PLATFORM_BINARY) == 0) {
4926 		tmp_blob.csb_teamid = csblob_parse_teamid(&tmp_blob);
4927 	}
4928 
4929 	/*
4930 	 * Validate the code signing blob's coverage. Ideally, we could do this
4931 	 * at the beginning, right after structural validation; however, multilevel
4932 	 * hashing can change some offsets.
4933 	 */
4934 	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
4935 	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
4936 	if (blob_start_offset >= blob_end_offset) {
4937 		error = EINVAL;
4938 		goto out;
4939 	} else if (blob_start_offset < 0 || blob_end_offset <= 0) {
4940 		error = EINVAL;
4941 		goto out;
4942 	}
4943 
4944 	/*
4945 	 * The vnode_lock, linked list traversal, and marking of the memory object as
4946 	 * signed can all be blocking operations. Compute a PAC over the tmp_blob.
4947 	 */
4948 	cs_blob_sig = ptrauth_utils_sign_blob_generic(
4949 		&tmp_blob,
4950 		sizeof(tmp_blob),
4951 		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
4952 		PTRAUTH_ADDR_DIVERSIFY);
4953 
4954 	vnode_lock(vp);
4955 	if (!UBCINFOEXISTS(vp)) {
4956 		vnode_unlock(vp);
4957 		error = ENOENT;
4958 		goto out;
4959 	}
4960 	uip = vp->v_ubcinfo;
4961 
4962 	/* check if this new blob overlaps with an existing blob */
4963 	for (oblob = ubc_get_cs_blobs(vp);
4964 	    oblob != NULL;
4965 	    oblob = oblob->csb_next) {
4966 		off_t oblob_start_offset, oblob_end_offset;
4967 
4968 		if (tmp_blob.csb_signer_type != oblob->csb_signer_type) {  // signer type needs to be the same for slices
4969 			vnode_unlock(vp);
4970 			error = EALREADY;
4971 			goto out;
4972 		} else if (tmp_blob.csb_platform_binary) {  //platform binary needs to be the same for app slices
4973 			if (!oblob->csb_platform_binary) {
4974 				vnode_unlock(vp);
4975 				error = EALREADY;
4976 				goto out;
4977 			}
4978 		} else if (tmp_blob.csb_teamid) {  //teamid binary needs to be the same for app slices
4979 			if (oblob->csb_platform_binary ||
4980 			    oblob->csb_teamid == NULL ||
4981 			    strcmp(oblob->csb_teamid, tmp_blob.csb_teamid) != 0) {
4982 				vnode_unlock(vp);
4983 				error = EALREADY;
4984 				goto out;
4985 			}
4986 		} else {  // non teamid binary needs to be the same for app slices
4987 			if (oblob->csb_platform_binary ||
4988 			    oblob->csb_teamid != NULL) {
4989 				vnode_unlock(vp);
4990 				error = EALREADY;
4991 				goto out;
4992 			}
4993 		}
4994 
4995 		oblob_start_offset = (oblob->csb_base_offset +
4996 		    oblob->csb_start_offset);
4997 		oblob_end_offset = (oblob->csb_base_offset +
4998 		    oblob->csb_end_offset);
4999 		if (blob_start_offset >= oblob_end_offset ||
5000 		    blob_end_offset <= oblob_start_offset) {
5001 			/* no conflict with this existing blob */
5002 		} else {
5003 			/* conflict ! */
5004 			if (blob_start_offset == oblob_start_offset &&
5005 			    blob_end_offset == oblob_end_offset &&
5006 			    tmp_blob.csb_mem_size == oblob->csb_mem_size &&
5007 			    tmp_blob.csb_flags == oblob->csb_flags &&
5008 			    (tmp_blob.csb_cpu_type == CPU_TYPE_ANY ||
5009 			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
5010 			    tmp_blob.csb_cpu_type == oblob->csb_cpu_type) &&
5011 			    !bcmp(tmp_blob.csb_cdhash,
5012 			    oblob->csb_cdhash,
5013 			    CS_CDHASH_LEN)) {
5014 				/*
5015 				 * We already have this blob:
5016 				 * we'll return success but
5017 				 * throw away the new blob.
5018 				 */
5019 				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
5020 					/*
5021 					 * The old blob matches this one
5022 					 * but doesn't have any CPU type.
5023 					 * Update it with whatever the caller
5024 					 * provided this time.
5025 					 */
5026 					cs_blob_set_cpu_type(oblob, cputype);
5027 				}
5028 
5029 				/* The signature is still accepted, so update the
5030 				 * generation count. */
5031 				uip->cs_add_gen = cs_blob_generation_count;
5032 
5033 				vnode_unlock(vp);
5034 				if (ret_blob) {
5035 					*ret_blob = oblob;
5036 				}
5037 				error = EAGAIN;
5038 				goto out;
5039 			} else {
5040 				/* different blob: reject the new one */
5041 				vnode_unlock(vp);
5042 				error = EALREADY;
5043 				goto out;
5044 			}
5045 		}
5046 	}
5047 
5048 	/* mark this vnode's VM object as having "signed pages" */
5049 	kr = memory_object_signed(uip->ui_control, TRUE);
5050 	if (kr != KERN_SUCCESS) {
5051 		vnode_unlock(vp);
5052 		error = ENOENT;
5053 		goto out;
5054 	}
5055 
5056 	if (uip->cs_blobs == NULL) {
5057 		/* loading 1st blob: record the file's current "modify time" */
5058 		record_mtime = TRUE;
5059 	}
5060 
5061 	/* set the generation count for cs_blobs */
5062 	uip->cs_add_gen = cs_blob_generation_count;
5063 
5064 	/* Authenticate the PAC signature after blocking operation */
5065 	ptrauth_utils_auth_blob_generic(
5066 		&tmp_blob,
5067 		sizeof(tmp_blob),
5068 		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
5069 		PTRAUTH_ADDR_DIVERSIFY,
5070 		cs_blob_sig);
5071 
5072 	/* Update the system statistics for code signatures blobs */
5073 	ubc_cs_blob_adjust_statistics(&tmp_blob);
5074 
5075 	/* Update the list pointer to reference other blobs for this vnode */
5076 	tmp_blob.csb_next = uip->cs_blobs;
5077 
5078 #if HAS_APPLE_PAC
5079 	/*
5080 	 * Update all the critical pointers in the blob with the RO diversified
5081 	 * values before updating the read-only blob with the full contents of
5082 	 * the struct cs_blob. We need to use memcpy here as otherwise a simple
5083 	 * assignment will cause the compiler to re-sign using the stack variable
5084 	 * as the address diversifier.
5085 	 */
5086 	memcpy((void*)&tmp_blob.csb_entitlements, &signed_entitlements, sizeof(void*));
5087 #if CODE_SIGNING_MONITOR
5088 	memcpy((void*)&tmp_blob.csb_csm_obj, &signed_monitor_obj, sizeof(void*));
5089 #endif
5090 #endif
5091 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5092 
5093 	/* Add a fence to ensure writes to the blob are visible on all threads */
5094 	os_atomic_thread_fence(seq_cst);
5095 
5096 	/*
5097 	 * Add the cs_blob to the front of the list of blobs for this vnode. We
5098 	 * add to the front of the list, and we never remove a blob from the list,
5099 	 * which means ubc_get_cs_blobs can return whatever the top of the list
5100 	 * is while still keeping the list valid. Useful if we validate a
5101 	 * page while adding in a new blob for this vnode.
5102 	 */
5103 	uip->cs_blobs = blob_ro;
5104 
5105 	/* Make sure to reload pointer from uip to double check */
5106 	if (uip->cs_blobs->csb_next) {
5107 		zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), uip->cs_blobs->csb_next);
5108 	}
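	/*
	 * Illustrative reader for the head-only-prepend invariant above
	 * (hypothetical; ubc_get_cs_blobs() below is the real lock-free reader):
	 *
	 *   struct cs_blob *b = uip->cs_blobs;   // single snapshot of the head
	 *   for (; b != NULL; b = b->csb_next) {
	 *       // the chain behind any snapshot never mutates, so this walk
	 *       // stays valid even if a new blob is prepended concurrently
	 *   }
	 */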
5109 
5110 	if (cs_debug > 1) {
5111 		proc_t p;
5112 		const char *name = vnode_getname_printable(vp);
5113 		p = current_proc();
5114 		printf("CODE SIGNING: proc %d(%s) "
5115 		    "loaded %s signatures for file (%s) "
5116 		    "range 0x%llx:0x%llx flags 0x%x\n",
5117 		    proc_getpid(p), p->p_comm,
5118 		    blob_ro->csb_cpu_type == -1 ? "detached" : "embedded",
5119 		    name,
5120 		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5121 		    blob_ro->csb_base_offset + blob_ro->csb_end_offset,
5122 		    blob_ro->csb_flags);
5123 		vnode_putname_printable(name);
5124 	}
5125 
5126 	vnode_unlock(vp);
5127 
5128 	if (record_mtime) {
5129 		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
5130 	}
5131 
5132 	if (ret_blob) {
5133 		*ret_blob = blob_ro;
5134 	}
5135 
5136 	error = 0;      /* success ! */
5137 
5138 out:
5139 	if (error) {
5140 		if (error != EAGAIN) {
5141 			printf("check_signature[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5142 		}
5143 
5144 		cs_blob_cleanup(&tmp_blob);
5145 		if (blob_ro) {
5146 			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5147 		}
5148 	}
5149 
5150 	if (error == EAGAIN) {
5151 		/*
5152 		 * See above:  error is EAGAIN if we were asked
5153 		 * to add an existing blob again.  We cleaned the new
5154 		 * blob and we want to return success.
5155 		 */
5156 		error = 0;
5157 	}
5158 
5159 	return error;
5160 }
5161 
5162 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5163 int
5164 ubc_cs_blob_add_supplement(
5165 	struct vnode    *vp,
5166 	struct vnode    *orig_vp,
5167 	off_t           base_offset,
5168 	vm_address_t    *addr,
5169 	vm_size_t       size,
5170 	struct cs_blob  **ret_blob)
5171 {
5172 	kern_return_t           kr;
5173 	struct ubc_info         *uip, *orig_uip;
5174 	int                     error;
5175 	struct cs_blob          tmp_blob;
5176 	struct cs_blob          *orig_blob;
5177 	struct cs_blob          *blob_ro = NULL;
5178 	CS_CodeDirectory const *cd;
5179 	off_t                   blob_start_offset, blob_end_offset;
5180 
5181 	if (ret_blob) {
5182 		*ret_blob = NULL;
5183 	}
5184 
5185 	/* Create the struct cs_blob wrapper that will be attached to the vnode.
5186 	 * Validates the passed in blob in the process. */
5187 	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
5188 
5189 	if (error != 0) {
5190 		printf("malformed code signature supplement blob: %d\n", error);
5191 		return error;
5192 	}
5193 
5194 	tmp_blob.csb_cpu_type = -1;
5195 	tmp_blob.csb_base_offset = base_offset;
5196 
5197 	tmp_blob.csb_reconstituted = false;
5198 
5199 	vnode_lock(orig_vp);
5200 	if (!UBCINFOEXISTS(orig_vp)) {
5201 		vnode_unlock(orig_vp);
5202 		error = ENOENT;
5203 		goto out;
5204 	}
5205 
5206 	orig_uip = orig_vp->v_ubcinfo;
5207 
5208 	/* check that the supplement's linked cdhash matches a cdhash of
5209 	 * the target image.
5210 	 */
5211 
5212 	if (tmp_blob.csb_linkage_hashtype == NULL) {
5213 		proc_t p;
5214 		const char *iname = vnode_getname_printable(vp);
5215 		p = current_proc();
5216 
5217 		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5218 	    "is not a supplemental signature.\n",
5219 		    proc_getpid(p), p->p_comm, iname);
5220 
5221 		error = EINVAL;
5222 
5223 		vnode_putname_printable(iname);
5224 		vnode_unlock(orig_vp);
5225 		goto out;
5226 	}
5227 	bool found_but_not_valid = false;
5228 	for (orig_blob = ubc_get_cs_blobs(orig_vp); orig_blob != NULL;
5229 	    orig_blob = orig_blob->csb_next) {
5230 		if (orig_blob->csb_hashtype == tmp_blob.csb_linkage_hashtype &&
5231 		    memcmp(orig_blob->csb_cdhash, tmp_blob.csb_linkage, CS_CDHASH_LEN) == 0) {
5232 			// Found match!
5233 			found_but_not_valid = ((orig_blob->csb_flags & CS_VALID) != CS_VALID);
5234 			break;
5235 		}
5236 	}
5237 
5238 	if (orig_blob == NULL || found_but_not_valid) {
5239 	// Not found, or found but not valid.
5240 
5241 		proc_t p;
5242 		const char *iname = vnode_getname_printable(vp);
5243 		p = current_proc();
5244 
5245 		error = (orig_blob == NULL) ? ESRCH : EPERM;
5246 
5247 		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5248 		    "does not match any attached cdhash (error: %d).\n",
5249 		    proc_getpid(p), p->p_comm, iname, error);
5250 
5251 		vnode_putname_printable(iname);
5252 		vnode_unlock(orig_vp);
5253 		goto out;
5254 	}
5255 
5256 	vnode_unlock(orig_vp);
5257 
5258 	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
5259 	tmp_blob.csb_ro_addr = blob_ro;
5260 	tmp_blob.csb_vnode = vp;
5261 
5262 	/* AMFI needs to see the current blob state at the RO address. */
5263 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5264 
5265 	// validate the signature against policy!
5266 #if CONFIG_MACF
5267 	unsigned int signer_type = tmp_blob.csb_signer_type;
5268 	error = mac_vnode_check_supplemental_signature(vp, &tmp_blob, orig_vp, orig_blob, &signer_type);
5269 
5270 	tmp_blob.csb_signer_type = signer_type;
5271 
5272 	if (error) {
5273 		if (cs_debug) {
5274 			printf("check_supplemental_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5275 		}
5276 		goto out;
5277 	}
5278 #endif
5279 
5280 	// We allowed the supplemental signature blob, so copy the platform bit
5281 	// or team-id from the linked signature, and whether the original is developer code.
5282 	tmp_blob.csb_platform_binary = 0;
5283 	tmp_blob.csb_platform_path = 0;
5284 	if (orig_blob->csb_platform_binary == 1) {
5285 		tmp_blob.csb_platform_binary = orig_blob->csb_platform_binary;
5286 		tmp_blob.csb_platform_path = orig_blob->csb_platform_path;
5287 	} else if (orig_blob->csb_teamid != NULL) {
5288 		vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
5289 		tmp_blob.csb_supplement_teamid = kalloc_data(teamid_size, Z_WAITOK);
5290 		if (tmp_blob.csb_supplement_teamid == NULL) {
5291 			error = ENOMEM;
5292 			goto out;
5293 		}
5294 		strlcpy(tmp_blob.csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
5295 	}
5296 	tmp_blob.csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);
5297 
5298 	// Validate the blob's coverage
5299 	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
5300 	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
5301 
5302 	if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
5303 		/* reject empty or backwards blob */
5304 		error = EINVAL;
5305 		goto out;
5306 	}
5307 
5308 	vnode_lock(vp);
5309 	if (!UBCINFOEXISTS(vp)) {
5310 		vnode_unlock(vp);
5311 		error = ENOENT;
5312 		goto out;
5313 	}
5314 	uip = vp->v_ubcinfo;
5315 
5316 	struct cs_blob *existing = uip->cs_blob_supplement;
5317 	if (existing != NULL) {
5318 		if (tmp_blob.csb_hashtype == existing->csb_hashtype &&
5319 		    memcmp(tmp_blob.csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
5320 			error = EAGAIN; // non-fatal
5321 		} else {
5322 			error = EALREADY; // fatal
5323 		}
5324 
5325 		vnode_unlock(vp);
5326 		goto out;
5327 	}
5328 
5329 	/* mark this vnode's VM object as having "signed pages" */
5330 	kr = memory_object_signed(uip->ui_control, TRUE);
5331 	if (kr != KERN_SUCCESS) {
5332 		vnode_unlock(vp);
5333 		error = ENOENT;
5334 		goto out;
5335 	}
5336 
5337 
5338 	/* We still adjust statistics even for supplemental blobs, as they
5339 	 * consume memory just the same. */
5340 	ubc_cs_blob_adjust_statistics(&tmp_blob);
5341 	/* Unlike regular cs_blobs, we only ever support one supplement. */
5342 	tmp_blob.csb_next = NULL;
5343 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5344 
5345 	os_atomic_thread_fence(seq_cst); // Fence to prevent reordering here
5346 	uip->cs_blob_supplement = blob_ro;
5347 
5348 	/* Make sure to reload pointer from uip to double check */
5349 	if (__improbable(uip->cs_blob_supplement->csb_next)) {
5350 		panic("csb_next does not match expected NULL value");
5351 	}
5352 
5353 	vnode_unlock(vp);
5354 
5355 
5356 	if (cs_debug > 1) {
5357 		proc_t p;
5358 		const char *name = vnode_getname_printable(vp);
5359 		p = current_proc();
5360 		printf("CODE SIGNING: proc %d(%s) "
5361 		    "loaded supplemental signature for file (%s) "
5362 		    "range 0x%llx:0x%llx\n",
5363 		    proc_getpid(p), p->p_comm,
5364 		    name,
5365 		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5366 		    blob_ro->csb_base_offset + blob_ro->csb_end_offset);
5367 		vnode_putname_printable(name);
5368 	}
5369 
5370 	if (ret_blob) {
5371 		*ret_blob = blob_ro;
5372 	}
5373 
5374 	error = 0; // Success!
5375 out:
5376 	if (error) {
5377 		if (cs_debug) {
5378 			printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5379 		}
5380 
5381 		cs_blob_cleanup(&tmp_blob);
5382 		if (blob_ro) {
5383 			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5384 		}
5385 	}
5386 
5387 	if (error == EAGAIN) {
5388 		/* We were asked to add an existing blob.
5389 		 * We cleaned up and ignore the attempt. */
5390 		error = 0;
5391 	}
5392 
5393 	return error;
5394 }
5395 #endif
5396 
5397 
5398 
5399 void
5400 csvnode_print_debug(struct vnode *vp)
5401 {
5402 	const char      *name = NULL;
5403 	struct ubc_info *uip;
5404 	struct cs_blob *blob;
5405 
5406 	name = vnode_getname_printable(vp);
5407 	if (name) {
5408 		printf("csvnode: name: %s\n", name);
5409 		vnode_putname_printable(name);
5410 	}
5411 
5412 	vnode_lock_spin(vp);
5413 
5414 	if (!UBCINFOEXISTS(vp)) {
5415 		blob = NULL;
5416 		goto out;
5417 	}
5418 
5419 	uip = vp->v_ubcinfo;
5420 	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
5421 		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
5422 		    (unsigned long)blob->csb_start_offset,
5423 		    (unsigned long)blob->csb_end_offset,
5424 		    blob->csb_flags,
5425 		    blob->csb_platform_binary ? "yes" : "no",
5426 		    blob->csb_platform_path ? "yes" : "no",
5427 		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
5428 	}
5429 
5430 out:
5431 	vnode_unlock(vp);
5432 }
5433 
5434 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5435 struct cs_blob *
5436 ubc_cs_blob_get_supplement(
5437 	struct vnode    *vp,
5438 	off_t           offset)
5439 {
5440 	struct cs_blob *blob;
5441 	off_t offset_in_blob;
5442 
5443 	vnode_lock_spin(vp);
5444 
5445 	if (!UBCINFOEXISTS(vp)) {
5446 		blob = NULL;
5447 		goto out;
5448 	}
5449 
5450 	blob = vp->v_ubcinfo->cs_blob_supplement;
5451 
5452 	if (blob == NULL) {
5453 		// no supplemental blob
5454 		goto out;
5455 	}
5456 
5457 
5458 	if (offset != -1) {
5459 		offset_in_blob = offset - blob->csb_base_offset;
5460 		if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
5461 			// not actually covered by this blob
5462 			blob = NULL;
5463 		}
5464 	}
5465 
5466 out:
5467 	vnode_unlock(vp);
5468 
5469 	return blob;
5470 }
5471 #endif
5472 
5473 struct cs_blob *
5474 ubc_cs_blob_get(
5475 	struct vnode    *vp,
5476 	cpu_type_t      cputype,
5477 	cpu_subtype_t   cpusubtype,
5478 	off_t           offset)
5479 {
5480 	struct cs_blob  *blob;
5481 	off_t offset_in_blob;
5482 
5483 	vnode_lock_spin(vp);
5484 
5485 	if (!UBCINFOEXISTS(vp)) {
5486 		blob = NULL;
5487 		goto out;
5488 	}
5489 
5490 	for (blob = ubc_get_cs_blobs(vp);
5491 	    blob != NULL;
5492 	    blob = blob->csb_next) {
5493 		if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
5494 			break;
5495 		}
5496 		if (offset != -1) {
5497 			offset_in_blob = offset - blob->csb_base_offset;
5498 			if (offset_in_blob >= blob->csb_start_offset &&
5499 			    offset_in_blob < blob->csb_end_offset) {
5500 				/* our offset is covered by this blob */
5501 				break;
5502 			}
5503 		}
5504 	}
5505 
5506 out:
5507 	vnode_unlock(vp);
5508 
5509 	return blob;
5510 }
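
/*
 * Usage sketch for ubc_cs_blob_get() (hypothetical caller; assumes the
 * caller holds a valid vnode reference):
 */
#if 0
static bool
example_offset_is_signed(vnode_t vp, off_t offset)
{
	/* cputype/cpusubtype of -1: match on offset coverage alone */
	return ubc_cs_blob_get(vp, -1, -1, offset) != NULL;
}
#endif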
5511 
5512 void
5513 ubc_cs_free_and_vnode_unlock(
5514 	vnode_t vp)
5515 {
5516 	struct ubc_info *uip = vp->v_ubcinfo;
5517 	struct cs_blob  *cs_blobs, *blob, *next_blob;
5518 
5519 	if (!(uip->ui_flags & UI_CSBLOBINVALID)) {
5520 		vnode_unlock(vp);
5521 		return;
5522 	}
5523 
5524 	uip->ui_flags &= ~UI_CSBLOBINVALID;
5525 
5526 	cs_blobs = uip->cs_blobs;
5527 	uip->cs_blobs = NULL;
5528 
5529 #if CHECK_CS_VALIDATION_BITMAP
5530 	ubc_cs_validation_bitmap_deallocate( uip );
5531 #endif
5532 
5533 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5534 	struct cs_blob  *cs_blob_supplement = uip->cs_blob_supplement;
5535 	uip->cs_blob_supplement = NULL;
5536 #endif
5537 
5538 	vnode_unlock(vp);
5539 
5540 	for (blob = cs_blobs;
5541 	    blob != NULL;
5542 	    blob = next_blob) {
5543 		next_blob = blob->csb_next;
5544 		os_atomic_add(&cs_blob_count, -1, relaxed);
5545 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5546 		cs_blob_ro_free(blob);
5547 	}
5548 
5549 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5550 	if (cs_blob_supplement != NULL) {
5551 		os_atomic_add(&cs_blob_count, -1, relaxed);
5552 		os_atomic_add(&cs_blob_size, -cs_blob_supplement->csb_mem_size, relaxed);
5553 		cs_blob_supplement_free(cs_blob_supplement);
5554 	}
5555 #endif
5556 }
5557 
5558 static void
5559 ubc_cs_free(
5560 	struct ubc_info *uip)
5561 {
5562 	struct cs_blob  *blob, *next_blob;
5563 
5564 	for (blob = uip->cs_blobs;
5565 	    blob != NULL;
5566 	    blob = next_blob) {
5567 		next_blob = blob->csb_next;
5568 		os_atomic_add(&cs_blob_count, -1, relaxed);
5569 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5570 		cs_blob_ro_free(blob);
5571 	}
5572 #if CHECK_CS_VALIDATION_BITMAP
5573 	ubc_cs_validation_bitmap_deallocate( uip );
5574 #endif
5575 	uip->cs_blobs = NULL;
5576 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5577 	if (uip->cs_blob_supplement != NULL) {
5578 		blob = uip->cs_blob_supplement;
5579 		os_atomic_add(&cs_blob_count, -1, relaxed);
5580 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5581 		cs_blob_supplement_free(uip->cs_blob_supplement);
5582 		uip->cs_blob_supplement = NULL;
5583 	}
5584 #endif
5585 }
5586 
5587 /* check cs blob generation on vnode
5588  * returns:
5589  *    0         : Success, the cs_blob attached is current
5590  *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
5591  */
5592 int
5593 ubc_cs_generation_check(
5594 	struct vnode    *vp)
5595 {
5596 	int retval = ENEEDAUTH;
5597 
5598 	vnode_lock_spin(vp);
5599 
5600 	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
5601 		retval = 0;
5602 	}
5603 
5604 	vnode_unlock(vp);
5605 	return retval;
5606 }
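
/*
 * Sketch of the intended generation-check flow (hypothetical caller state:
 * "blob", "imgp", "flags" and "platform" come from the image being loaded):
 */
#if 0
	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
		/* cache generation moved (see cs_blob_reset_cache below) */
		error = ubc_cs_blob_revalidate(vp, blob, imgp, flags, platform);
	}
#endif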
5607 
5608 int
5609 ubc_cs_blob_revalidate(
5610 	struct vnode    *vp,
5611 	struct cs_blob *blob,
5612 	struct image_params *imgp,
5613 	int flags,
5614 	uint32_t platform
5615 	)
5616 {
5617 	int error = 0;
5618 	const CS_CodeDirectory *cd = NULL;
5619 	const CS_GenericBlob *entitlements = NULL;
5620 	const CS_GenericBlob *der_entitlements = NULL;
5621 	size_t size;
5622 	assert(vp != NULL);
5623 	assert(blob != NULL);
5624 
5625 	if ((blob->csb_flags & CS_VALID) == 0) {
5626 		// If the blob attached to the vnode was invalidated, don't try to revalidate it
5627 		// Blob invalidation only occurs when the file that the blob is attached to is
5628 		// opened for writing, giving us a signal that the file is modified.
5629 		printf("CODESIGNING: cannot re-validate a previously invalidated blob; reboot or create a new file.\n");
5630 		error = EPERM;
5631 		goto out;
5632 	}
5633 
5634 	size = blob->csb_mem_size;
5635 	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
5636 	    size, &cd, &entitlements, &der_entitlements);
5637 	if (error) {
5638 		if (cs_debug) {
5639 			printf("CODESIGNING: csblob invalid: %d\n", error);
5640 		}
5641 		goto out;
5642 	}
5643 
5644 	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
5645 	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
5646 
5647 	if (blob->csb_reconstituted) {
5648 		/*
5649 		 * Code signatures that have been modified after validation
5650 		 * cannot be revalidated inline from their in-memory blob.
5651 		 *
5652 		 * That's okay, though, because the only path left that relies
5653 		 * on revalidation of existing in-memory blobs is the legacy
5654 		 * detached signature database path, which only exists on macOS,
5655 		 * which does not do reconstitution of any kind.
5656 		 */
5657 		if (cs_debug) {
5658 			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
5659 		}
5660 
5661 		/*
5662 		 * EAGAIN tells the caller that they may reread the code
5663 		 * signature and try attaching it again, which is the same
5664 		 * thing they would do if there was no cs_blob yet in the
5665 		 * first place.
5666 		 *
5667 		 * Conveniently, after ubc_cs_blob_add did a successful
5668 		 * validation, it will detect that a matching cs_blob (cdhash,
5669 		 * offset, arch etc.) already exists, and return success
5670 		 * without re-adding a cs_blob to the vnode.
5671 		 */
5672 		return EAGAIN;
5673 	}
5674 
5675 	/* callout to mac_vnode_check_signature */
5676 #if CONFIG_MACF
5677 	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
5678 	if (cs_debug && error) {
5679 		printf("revalidate: check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5680 	}
5681 #else
5682 	(void)flags;
5683 	(void)signer_type;
5684 #endif
5685 
5686 	/* update generation number if success */
5687 	vnode_lock_spin(vp);
5688 	struct cs_signer_info signer_info = {
5689 		.csb_flags = cs_flags,
5690 		.csb_signer_type = signer_type
5691 	};
5692 	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_signer_info, &signer_info);
5693 	if (UBCINFOEXISTS(vp)) {
5694 		if (error == 0) {
5695 			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
5696 		} else {
5697 			vp->v_ubcinfo->cs_add_gen = 0;
5698 		}
5699 	}
5700 
5701 	vnode_unlock(vp);
5702 
5703 out:
5704 	return error;
5705 }
5706 
5707 void
5708 cs_blob_reset_cache()
5709 {
5710 	/* Incrementing an odd number by 2 ensures '0' is never reached. */
5711 	OSAddAtomic(+2, &cs_blob_generation_count);
5712 	printf("Resetting cs_blob cache for all vnodes.\n");
5713 }
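
/*
 * Note: since the generation count starts odd and only ever moves in steps
 * of two, it walks odd values and wraps around without passing through 0;
 * a cs_add_gen that was reset to 0 (see ubc_cs_blob_revalidate above)
 * therefore always compares as stale.
 */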
5714 
5715 struct cs_blob *
5716 ubc_get_cs_blobs(
5717 	struct vnode    *vp)
5718 {
5719 	struct ubc_info *uip;
5720 	struct cs_blob  *blobs;
5721 
5722 	/*
5723 	 * No need to take the vnode lock here.  The caller must be holding
5724 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5725 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5726 	 * goes away.  And we only modify "blobs" by adding to the head of the
5727 	 * list.
5728 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5729 	 * part of a forced unmount.  In the case of a code-signature validation
5730 	 * during a page fault, the "paging_in_progress" reference on the VM
5731 	 * object guarantees that the vnode pager (and the ubc_info) won't go
5732 	 * away during the fault.
5733 	 * Other callers need to protect against vnode reclaim by holding the
5734 	 * vnode lock, for example.
5735 	 */
5736 
5737 	if (!UBCINFOEXISTS(vp)) {
5738 		blobs = NULL;
5739 		goto out;
5740 	}
5741 
5742 	uip = vp->v_ubcinfo;
5743 	blobs = uip->cs_blobs;
5744 	if (blobs != NULL) {
5745 		cs_blob_require(blobs, vp);
5746 	}
5747 
5748 out:
5749 	return blobs;
5750 }
5751 
5752 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5753 struct cs_blob *
5754 ubc_get_cs_supplement(
5755 	struct vnode    *vp)
5756 {
5757 	struct ubc_info *uip;
5758 	struct cs_blob  *blob;
5759 
5760 	/*
5761 	 * No need to take the vnode lock here.  The caller must be holding
5762 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5763 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5764 	 * goes away.
5765 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5766 	 * part of a forced unmount.  In the case of a code-signature validation
5767 	 * during a page fault, the "paging_in_progress" reference on the VM
5768 	 * object guarantees that the vnode pager (and the ubc_info) won't go
5769 	 * away during the fault.
5770 	 * Other callers need to protect against vnode reclaim by holding the
5771 	 * vnode lock, for example.
5772 	 */
5773 
5774 	if (!UBCINFOEXISTS(vp)) {
5775 		blob = NULL;
5776 		goto out;
5777 	}
5778 
5779 	uip = vp->v_ubcinfo;
5780 	blob = uip->cs_blob_supplement;
5781 	if (blob != NULL) {
5782 		cs_blob_require(blob, vp);
5783 	}
5784 
5785 out:
5786 	return blob;
5787 }
5788 #endif
5789 
5790 
5791 void
5792 ubc_get_cs_mtime(
5793 	struct vnode    *vp,
5794 	struct timespec *cs_mtime)
5795 {
5796 	struct ubc_info *uip;
5797 
5798 	if (!UBCINFOEXISTS(vp)) {
5799 		cs_mtime->tv_sec = 0;
5800 		cs_mtime->tv_nsec = 0;
5801 		return;
5802 	}
5803 
5804 	uip = vp->v_ubcinfo;
5805 	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
5806 	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
5807 }
5808 
5809 unsigned long cs_validate_page_no_hash = 0;
5810 unsigned long cs_validate_page_bad_hash = 0;
5811 static boolean_t
5812 cs_validate_hash(
5813 	struct cs_blob          *blobs,
5814 	memory_object_t         pager,
5815 	memory_object_offset_t  page_offset,
5816 	const void              *data,
5817 	vm_size_t               *bytes_processed,
5818 	unsigned                *tainted)
5819 {
5820 	union cs_hash_union     mdctx;
5821 	struct cs_hash const    *hashtype = NULL;
5822 	unsigned char           actual_hash[CS_HASH_MAX_SIZE];
5823 	unsigned char           expected_hash[CS_HASH_MAX_SIZE];
5824 	boolean_t               found_hash;
5825 	struct cs_blob          *blob;
5826 	const CS_CodeDirectory  *cd;
5827 	const unsigned char     *hash;
5828 	boolean_t               validated;
5829 	off_t                   offset; /* page offset in the file */
5830 	size_t                  size;
5831 	off_t                   codeLimit = 0;
5832 	const char              *lower_bound, *upper_bound;
5833 	vm_offset_t             kaddr, blob_addr;
5834 
5835 	/* retrieve the expected hash */
5836 	found_hash = FALSE;
5837 
5838 	for (blob = blobs;
5839 	    blob != NULL;
5840 	    blob = blob->csb_next) {
5841 		offset = page_offset - blob->csb_base_offset;
5842 		if (offset < blob->csb_start_offset ||
5843 		    offset >= blob->csb_end_offset) {
5844 			/* our page is not covered by this blob */
5845 			continue;
5846 		}
5847 
5848 		/* blob data has been released */
5849 		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
5850 		if (kaddr == 0) {
5851 			continue;
5852 		}
5853 
5854 		blob_addr = kaddr + blob->csb_mem_offset;
5855 		lower_bound = CAST_DOWN(char *, blob_addr);
5856 		upper_bound = lower_bound + blob->csb_mem_size;
5857 
5858 		cd = blob->csb_cd;
5859 		if (cd != NULL) {
5860 			/* all CDs that have been injected are already validated */
5861 
5862 			hashtype = blob->csb_hashtype;
5863 			if (hashtype == NULL) {
5864 				panic("unknown hash type ?");
5865 			}
5866 			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
5867 				panic("hash size too large");
5868 			}
5869 			if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
5870 				panic("offset not aligned to cshash boundary");
5871 			}
5872 
5873 			codeLimit = ntohl(cd->codeLimit);
5874 
5875 			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
5876 			    hashtype->cs_size,
5877 			    lower_bound, upper_bound);
5878 			if (hash != NULL) {
5879 				bcopy(hash, expected_hash, hashtype->cs_size);
5880 				found_hash = TRUE;
5881 			}
5882 
5883 			break;
5884 		}
5885 	}
5886 
5887 	if (found_hash == FALSE) {
5888 		/*
5889 		 * We can't verify this page because there is no signature
5890 		 * for it (yet).  It's possible that this part of the object
5891 		 * is not signed, or that signatures for that part have not
5892 		 * been loaded yet.
5893 		 * Report that the page has not been validated and let the
5894 		 * caller decide if it wants to accept it or not.
5895 		 */
5896 		cs_validate_page_no_hash++;
5897 		if (cs_debug > 1) {
5898 			printf("CODE SIGNING: cs_validate_page: "
5899 			    "mobj %p off 0x%llx: no hash to validate !?\n",
5900 			    pager, page_offset);
5901 		}
5902 		validated = FALSE;
5903 		*tainted = 0;
5904 	} else {
5905 		*tainted = 0;
5906 
5907 		size = (1U << blob->csb_hash_pageshift);
5908 		*bytes_processed = size;
5909 
5910 		const uint32_t *asha1, *esha1;
5911 		if ((off_t)(offset + size) > codeLimit) {
5912 			/* partial page at end of segment */
5913 			assert(offset < codeLimit);
5914 			size = (size_t) (codeLimit & (size - 1));
5915 			*tainted |= CS_VALIDATE_NX;
5916 		}
5917 
5918 		hashtype->cs_init(&mdctx);
5919 
5920 		if (blob->csb_hash_firstlevel_pageshift) {
5921 			const unsigned char *partial_data = (const unsigned char *)data;
5922 			size_t i;
5923 			for (i = 0; i < size;) {
5924 				union cs_hash_union     partialctx;
5925 				unsigned char partial_digest[CS_HASH_MAX_SIZE];
5926 				size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));
5927 
5928 				hashtype->cs_init(&partialctx);
5929 				hashtype->cs_update(&partialctx, partial_data, partial_size);
5930 				hashtype->cs_final(partial_digest, &partialctx);
5931 
5932 				/* Update cumulative multi-level hash */
5933 				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
5934 				partial_data = partial_data + partial_size;
5935 				i += partial_size;
5936 			}
5937 		} else {
5938 			hashtype->cs_update(&mdctx, data, size);
5939 		}
5940 		hashtype->cs_final(actual_hash, &mdctx);
5941 
5942 		asha1 = (const uint32_t *) actual_hash;
5943 		esha1 = (const uint32_t *) expected_hash;
5944 
5945 		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
5946 			if (cs_debug) {
5947 				printf("CODE SIGNING: cs_validate_page: "
5948 				    "mobj %p off 0x%llx size 0x%lx: "
5949 				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
5950 				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
5951 				    pager, page_offset, size,
5952 				    asha1[0], asha1[1], asha1[2],
5953 				    asha1[3], asha1[4],
5954 				    esha1[0], esha1[1], esha1[2],
5955 				    esha1[3], esha1[4]);
5956 			}
5957 			cs_validate_page_bad_hash++;
5958 			*tainted |= CS_VALIDATE_TAINTED;
5959 		} else {
5960 			if (cs_debug > 10) {
5961 				printf("CODE SIGNING: cs_validate_page: "
5962 				    "mobj %p off 0x%llx size 0x%lx: "
5963 				    "SHA1 OK\n",
5964 				    pager, page_offset, size);
5965 			}
5966 		}
5967 		validated = TRUE;
5968 	}
5969 
5970 	return validated;
5971 }
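
/*
 * Sketch of the two-level hash computed in the loop above (hypothetical
 * helper; "ht" is a struct cs_hash as used by cs_validate_hash()): the page
 * digest covers the concatenated first-level chunk digests, not the raw data.
 */
#if 0
static void
example_multilevel_hash(const struct cs_hash *ht, const unsigned char *page,
    size_t page_size, size_t chunk_size, unsigned char *out)
{
	union cs_hash_union top, partial;
	unsigned char digest[CS_HASH_MAX_SIZE];

	ht->cs_init(&top);
	for (size_t i = 0; i < page_size; i += chunk_size) {
		size_t n = MIN(chunk_size, page_size - i);

		ht->cs_init(&partial);
		ht->cs_update(&partial, page + i, n);
		ht->cs_final(digest, &partial);

		/* feed the chunk digest, not the chunk itself, into the page hash */
		ht->cs_update(&top, digest, ht->cs_size);
	}
	ht->cs_final(out, &top);
}
#endif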
5972 
5973 boolean_t
5974 cs_validate_range(
5975 	struct vnode    *vp,
5976 	memory_object_t         pager,
5977 	memory_object_offset_t  page_offset,
5978 	const void              *data,
5979 	vm_size_t               dsize,
5980 	unsigned                *tainted)
5981 {
5982 	vm_size_t offset_in_range;
5983 	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
5984 
5985 	struct cs_blob *blobs = ubc_get_cs_blobs(vp);
5986 
5987 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5988 	if (blobs == NULL && proc_is_translated(current_proc())) {
5989 		struct cs_blob *supp = ubc_get_cs_supplement(vp);
5990 
5991 		if (supp != NULL) {
5992 			blobs = supp;
5993 		} else {
5994 			return FALSE;
5995 		}
5996 	}
5997 #endif
5998 
5999 #if DEVELOPMENT || DEBUG
6000 	code_signing_config_t cs_config = 0;
6001 
6002 	/*
6003 	 * This exemption is specifically useful for systems which want to avoid paying
6004 	 * the cost of verifying the integrity of pages, since that is done by computing
6005 	 * hashes, which can take some time.
6006 	 */
6007 	code_signing_configuration(NULL, &cs_config);
6008 	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
6009 		*tainted = 0;
6010 
6011 		/* Return early to avoid paying the cost of hashing */
6012 		return true;
6013 	}
6014 #endif
6015 
6016 	*tainted = 0;
6017 
6018 	for (offset_in_range = 0;
6019 	    offset_in_range < dsize;
6020 	    /* offset_in_range updated based on bytes processed */) {
6021 		unsigned subrange_tainted = 0;
6022 		boolean_t subrange_validated;
6023 		vm_size_t bytes_processed = 0;
6024 
6025 		subrange_validated = cs_validate_hash(blobs,
6026 		    pager,
6027 		    page_offset + offset_in_range,
6028 		    (const void *)((const char *)data + offset_in_range),
6029 		    &bytes_processed,
6030 		    &subrange_tainted);
6031 
6032 		*tainted |= subrange_tainted;
6033 
6034 		if (bytes_processed == 0) {
6035 			/* Cannot make forward progress, so return an error */
6036 			all_subranges_validated = FALSE;
6037 			break;
6038 		} else if (subrange_validated == FALSE) {
6039 			all_subranges_validated = FALSE;
6040 			/* Keep going to detect other types of failures in subranges */
6041 		}
6042 
6043 		offset_in_range += bytes_processed;
6044 	}
6045 
6046 	return all_subranges_validated;
6047 }
6048 
6049 void
6050 cs_validate_page(
6051 	struct vnode            *vp,
6052 	memory_object_t         pager,
6053 	memory_object_offset_t  page_offset,
6054 	const void              *data,
6055 	int                     *validated_p,
6056 	int                     *tainted_p,
6057 	int                     *nx_p)
6058 {
6059 	vm_size_t offset_in_page;
6060 	struct cs_blob *blobs;
6061 
6062 	blobs = ubc_get_cs_blobs(vp);
6063 
6064 #if CONFIG_SUPPLEMENTAL_SIGNATURES
6065 	if (blobs == NULL && proc_is_translated(current_proc())) {
6066 		struct cs_blob *supp = ubc_get_cs_supplement(vp);
6067 
6068 		if (supp != NULL) {
6069 			blobs = supp;
6070 		}
6071 	}
6072 #endif
6073 
6074 #if DEVELOPMENT || DEBUG
6075 	code_signing_config_t cs_config = 0;
6076 
6077 	/*
6078 	 * This exemption is specifically useful for systems which want to avoid paying
6079 	 * the cost of verifying the integrity of pages, since that is done by computing
6080 	 * hashes, which can take some time.
6081 	 */
6082 	code_signing_configuration(NULL, &cs_config);
6083 	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
6084 		*validated_p = VMP_CS_ALL_TRUE;
6085 		*tainted_p = VMP_CS_ALL_FALSE;
6086 		*nx_p = VMP_CS_ALL_FALSE;
6087 
6088 		/* Return early to avoid paying the cost of hashing */
6089 		return;
6090 	}
6091 #endif
6092 
6093 	*validated_p = VMP_CS_ALL_FALSE;
6094 	*tainted_p = VMP_CS_ALL_FALSE;
6095 	*nx_p = VMP_CS_ALL_FALSE;
6096 
6097 	for (offset_in_page = 0;
6098 	    offset_in_page < PAGE_SIZE;
6099 	    /* offset_in_page updated based on bytes processed */) {
6100 		unsigned subrange_tainted = 0;
6101 		boolean_t subrange_validated;
6102 		vm_size_t bytes_processed = 0;
6103 		int sub_bit;
6104 
6105 		subrange_validated = cs_validate_hash(blobs,
6106 		    pager,
6107 		    page_offset + offset_in_page,
6108 		    (const void *)((const char *)data + offset_in_page),
6109 		    &bytes_processed,
6110 		    &subrange_tainted);
6111 
6112 		if (bytes_processed == 0) {
6113 			/* 4k chunk not code-signed: try next one */
6114 			offset_in_page += FOURK_PAGE_SIZE;
6115 			continue;
6116 		}
6117 		if (offset_in_page == 0 &&
6118 		    bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
6119 			/* all processed: no 4k granularity */
6120 			if (subrange_validated) {
6121 				*validated_p = VMP_CS_ALL_TRUE;
6122 			}
6123 			if (subrange_tainted & CS_VALIDATE_TAINTED) {
6124 				*tainted_p = VMP_CS_ALL_TRUE;
6125 			}
6126 			if (subrange_tainted & CS_VALIDATE_NX) {
6127 				*nx_p = VMP_CS_ALL_TRUE;
6128 			}
6129 			break;
6130 		}
6131 		/* we only handle 4k or 16k code-signing granularity... */
6132 		assertf(bytes_processed <= FOURK_PAGE_SIZE,
6133 		    "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
6134 		    vp, blobs, (uint64_t)page_offset,
6135 		    (uint64_t)offset_in_page, (uint64_t)bytes_processed);
6136 		sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
6137 		if (subrange_validated) {
6138 			*validated_p |= sub_bit;
6139 		}
6140 		if (subrange_tainted & CS_VALIDATE_TAINTED) {
6141 			*tainted_p |= sub_bit;
6142 		}
6143 		if (subrange_tainted & CS_VALIDATE_NX) {
6144 			*nx_p |= sub_bit;
6145 		}
6146 		/* go to next 4k chunk */
6147 		offset_in_page += FOURK_PAGE_SIZE;
6148 	}
6149 
6150 	return;
6151 }
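
/*
 * Example of the sub-bit encoding above, assuming 16 KiB kernel pages and
 * 4 KiB code-signing chunks: chunk k of a page maps to bit (1 << k), so a
 * page whose chunks 0 and 2 validated reports *validated_p == 0x5, while the
 * "all processed" fast path collapses to VMP_CS_ALL_TRUE instead.
 */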
6152 
6153 int
6154 ubc_cs_getcdhash(
6155 	vnode_t         vp,
6156 	off_t           offset,
6157 	unsigned char   *cdhash,
6158 	uint8_t         *type)
6159 {
6160 	struct cs_blob  *blobs, *blob;
6161 	off_t           rel_offset;
6162 	int             ret;
6163 
6164 	vnode_lock(vp);
6165 
6166 	blobs = ubc_get_cs_blobs(vp);
6167 	for (blob = blobs;
6168 	    blob != NULL;
6169 	    blob = blob->csb_next) {
6170 		/* compute offset relative to this blob */
6171 		rel_offset = offset - blob->csb_base_offset;
6172 		if (rel_offset >= blob->csb_start_offset &&
6173 		    rel_offset < blob->csb_end_offset) {
6174 			/* this blob does cover our "offset" ! */
6175 			break;
6176 		}
6177 	}
6178 
6179 	if (blob == NULL) {
6180 		/* we didn't find a blob covering "offset" */
6181 		ret = EBADEXEC; /* XXX any better error ? */
6182 	} else {
6183 		/* get the CDHash of that blob */
6184 		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
6185 
6186 		/* get the type of the CDHash */
6187 		if (type != NULL) {
6188 			*type = blob->csb_cd->hashType;
6189 		}
6190 
6191 		ret = 0;
6192 	}
6193 
6194 	vnode_unlock(vp);
6195 
6196 	return ret;
6197 }
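
/*
 * Usage sketch for ubc_cs_getcdhash() (hypothetical caller; "slice_offset"
 * is assumed to fall inside a loaded Mach-O slice of the file):
 */
#if 0
	unsigned char cdhash[CS_CDHASH_LEN];
	uint8_t hash_type;

	if (ubc_cs_getcdhash(vp, slice_offset, cdhash, &hash_type) == 0) {
		/* cdhash now holds the CodeDirectory hash covering slice_offset */
	}
#endif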
6198 
6199 boolean_t
6200 ubc_cs_is_range_codesigned(
6201 	vnode_t                 vp,
6202 	mach_vm_offset_t        start,
6203 	mach_vm_size_t          size)
6204 {
6205 	struct cs_blob          *csblob;
6206 	mach_vm_offset_t        blob_start;
6207 	mach_vm_offset_t        blob_end;
6208 
6209 	if (vp == NULL) {
6210 		/* no file: no code signature */
6211 		return FALSE;
6212 	}
6213 	if (size == 0) {
6214 		/* no range: no code signature */
6215 		return FALSE;
6216 	}
6217 	if (start + size < start) {
6218 		/* overflow */
6219 		return FALSE;
6220 	}
6221 
6222 	csblob = ubc_cs_blob_get(vp, -1, -1, start);
6223 	if (csblob == NULL) {
6224 		return FALSE;
6225 	}
6226 
6227 	/*
6228 	 * We currently check if the range is covered by a single blob,
6229 	 * which should always be the case for the dyld shared cache.
6230 	 * If we ever want to make this routine handle other cases, we
6231 	 * would have to iterate if the blob does not cover the full range.
6232 	 */
6233 	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
6234 	    csblob->csb_start_offset);
6235 	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
6236 	    csblob->csb_end_offset);
6237 	if (blob_start > start || blob_end < (start + size)) {
6238 		/* range not fully covered by this code-signing blob */
6239 		return FALSE;
6240 	}
6241 
6242 	return TRUE;
6243 }
6244 
6245 #if CHECK_CS_VALIDATION_BITMAP
6246 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
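/*
 * stob ("size to bitmap bytes"): one bit per page, rounded up to whole
 * bytes. For example, assuming 4 KiB pages, a 1 MiB file spans 256 pages,
 * so stob(1 MiB) = (256 + 07) >> 3 = 32 bytes.
 */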
6247 extern  boolean_t       root_fs_upgrade_try;
6248 
6249 /*
6250  * Should we use the code-sign bitmap to avoid repeated code-sign validation?
6251  * Depends:
6252  * a) Is the target vnode on the root filesystem?
6253  * b) Has someone tried to mount the root filesystem read-write?
6254  * If answers are (a) yes AND (b) no, then we can use the bitmap.
6255  */
6256 #define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
6257 kern_return_t
6258 ubc_cs_validation_bitmap_allocate(
6259 	vnode_t         vp)
6260 {
6261 	kern_return_t   kr = KERN_SUCCESS;
6262 	struct ubc_info *uip;
6263 	char            *target_bitmap;
6264 	vm_object_size_t        bitmap_size;
6265 
6266 	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
6267 		kr = KERN_INVALID_ARGUMENT;
6268 	} else {
6269 		uip = vp->v_ubcinfo;
6270 
6271 		if (uip->cs_valid_bitmap == NULL) {
6272 			bitmap_size = stob(uip->ui_size);
6273 			target_bitmap = (char*) kalloc_data((vm_size_t)bitmap_size, Z_WAITOK | Z_ZERO);
6274 			if (target_bitmap == 0) {
6275 				kr = KERN_NO_SPACE;
6276 			} else {
6277 				kr = KERN_SUCCESS;
6278 			}
6279 			if (kr == KERN_SUCCESS) {
6280 				uip->cs_valid_bitmap = (void*)target_bitmap;
6281 				uip->cs_valid_bitmap_size = bitmap_size;
6282 			}
6283 		}
6284 	}
6285 	return kr;
6286 }
6287 
6288 kern_return_t
6289 ubc_cs_check_validation_bitmap(
6290 	vnode_t                 vp,
6291 	memory_object_offset_t          offset,
6292 	int                     optype)
6293 {
6294 	kern_return_t   kr = KERN_SUCCESS;
6295 
6296 	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
6297 		kr = KERN_INVALID_ARGUMENT;
6298 	} else {
6299 		struct ubc_info *uip = vp->v_ubcinfo;
6300 		char            *target_bitmap = uip->cs_valid_bitmap;
6301 
6302 		if (target_bitmap == NULL) {
6303 			kr = KERN_INVALID_ARGUMENT;
6304 		} else {
6305 			uint64_t        bit, byte;
6306 			bit = atop_64( offset );
6307 			byte = bit >> 3;
6308 
6309 			if (byte > uip->cs_valid_bitmap_size) {
6310 				kr = KERN_INVALID_ARGUMENT;
6311 			} else {
6312 				if (optype == CS_BITMAP_SET) {
6313 					target_bitmap[byte] |= (1 << (bit & 07));
6314 					kr = KERN_SUCCESS;
6315 				} else if (optype == CS_BITMAP_CLEAR) {
6316 					target_bitmap[byte] &= ~(1 << (bit & 07));
6317 					kr = KERN_SUCCESS;
6318 				} else if (optype == CS_BITMAP_CHECK) {
6319 					if (target_bitmap[byte] & (1 << (bit & 07))) {
6320 						kr = KERN_SUCCESS;
6321 					} else {
6322 						kr = KERN_FAILURE;
6323 					}
6324 				}
6325 			}
6326 		}
6327 	}
6328 	return kr;
6329 }
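
/*
 * Usage sketch for the bitmap operations (hypothetical call sites; "offset"
 * is a page-aligned file offset):
 */
#if 0
	/* after a page at "offset" validates cleanly: */
	(void) ubc_cs_check_validation_bitmap(vp, offset, CS_BITMAP_SET);

	/* later, skip re-hashing if the bit is still set: */
	if (ubc_cs_check_validation_bitmap(vp, offset, CS_BITMAP_CHECK) == KERN_SUCCESS) {
		/* page was previously validated and is still marked valid */
	}
#endif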
6330 
6331 void
6332 ubc_cs_validation_bitmap_deallocate(
6333 	struct ubc_info *uip)
6334 {
6335 	if (uip->cs_valid_bitmap != NULL) {
6336 		kfree_data(uip->cs_valid_bitmap, (vm_size_t)uip->cs_valid_bitmap_size);
6337 		uip->cs_valid_bitmap = NULL;
6338 	}
6339 }
6340 #else
6341 kern_return_t
6342 ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
6343 {
6344 	return KERN_INVALID_ARGUMENT;
6345 }
6346 
6347 kern_return_t
6348 ubc_cs_check_validation_bitmap(
6349 	__unused struct vnode *vp,
6350 	__unused memory_object_offset_t offset,
6351 	__unused int optype)
6352 {
6353 	return KERN_INVALID_ARGUMENT;
6354 }
6355 
6356 void
6357 ubc_cs_validation_bitmap_deallocate(__unused struct ubc_info *uip)
6358 {
6359 	return;
6360 }
6361 #endif /* CHECK_CS_VALIDATION_BITMAP */
6362 
6363 #if CODE_SIGNING_MONITOR
6364 
6365 kern_return_t
6366 cs_associate_blob_with_mapping(
6367 	void                    *pmap,
6368 	vm_map_offset_t         start,
6369 	vm_map_size_t           size,
6370 	vm_object_offset_t      offset,
6371 	void                    *blobs_p)
6372 {
6373 	off_t                   blob_start_offset, blob_end_offset;
6374 	kern_return_t           kr;
6375 	struct cs_blob          *blobs, *blob;
6376 	vm_offset_t             kaddr;
6377 	void                    *monitor_sig_obj = NULL;
6378 
6379 	if (csm_enabled() == false) {
6380 		return KERN_NOT_SUPPORTED;
6381 	}
6382 
6383 	blobs = (struct cs_blob *)blobs_p;
6384 
6385 	for (blob = blobs;
6386 	    blob != NULL;
6387 	    blob = blob->csb_next) {
6388 		blob_start_offset = (blob->csb_base_offset +
6389 		    blob->csb_start_offset);
6390 		blob_end_offset = (blob->csb_base_offset +
6391 		    blob->csb_end_offset);
6392 		if ((off_t) offset < blob_start_offset ||
6393 		    (off_t) offset >= blob_end_offset ||
6394 		    (off_t) (offset + size) <= blob_start_offset ||
6395 		    (off_t) (offset + size) > blob_end_offset) {
6396 			continue;
6397 		}
6398 
6399 		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
6400 		if (kaddr == 0) {
6401 			/* blob data has been released */
6402 			continue;
6403 		}
6404 
6405 		monitor_sig_obj = blob->csb_csm_obj;
6406 		if (monitor_sig_obj == NULL) {
6407 			continue;
6408 		}
6409 
6410 		break;
6411 	}
6412 
6413 	if (monitor_sig_obj != NULL) {
6414 		vm_offset_t segment_offset = offset - blob_start_offset;
6415 		kr = csm_associate_code_signature(pmap, monitor_sig_obj, start, size, segment_offset);
6416 	} else {
6417 		kr = KERN_CODESIGN_ERROR;
6418 	}
6419 
6420 	return kr;
6421 }
6422 
6423 #endif /* CODE_SIGNING_MONITOR */
6424