xref: /xnu-12377.61.12/bsd/kern/ubc_subr.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
31  *		05-Aug-1999	umeshv	Created.
32  *
33  *	Functions related to Unified Buffer cache.
34  *
35  * Caller of UBC functions MUST have a valid reference on the vnode.
36  *
37  */
38 
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/lock.h>
43 #include <sys/mman.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56 #include <sys/reboot.h>
57 #include <sys/code_signing.h>
58 
59 #include <mach/mach_types.h>
60 #include <mach/memory_object_types.h>
61 #include <mach/memory_object_control.h>
62 #include <mach/vm_map.h>
63 #include <mach/mach_vm.h>
64 #include <mach/upl.h>
65 
66 #include <kern/kern_types.h>
67 #include <kern/kalloc.h>
68 #include <kern/zalloc.h>
69 #include <kern/thread.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_upl.h>
74 #include <vm/vm_kern_xnu.h>
75 #include <vm/vm_protos.h> /* last */
76 #include <vm/vm_ubc.h>
77 
78 #include <libkern/crypto/sha1.h>
79 #include <libkern/crypto/sha2.h>
80 #include <libkern/libkern.h>
81 
82 #include <security/mac_framework.h>
83 #include <stdbool.h>
84 #include <stdatomic.h>
85 #include <libkern/amfi/amfi.h>
86 
87 extern void Debugger(const char *message);
88 
89 #if DIAGNOSTIC
90 #if defined(assert)
91 #undef assert
92 #endif
93 #define assert(cond)    \
94     ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
95 #else
96 #include <kern/assert.h>
97 #endif /* DIAGNOSTIC */
98 
99 static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
100 static int ubc_umcallback(vnode_t, void *);
101 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
102 static void ubc_cs_free(struct ubc_info *uip);
103 
104 static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
105 static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
106 
107 ZONE_DEFINE_TYPE(ubc_info_zone, "ubc_info zone", struct ubc_info,
108     ZC_ZFREE_CLEARMEM);
109 static uint32_t cs_blob_generation_count = 1;
110 
111 /*
112  * CODESIGNING
113  * Routines to navigate code signing data structures in the kernel...
114  */
115 
116 ZONE_DEFINE_ID(ZONE_ID_CS_BLOB, "cs_blob zone", struct cs_blob,
117     ZC_READONLY | ZC_ZFREE_CLEARMEM);
118 
119 extern int cs_debug;
120 
121 #define PAGE_SHIFT_4K           (12)
122 
123 static boolean_t
cs_valid_range(const void * start,const void * end,const void * lower_bound,const void * upper_bound)124 cs_valid_range(
125 	const void *start,
126 	const void *end,
127 	const void *lower_bound,
128 	const void *upper_bound)
129 {
130 	if (upper_bound < lower_bound ||
131 	    end < start) {
132 		return FALSE;
133 	}
134 
135 	if (start < lower_bound ||
136 	    end > upper_bound) {
137 		return FALSE;
138 	}
139 
140 	return TRUE;
141 }
142 
/*
 * Function-pointer shapes shared by every supported message digest, so
 * one descriptor struct can describe SHA-1 and the SHA-2 family alike.
 */
typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

/* Descriptor for one code-signing hash algorithm. */
struct cs_hash {
	uint8_t             cs_type;    /* type code as per code signing */
	size_t              cs_size;    /* size of effective hash (may be truncated) */
	size_t              cs_digest_size;/* size of native hash */
	cs_md_init          cs_init;    /* start a digest computation */
	cs_md_update        cs_update;  /* absorb data into the digest */
	cs_md_final         cs_final;   /* finalize and emit the digest */
};
155 
/*
 * cs_hash_type
 *
 * Accessor: return the code-signing hash type code stored in a
 * cs_hash descriptor (one of the CS_HASHTYPE_* values used in the
 * descriptor tables below).
 */
uint8_t
cs_hash_type(
	struct cs_hash const * const cs_hash)
{
	return cs_hash->cs_type;
}
162 
/* SHA-1 descriptor; effective and native digest sizes are equal. */
static const struct cs_hash cs_hash_sha1 = {
	.cs_type = CS_HASHTYPE_SHA1,
	.cs_size = CS_SHA1_LEN,
	.cs_digest_size = SHA_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA1Init,
	.cs_update = (cs_md_update)SHA1Update,
	.cs_final = (cs_md_final)SHA1Final,
};
#if CRYPTO_SHA2
/* SHA-256 descriptor; full digest stored per slot. */
static const struct cs_hash cs_hash_sha256 = {
	.cs_type = CS_HASHTYPE_SHA256,
	.cs_size = SHA256_DIGEST_LENGTH,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
/* SHA-256 computed natively but truncated to CS_SHA256_TRUNCATED_LEN per slot. */
static const struct cs_hash cs_hash_sha256_truncate = {
	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
	.cs_size = CS_SHA256_TRUNCATED_LEN,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
/* SHA-384 descriptor; full digest stored per slot. */
static const struct cs_hash cs_hash_sha384 = {
	.cs_type = CS_HASHTYPE_SHA384,
	.cs_size = SHA384_DIGEST_LENGTH,
	.cs_digest_size = SHA384_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA384_Init,
	.cs_update = (cs_md_update)SHA384_Update,
	.cs_final = (cs_md_final)SHA384_Final,
};
#endif
197 
198 static struct cs_hash const *
cs_find_md(uint8_t type)199 cs_find_md(uint8_t type)
200 {
201 	if (type == CS_HASHTYPE_SHA1) {
202 		return &cs_hash_sha1;
203 #if CRYPTO_SHA2
204 	} else if (type == CS_HASHTYPE_SHA256) {
205 		return &cs_hash_sha256;
206 	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
207 		return &cs_hash_sha256_truncate;
208 	} else if (type == CS_HASHTYPE_SHA384) {
209 		return &cs_hash_sha384;
210 #endif
211 	}
212 	return NULL;
213 }
214 
/* Scratch area large enough for any supported digest context. */
union cs_hash_union {
	SHA1_CTX                sha1ctxt;
	SHA256_CTX              sha256ctx;
	SHA384_CTX              sha384ctx;
};


/*
 * Choose among different hash algorithms.
 * Higher is better, 0 => don't use at all.
 * (Index+1 into this array is the rank returned by hash_rank().)
 */
static const uint32_t hashPriorities[] = {
	CS_HASHTYPE_SHA1,
	CS_HASHTYPE_SHA256_TRUNCATED,
	CS_HASHTYPE_SHA256,
	CS_HASHTYPE_SHA384,
};
232 
233 static unsigned int
hash_rank(const CS_CodeDirectory * cd)234 hash_rank(const CS_CodeDirectory *cd)
235 {
236 	uint32_t type = cd->hashType;
237 	unsigned int n;
238 
239 	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
240 		if (hashPriorities[n] == type) {
241 			return n + 1;
242 		}
243 	}
244 	return 0;       /* not supported */
245 }
246 
247 
248 /*
249  * Locating a page hash
250  */
251 static const unsigned char *
hashes(const CS_CodeDirectory * cd,uint32_t page,size_t hash_len,const char * lower_bound,const char * upper_bound)252 hashes(
253 	const CS_CodeDirectory *cd,
254 	uint32_t page,
255 	size_t hash_len,
256 	const char *lower_bound,
257 	const char *upper_bound)
258 {
259 	const unsigned char *base, *top, *hash;
260 	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
261 
262 	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
263 
264 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
265 		/* Get first scatter struct */
266 		const SC_Scatter *scatter = (const SC_Scatter*)
267 		    ((const char*)cd + ntohl(cd->scatterOffset));
268 		uint32_t hashindex = 0, scount, sbase = 0;
269 		/* iterate all scatter structs */
270 		do {
271 			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
272 				if (cs_debug) {
273 					printf("CODE SIGNING: Scatter extends past Code Directory\n");
274 				}
275 				return NULL;
276 			}
277 
278 			scount = ntohl(scatter->count);
279 			uint32_t new_base = ntohl(scatter->base);
280 
281 			/* last scatter? */
282 			if (scount == 0) {
283 				return NULL;
284 			}
285 
286 			if ((hashindex > 0) && (new_base <= sbase)) {
287 				if (cs_debug) {
288 					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
289 					    sbase, new_base);
290 				}
291 				return NULL;    /* unordered scatter array */
292 			}
293 			sbase = new_base;
294 
295 			/* this scatter beyond page we're looking for? */
296 			if (sbase > page) {
297 				return NULL;
298 			}
299 
300 			if (sbase + scount >= page) {
301 				/* Found the scatter struct that is
302 				 * referencing our page */
303 
304 				/* base = address of first hash covered by scatter */
305 				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
306 				    hashindex * hash_len;
307 				/* top = address of first hash after this scatter */
308 				top = base + scount * hash_len;
309 				if (!cs_valid_range(base, top, lower_bound,
310 				    upper_bound) ||
311 				    hashindex > nCodeSlots) {
312 					return NULL;
313 				}
314 
315 				break;
316 			}
317 
318 			/* this scatter struct is before the page we're looking
319 			 * for. Iterate. */
320 			hashindex += scount;
321 			scatter++;
322 		} while (1);
323 
324 		hash = base + (page - sbase) * hash_len;
325 	} else {
326 		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
327 		top = base + nCodeSlots * hash_len;
328 		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
329 		    page > nCodeSlots) {
330 			return NULL;
331 		}
332 		assert(page < nCodeSlots);
333 
334 		hash = base + page * hash_len;
335 	}
336 
337 	if (!cs_valid_range(hash, hash + hash_len,
338 	    lower_bound, upper_bound)) {
339 		hash = NULL;
340 	}
341 
342 	return hash;
343 }
344 
345 /*
346  * cs_validate_codedirectory
347  *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
350  *
351  * Parameters:	cd			Pointer to code directory buffer
352  *		length			Length of buffer
353  *
354  * Returns:	0			Success
355  *		EBADEXEC		Invalid code signature
356  */
357 
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
	struct cs_hash const *hashtype;

	/* buffer must at least hold the fixed-size header */
	if (length < sizeof(*cd)) {
		return EBADEXEC;
	}
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
		return EBADEXEC;
	}
	/* only 4K or native page-sized signatures are accepted */
	if ((cd->pageSize != PAGE_SHIFT_4K) && (cd->pageSize != PAGE_SHIFT)) {
		printf("disallowing unsupported code signature page shift: %u\n", cd->pageSize);
		return EBADEXEC;
	}
	hashtype = cs_find_md(cd->hashType);
	if (hashtype == NULL) {
		return EBADEXEC;
	}

	/* declared per-slot hash size must match the algorithm's */
	if (cd->hashSize != hashtype->cs_size) {
		return EBADEXEC;
	}

	if (length < ntohl(cd->hashOffset)) {
		return EBADEXEC;
	}

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
		return EBADEXEC;
	}

	/* check that codeslots fits in the buffer */
	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
		return EBADEXEC;
	}

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
		if (length < ntohl(cd->scatterOffset)) {
			return EBADEXEC;
		}

		const SC_Scatter *scatter = (const SC_Scatter *)
		    (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer, since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while (1) {
			/* check that the end of each scatter buffer in within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
				return EBADEXEC;
			}
			uint32_t scount = ntohl(scatter->count);
			/* a zero count terminates the scatter array */
			if (scount == 0) {
				break;
			}
			/* reject uint32 wrap of the running page total */
			if (nPages + scount < nPages) {
				return EBADEXEC;
			}
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots)) {
			return EBADEXEC;
		}
#endif
	}

	if (length < ntohl(cd->identOffset)) {
		return EBADEXEC;
	}

	/* identifier is NUL terminated string */
	if (cd->identOffset) {
		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
			return EBADEXEC;
		}
	}

	/* team identifier is NULL terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset)) {
			return EBADEXEC;
		}

		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
			return EBADEXEC;
		}
	}

	/* linkage is variable length binary data */
	if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
		const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
		const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);

		/* guard against address wrap and out-of-buffer ranges */
		if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
			return EBADEXEC;
		}
	}


	return 0;
}
472 
473 /*
474  *
475  */
476 
477 static int
cs_validate_blob(const CS_GenericBlob * blob,size_t length)478 cs_validate_blob(const CS_GenericBlob *blob, size_t length)
479 {
480 	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
481 		return EBADEXEC;
482 	}
483 	return 0;
484 }
485 
486 /*
487  * cs_validate_csblob
488  *
489  * Validate that superblob/embedded code directory to make sure that
490  * all internal pointers are valid.
491  *
492  * Will validate both a superblob csblob and a "raw" code directory.
493  *
494  *
495  * Parameters:	buffer			Pointer to code signature
496  *		length			Length of buffer
497  *		rcd			returns pointer to code directory
498  *
499  * Returns:	0			Success
500  *		EBADEXEC		Invalid code signature
501  */
502 
static int
cs_validate_csblob(
	const uint8_t *addr,
	const size_t blob_size,
	const CS_CodeDirectory **rcd,
	const CS_GenericBlob **rentitlements,
	const CS_GenericBlob **rder_entitlements)
{
	const CS_GenericBlob *blob;
	int error;
	size_t length;
	bool primary_cd_exists = false;
	/* each of these constraint categories may appear at most once */
	const CS_GenericBlob *self_constraint = NULL;
	const CS_GenericBlob *parent_constraint = NULL;
	const CS_GenericBlob *responsible_proc_constraint = NULL;
	const CS_GenericBlob *library_constraint = NULL;

	*rcd = NULL;
	*rentitlements = NULL;
	*rder_entitlements = NULL;

	blob = (const CS_GenericBlob *)(const void *)addr;

	length = blob_size;
	error = cs_validate_blob(blob, length);
	if (error) {
		return error;
	}
	/* from here on, use the blob's (now-validated) declared length */
	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb;
		uint32_t n, count;
		const CS_CodeDirectory *best_cd = NULL;
		unsigned int best_rank = 0;

		if (length < sizeof(CS_SuperBlob)) {
			return EBADEXEC;
		}

		sb = (const CS_SuperBlob *)blob;
		count = ntohl(sb->count);

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
			return EBADEXEC;
		}

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			uint32_t type = ntohl(blobIndex->type);
			uint32_t offset = ntohl(blobIndex->offset);
			if (length < offset) {
				return EBADEXEC;
			}

			const CS_GenericBlob *subBlob =
			    (const CS_GenericBlob *)(const void *)(addr + offset);

			size_t subLength = length - offset;

			if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
				return error;
			}
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, that is also returned */
			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
				if (type == CSSLOT_CODEDIRECTORY) {
					primary_cd_exists = true;
				}
				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
					return error;
				}
				/* prefer the CD with the strongest supported hash */
				unsigned int rank = hash_rank(candidate);
				if (cs_debug > 3) {
					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
				}
				if (best_cd == NULL || rank > best_rank) {
					best_cd = candidate;
					best_rank = rank;

					if (cs_debug > 2) {
						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
					}
					*rcd = best_cd;
				} else if (best_cd != NULL && rank == best_rank) {
					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
					return EBADEXEC;
				}
			} else if (type == CSSLOT_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rentitlements != NULL) {
					printf("multiple entitlements blobs\n");
					return EBADEXEC;
				}
				*rentitlements = subBlob;
			} else if (type == CSSLOT_DER_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_DER_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rder_entitlements != NULL) {
					printf("multiple der entitlements blobs\n");
					return EBADEXEC;
				}
				*rder_entitlements = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_SELF) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (self_constraint != NULL) {
					printf("multiple self constraint blobs\n");
					return EBADEXEC;
				}
				self_constraint = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_PARENT) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (parent_constraint != NULL) {
					printf("multiple parent constraint blobs\n");
					return EBADEXEC;
				}
				parent_constraint = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (responsible_proc_constraint != NULL) {
					printf("multiple responsible process constraint blobs\n");
					return EBADEXEC;
				}
				responsible_proc_constraint = subBlob;
			} else if (type == CSSLOT_LIBRARY_CONSTRAINT) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (library_constraint != NULL) {
					printf("multiple library constraint blobs\n");
					return EBADEXEC;
				}
				library_constraint = subBlob;
			}
		}
		/* a superblob without a primary CD is rejected outright */
		if (!primary_cd_exists) {
			printf("missing primary code directory\n");
			return EBADEXEC;
		}
	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
		/* "raw" code directory without a superblob wrapper */
		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
			return error;
		}
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	/* in all cases we must have found a code directory */
	if (*rcd == NULL) {
		return EBADEXEC;
	}

	return 0;
}
671 
672 /*
673  * cs_find_blob_bytes
674  *
 * Find a blob from the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use csblob_find_blob() instead.
678  *
679  * Will also find a "raw" code directory if its stored as well as
680  * searching the superblob.
681  *
682  * Parameters:	buffer			Pointer to code signature
683  *		length			Length of buffer
684  *		type			type of blob to find
685  *		magic			the magic number for that blob
686  *
687  * Returns:	pointer			Success
688  *		NULL			Buffer not found
689  */
690 
const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

	/* a range that wraps the address space is a caller bug */
	if ((addr + length) < addr) {
		panic("CODE SIGNING: CS Blob length overflow for addr: %p", addr);
	}

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		/* scan the superblob index for a slot of the requested type */
		for (n = 0; n < count; n++) {
			if (ntohl(sb->index[n].type) != type) {
				continue;
			}
			uint32_t offset = ntohl(sb->index[n].offset);
			/* the sub-blob header must fit inside the buffer */
			if (length - sizeof(const CS_GenericBlob) < offset) {
				return NULL;
			}
			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
			/* slot type matched but magic didn't; keep looking */
			if (ntohl(blob->magic) != magic) {
				continue;
			}
			if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
				panic("CODE SIGNING: CS Blob length overflow for blob at: %p", blob);
			} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
				/* declared length runs past the buffer; skip it */
				continue;
			}
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
	    && magic == CSMAGIC_CODEDIRECTORY) {
		/* a "raw" code directory stored without a superblob wrapper */
		if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
			panic("CODE SIGNING: CS Blob length overflow for code directory blob at: %p", blob);
		} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
			return NULL;
		}
		return blob;
	}
	return NULL;
}
734 
735 
736 const CS_GenericBlob *
csblob_find_blob(struct cs_blob * csblob,uint32_t type,uint32_t magic)737 csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
738 {
739 	if ((csblob->csb_flags & CS_VALID) == 0) {
740 		return NULL;
741 	}
742 	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
743 }
744 
745 static const uint8_t *
find_special_slot(const CS_CodeDirectory * cd,size_t slotsize,uint32_t slot)746 find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
747 {
748 	/* there is no zero special slot since that is the first code slot */
749 	if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
750 		return NULL;
751 	}
752 
753 	return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
754 }
755 
/* All-zero hash, used to detect "slot hashed but blob absent" below. */
static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
757 
/*
 * csblob_find_special_slot_blob
 *
 * Look up the sub-blob stored under special slot `slot` and verify it
 * against the hash recorded for that slot in the code directory.
 *
 * Returns:	0		slot absent (with zero/absent hash), or
 *				blob present and hash-verified
 *		EBADEXEC	hash mismatch, orphan blob, or unusable
 *				hash type
 *
 * On success with a present blob, *out_start/*out_length (if supplied)
 * describe the blob.
 */
static int
csblob_find_special_slot_blob(struct cs_blob* csblob, uint32_t slot, uint32_t magic, const CS_GenericBlob **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *blob;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	if (out_start) {
		*out_start = NULL;
	}
	if (out_length) {
		*out_length = 0;
	}

	/* the native digest must fit in our stack buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	blob = csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, slot, magic);

	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, slot);

	if (embedded_hash == NULL) {
		/* no hash slot: a present blob would be unaccounted for */
		if (blob) {
			return EBADEXEC;
		}
		return 0;
	} else if (blob == NULL) {
		/* hash slot exists but blob missing: hash must be all-zero */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* hash the blob and compare with the code directory's record */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, blob, ntohl(blob->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}
	if (out_start) {
		*out_start = blob;
	}
	if (out_length) {
		*out_length = ntohl(blob->length);
	}

	return 0;
}
813 
/*
 * csblob_get_entitlements
 *
 * Return the entitlements blob, but only after re-verifying it against
 * the hash stored in the CSSLOT_ENTITLEMENTS special slot of the code
 * directory.  An invalid signature (CS_VALID clear) is treated as
 * having no entitlements blob.
 *
 * Returns:	0		no entitlements, or entitlements verified
 *				(*out_start/*out_length describe the blob)
 *		EBADEXEC	hash mismatch, orphan blob, or unusable
 *				hash type
 */
int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	/* the native digest must fit in our stack buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	if ((csblob->csb_flags & CS_VALID) == 0) {
		entitlements = NULL;
	} else {
		entitlements = csblob->csb_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		/* no hash slot: a present blob would be unaccounted for */
		if (entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (entitlements == NULL) {
		/* hash slot exists but blob missing: hash must be all-zero */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* hash the blob and compare with the code directory's record */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = __DECONST(void *, entitlements);
	*out_length = ntohl(entitlements->length);

	return 0;
}
865 
866 const CS_GenericBlob*
csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)867 csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)
868 {
869 	if ((csblob->csb_flags & CS_VALID) == 0) {
870 		return NULL;
871 	}
872 
873 	return csblob->csb_der_entitlements_blob;
874 }
875 
/*
 * csblob_get_der_entitlements
 *
 * Return the DER entitlements blob, but only after re-verifying it
 * against the hash stored in the CSSLOT_DER_ENTITLEMENTS special slot
 * of the code directory.  An invalid signature (CS_VALID clear) is
 * treated as having no DER entitlements blob.
 *
 * Returns:	0		no DER entitlements, or blob verified
 *				(*out_start/*out_length describe it)
 *		EBADEXEC	hash mismatch, orphan blob, or unusable
 *				hash type
 */
int
csblob_get_der_entitlements(struct cs_blob *csblob, const CS_GenericBlob **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *der_entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	/* the native digest must fit in our stack buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	if ((csblob->csb_flags & CS_VALID) == 0) {
		der_entitlements = NULL;
	} else {
		der_entitlements = csblob->csb_der_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_DER_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		/* no hash slot: a present blob would be unaccounted for */
		if (der_entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (der_entitlements == NULL) {
		/* hash slot exists but blob missing: hash must be all-zero */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* hash the blob and compare with the code directory's record */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, der_entitlements, ntohl(der_entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = der_entitlements;
	*out_length = ntohl(der_entitlements->length);

	return 0;
}
927 
/*
 * ubc_cs_blob_pagewise_allocate
 *
 * Decide whether a code-signature allocation of `size` bytes must be
 * page-aligned for the code-signing monitor; without a monitor the
 * answer is always false.
 */
static bool
ubc_cs_blob_pagewise_allocate(
	__unused vm_size_t size)
{
#if CODE_SIGNING_MONITOR
	/* If the monitor isn't enabled, then we don't need to page-align */
	if (csm_enabled() == false) {
		return false;
	}

	/*
	 * Small allocations can be managed by the monitor itself. We only need to allocate
	 * page-wise when it is a sufficiently large allocation and the monitor cannot manage
	 * it on its own.
	 */
	if (size <= csm_signature_size_limit()) {
		return false;
	}

	return true;
#else
	/* Without a monitor, we never need to page align */
	return false;
#endif /* CODE_SIGNING_MONITOR */
}
953 
/*
 * csblob_register_profile
 *
 * Register a provisioning profile with the code-signing monitor,
 * attempt to trust it, and associate it with the blob's monitor
 * signature object.  Without a monitor this is a no-op.
 *
 * Returns:	0	success, or monitor doesn't support profiles
 *		EPERM	any monitor step failed
 */
int
csblob_register_profile(
	__unused struct cs_blob *csblob,
	__unused cs_profile_register_t *profile)
{
#if CODE_SIGNING_MONITOR
	/* Profiles only need to be registered for monitor environments */
	assert(profile->data != NULL);
	assert(profile->size != 0);
	assert(csblob != NULL);

	kern_return_t kr = csm_register_provisioning_profile(
		profile->uuid,
		profile->data, profile->size);

	/* KERN_ALREADY_IN_SET: previously registered -- not an error */
	if ((kr != KERN_SUCCESS) && (kr != KERN_ALREADY_IN_SET)) {
		if (kr == KERN_NOT_SUPPORTED) {
			/* monitor doesn't handle profiles; nothing to do */
			return 0;
		}
		return EPERM;
	}

	/* Attempt to trust the profile */
	kr = csm_trust_provisioning_profile(
		profile->uuid,
		profile->sig_data, profile->sig_size);

	if (kr != KERN_SUCCESS) {
		return EPERM;
	}

	/* Associate the profile with the monitor's signature object */
	kr = csm_associate_provisioning_profile(
		csblob->csb_csm_obj,
		profile->uuid);

	if (kr != KERN_SUCCESS) {
		return EPERM;
	}

	return 0;
#else
	return 0;
#endif /* CODE_SIGNING_MONITOR */
}
999 
1000 int
csblob_register_profile_uuid(struct cs_blob * csblob,const uuid_t profile_uuid,void * profile_addr,vm_size_t profile_size)1001 csblob_register_profile_uuid(
1002 	struct cs_blob *csblob,
1003 	const uuid_t profile_uuid,
1004 	void *profile_addr,
1005 	vm_size_t profile_size)
1006 {
1007 	cs_profile_register_t profile = {
1008 		.sig_data = NULL,
1009 		.sig_size = 0,
1010 		.data = profile_addr,
1011 		.size = profile_size
1012 	};
1013 
1014 	/* Copy the provided UUID */
1015 	memcpy(profile.uuid, profile_uuid, sizeof(profile.uuid));
1016 
1017 	return csblob_register_profile(csblob, &profile);
1018 }
1019 
1020 /*
1021  * CODESIGNING
1022  * End of routines to navigate code signing data structures in the kernel.
1023  */
1024 
1025 
1026 
1027 /*
1028  * ubc_info_init
1029  *
1030  * Allocate and attach an empty ubc_info structure to a vnode
1031  *
1032  * Parameters:	vp			Pointer to the vnode
1033  *
1034  * Returns:	0			Success
1035  *	vnode_size:ENOMEM		Not enough space
1036  *	vnode_size:???			Other error from vnode_getattr
1037  *
1038  */
int
ubc_info_init(struct vnode *vp)
{
	/* withfsize == 0: the size is obtained from the vnode itself */
	return ubc_info_init_internal(vp, 0, 0);
}
1044 
1045 
1046 /*
1047  * ubc_info_init_withsize
1048  *
1049  * Allocate and attach a sized ubc_info structure to a vnode
1050  *
1051  * Parameters:	vp			Pointer to the vnode
1052  *		filesize		The size of the file
1053  *
1054  * Returns:	0			Success
1055  *	vnode_size:ENOMEM		Not enough space
1056  *	vnode_size:???			Other error from vnode_getattr
1057  */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	/* withfsize == 1: use the caller-supplied filesize, skip vnode_size() */
	return ubc_info_init_internal(vp, 1, filesize);
}
1063 
1064 
1065 /*
1066  * ubc_info_init_internal
1067  *
1068  * Allocate and attach a ubc_info structure to a vnode
1069  *
1070  * Parameters:	vp			Pointer to the vnode
1071  *		withfsize{0,1}		Zero if the size should be obtained
1072  *					from the vnode; otherwise, use filesize
1073  *		filesize		The size of the file, if withfsize == 1
1074  *
1075  * Returns:	0			Success
1076  *	vnode_size:ENOMEM		Not enough space
1077  *	vnode_size:???			Other error from vnode_getattr
1078  *
1079  * Notes:	We call a blocking zalloc(), and the zone was created as an
1080  *		expandable and collectable zone, so if no memory is available,
1081  *		it is possible for zalloc() to block indefinitely.  zalloc()
1082  *		may also panic if the zone of zones is exhausted, since it's
1083  *		NOT expandable.
1084  *
1085  *		We unconditionally call vnode_pager_setup(), even if this is
1086  *		a reuse of a ubc_info; in that case, we should probably assert
1087  *		that it does not already have a pager association, but do not.
1088  *
1089  *		Since memory_object_create_named() can only fail from receiving
1090  *		an invalid pager argument, the explicit check and panic is
1091  *		merely precautionary.
1092  */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	struct ubc_info *uip;
	void *  pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {
		/* Z_WAITOK: may block indefinitely if the zone is exhausted */
		uip = zalloc_flags(ubc_info_zone, Z_WAITOK | Z_ZERO);

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use bogus size.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
	    (memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS) {
		/* can only fail on an invalid pager argument; precautionary */
		panic("ubc_info_init: memory_object_create_named returned %d", kret);
	}

	assert(control);
	uip->ui_control = control;      /* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error) {
			uip->ui_size = 0;
		}
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */

	return error;
}
1178 
1179 
1180 /*
1181  * ubc_info_free
1182  *
1183  * Free a ubc_info structure
1184  *
1185  * Parameters:	uip			A pointer to the ubc_info to free
1186  *
1187  * Returns:	(void)
1188  *
1189  * Notes:	If there is a credential that has subsequently been associated
1190  *		with the ubc_info, the reference to the credential is dropped.
1191  *
1192  *		It's actually impossible for a ubc_info.ui_control to take the
1193  *		value MEMORY_OBJECT_CONTROL_NULL.
1194  */
1195 static void
ubc_info_free(struct ubc_info * uip)1196 ubc_info_free(struct ubc_info *uip)
1197 {
1198 	if (IS_VALID_CRED(uip->ui_ucred)) {
1199 		kauth_cred_unref(&uip->ui_ucred);
1200 	}
1201 
1202 	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
1203 		memory_object_control_deallocate(uip->ui_control);
1204 	}
1205 
1206 	cluster_release(uip);
1207 	ubc_cs_free(uip);
1208 
1209 	zfree(ubc_info_zone, uip);
1210 	return;
1211 }
1212 
1213 
void
ubc_info_deallocate(struct ubc_info *uip)
{
	/* Thin public wrapper; see ubc_info_free() for the teardown details */
	ubc_info_free(uip);
}
1219 
1220 /*
1221  * ubc_setsize_ex
1222  *
1223  * Tell the VM that the the size of the file represented by the vnode has
1224  * changed
1225  *
1226  * Parameters:	vp	   The vp whose backing file size is
1227  *					   being changed
1228  *				nsize  The new size of the backing file
1229  *				opts   Options
1230  *
1231  * Returns:	EINVAL for new size < 0
1232  *			ENOENT if no UBC info exists
1233  *          EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1234  *          Other errors (mapped to errno_t) returned by VM functions
1235  *
1236  * Notes:   This function will indicate success if the new size is the
1237  *		    same or larger than the old size (in this case, the
1238  *		    remainder of the file will require modification or use of
1239  *		    an existing upl to access successfully).
1240  *
1241  *		    This function will fail if the new file size is smaller,
1242  *		    and the memory region being invalidated was unable to
1243  *		    actually be invalidated and/or the last page could not be
1244  *		    flushed, if the new size is not aligned to a page
1245  *		    boundary.  This is usually indicative of an I/O error.
1246  */
errno_t
ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;    /* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0) {
		return EINVAL;
	}

	if (!UBCINFOEXISTS(vp)) {
		return ENOENT;
	}

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	/*
	 * Shrinking may re-enter the filesystem (UPL creation below); let
	 * callers who can't tolerate that bail out and retry later.
	 */
	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
		return EAGAIN;
	}

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {   /* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t           upl;
		upl_page_info_t *pl;

		/*
		 * new EOF ends up in the middle of a page
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);

		if (kret != KERN_SUCCESS) {
			panic("ubc_setsize: ubc_create_upl (error = %d)", kret);
		}

		if (upl_valid_page(pl, 0)) {
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
		}

		/* zeroing was done in place; no pages need to be pushed out */
		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		/* the partial page was handled; invalidate starting after it */
		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int     flags;

		if (lastpg == 0) {
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		} else {
			flags = MEMORY_OBJECT_DATA_FLUSH;
		}
		/*
		 * invalidate the pages beyond the new EOF page
		 *
		 */
		kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
		    MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS) {
			/* usually indicative of an I/O error; see block comment */
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
		}
	}
	return mach_to_bsd_errno(kret);
}
1341 
1342 // Returns true for success
1343 int
ubc_setsize(vnode_t vp,off_t nsize)1344 ubc_setsize(vnode_t vp, off_t nsize)
1345 {
1346 	return ubc_setsize_ex(vp, nsize, 0) == 0;
1347 }
1348 
1349 /*
1350  * ubc_getsize
1351  *
1352  * Get the size of the file assocated with the specified vnode
1353  *
1354  * Parameters:	vp			The vnode whose size is of interest
1355  *
1356  * Returns:	0			There is no ubc_info associated with
1357  *					this vnode, or the size is zero
1358  *		!0			The size of the file
1359  *
1360  * Notes:	Using this routine, it is not possible for a caller to
1361  *		successfully distinguish between a vnode associate with a zero
1362  *		length file, and a vnode with no associated ubc_info.  The
1363  *		caller therefore needs to not care, or needs to ensure that
1364  *		they have previously successfully called ubc_info_init() or
1365  *		ubc_info_init_withsize().
1366  */
1367 off_t
ubc_getsize(struct vnode * vp)1368 ubc_getsize(struct vnode *vp)
1369 {
1370 	/* people depend on the side effect of this working this way
1371 	 * as they call this for directory
1372 	 */
1373 	if (!UBCINFOEXISTS(vp)) {
1374 		return (off_t)0;
1375 	}
1376 	return vp->v_ubcinfo->ui_size;
1377 }
1378 
1379 
1380 /*
1381  * ubc_umount
1382  *
1383  * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1384  * mount point
1385  *
1386  * Parameters:	mp			The mount point
1387  *
1388  * Returns:	0			Success
1389  *
1390  * Notes:	There is no failure indication for this function.
1391  *
1392  *		This function is used in the unmount path; since it may block
1393  *		I/O indefinitely, it should not be used in the forced unmount
1394  *		path, since a device unavailability could also block that
1395  *		indefinitely.
1396  *
1397  *		Because there is no device ejection interlock on USB, FireWire,
1398  *		or similar devices, it's possible that an ejection that begins
1399  *		subsequent to the vnode_iterate() completing, either on one of
1400  *		those devices, or a network mount for which the server quits
1401  *		responding, etc., may cause the caller to block indefinitely.
1402  */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	/* Push all pages for every vnode on this mount; cannot fail */
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return 0;
}
1409 
1410 
1411 /*
1412  * ubc_umcallback
1413  *
1414  * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1415  * and vnode_iterate() for details of implementation.
1416  */
1417 static int
ubc_umcallback(vnode_t vp,__unused void * args)1418 ubc_umcallback(vnode_t vp, __unused void * args)
1419 {
1420 	if (UBCINFOEXISTS(vp)) {
1421 		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1422 	}
1423 	return VNODE_RETURNED;
1424 }
1425 
1426 
1427 /*
1428  * ubc_getcred
1429  *
1430  * Get the credentials currently active for the ubc_info associated with the
1431  * vnode.
1432  *
1433  * Parameters:	vp			The vnode whose ubc_info credentials
1434  *					are to be retrieved
1435  *
1436  * Returns:	!NOCRED			The credentials
1437  *		NOCRED			If there is no ubc_info for the vnode,
1438  *					or if there is one, but it has not had
1439  *					any credentials associated with it.
1440  */
1441 kauth_cred_t
ubc_getcred(struct vnode * vp)1442 ubc_getcred(struct vnode *vp)
1443 {
1444 	if (UBCINFOEXISTS(vp)) {
1445 		return vp->v_ubcinfo->ui_ucred;
1446 	}
1447 
1448 	return NOCRED;
1449 }
1450 
1451 
1452 /*
1453  * ubc_setthreadcred
1454  *
1455  * If they are not already set, set the credentials of the ubc_info structure
1456  * associated with the vnode to those of the supplied thread; otherwise leave
1457  * them alone.
1458  *
1459  * Parameters:	vp			The vnode whose ubc_info creds are to
1460  *					be set
1461  *		p			The process whose credentials are to
1462  *					be used, if not running on an assumed
1463  *					credential
1464  *		thread			The thread whose credentials are to
1465  *					be used
1466  *
1467  * Returns:	1			This vnode has no associated ubc_info
1468  *		0			Success
1469  *
1470  * Notes:	This function is generally used only in the following cases:
1471  *
1472  *		o	a memory mapped file via the mmap() system call
1473  *		o	a swap store backing file
1474  *		o	subsequent to a successful write via vn_write()
1475  *
1476  *		The information is then used by the NFS client in order to
1477  *		cons up a wire message in either the page-in or page-out path.
1478  *
1479  *		There are two potential problems with the use of this API:
1480  *
1481  *		o	Because the write path only set it on a successful
1482  *			write, there is a race window between setting the
1483  *			credential and its use to evict the pages to the
1484  *			remote file server
1485  *
1486  *		o	Because a page-in may occur prior to a write, the
1487  *			credential may not be set at this time, if the page-in
1488  *			is not the result of a mapping established via mmap().
1489  *
1490  *		In both these cases, this will be triggered from the paging
1491  *		path, which will instead use the credential of the current
1492  *		process, which in this case is either the dynamic_pager or
1493  *		the kernel task, both of which utilize "root" credentials.
1494  *
1495  *		This may potentially permit operations to occur which should
1496  *		be denied, or it may cause to be denied operations which
1497  *		should be permitted, depending on the configuration of the NFS
1498  *		server.
1499  */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
#pragma unused(p, thread)
	/* Callers are expected to pass the current proc/thread only */
	assert(p == current_proc());
	assert(thread == current_thread());

	/* Delegate to ubc_setcred() with the current thread's credential */
	return ubc_setcred(vp, kauth_cred_get());
}
1509 
1510 
1511 /*
1512  * ubc_setcred
1513  *
1514  * If they are not already set, set the credentials of the ubc_info structure
1515  * associated with the vnode to those specified; otherwise leave them
1516  * alone.
1517  *
1518  * Parameters:	vp			The vnode whose ubc_info creds are to
1519  *					be set
1520  *		ucred			The credentials to use
1521  *
1522  * Returns:	0			This vnode has no associated ubc_info
1523  *		1			Success
1524  *
1525  * Notes:	The return values for this function are inverted from nearly
1526  *		all other uses in the kernel.
1527  *
1528  *		See also ubc_setthreadcred(), above.
1529  */
1530 int
ubc_setcred(struct vnode * vp,kauth_cred_t ucred)1531 ubc_setcred(struct vnode *vp, kauth_cred_t ucred)
1532 {
1533 	struct ubc_info *uip;
1534 
1535 	/* If there is no ubc_info, deny the operation */
1536 	if (!UBCINFOEXISTS(vp)) {
1537 		return 0;
1538 	}
1539 
1540 	/*
1541 	 * Check to see if there is already a credential reference in the
1542 	 * ubc_info; if there is not, take one on the supplied credential.
1543 	 */
1544 	vnode_lock(vp);
1545 	uip = vp->v_ubcinfo;
1546 	if (!IS_VALID_CRED(uip->ui_ucred)) {
1547 		kauth_cred_ref(ucred);
1548 		uip->ui_ucred = ucred;
1549 	}
1550 	vnode_unlock(vp);
1551 
1552 	return 1;
1553 }
1554 
1555 /*
1556  * ubc_getpager
1557  *
1558  * Get the pager associated with the ubc_info associated with the vnode.
1559  *
1560  * Parameters:	vp			The vnode to obtain the pager from
1561  *
1562  * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
1563  *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
1564  *
1565  * Notes:	For each vnode that has a ubc_info associated with it, that
1566  *		ubc_info SHALL have a pager associated with it, so in the
1567  *		normal case, it's impossible to return VNODE_PAGER_NULL for
1568  *		a vnode with an associated ubc_info.
1569  */
1570 __private_extern__ memory_object_t
ubc_getpager(struct vnode * vp)1571 ubc_getpager(struct vnode *vp)
1572 {
1573 	if (UBCINFOEXISTS(vp)) {
1574 		return vp->v_ubcinfo->ui_pager;
1575 	}
1576 
1577 	return 0;
1578 }
1579 
1580 
1581 /*
1582  * ubc_getobject
1583  *
1584  * Get the memory object control associated with the ubc_info associated with
1585  * the vnode
1586  *
1587  * Parameters:	vp			The vnode to obtain the memory object
1588  *					from
1589  *		flags			DEPRECATED
1590  *
1591  * Returns:	!MEMORY_OBJECT_CONTROL_NULL
1592  *		MEMORY_OBJECT_CONTROL_NULL
1593  *
1594  * Notes:	Historically, if the flags were not "do not reactivate", this
1595  *		function would look up the memory object using the pager if
1596  *		it did not exist (this could be the case if the vnode had
1597  *		been previously reactivated).  The flags would also permit a
1598  *		hold to be requested, which would have created an object
1599  *		reference, if one had not already existed.  This usage is
1600  *		deprecated, as it would permit a race between finding and
1601  *		taking the reference vs. a single reference being dropped in
1602  *		another thread.
1603  */
1604 memory_object_control_t
ubc_getobject(struct vnode * vp,__unused int flags)1605 ubc_getobject(struct vnode *vp, __unused int flags)
1606 {
1607 	if (UBCINFOEXISTS(vp)) {
1608 		return vp->v_ubcinfo->ui_control;
1609 	}
1610 
1611 	return MEMORY_OBJECT_CONTROL_NULL;
1612 }
1613 
1614 /*
1615  * ubc_blktooff
1616  *
1617  * Convert a given block number to a memory backing object (file) offset for a
1618  * given vnode
1619  *
1620  * Parameters:	vp			The vnode in which the block is located
1621  *		blkno			The block number to convert
1622  *
1623  * Returns:	!-1			The offset into the backing object
1624  *		-1			There is no ubc_info associated with
1625  *					the vnode
1626  *		-1			An error occurred in the underlying VFS
1627  *					while translating the block to an
1628  *					offset; the most likely cause is that
1629  *					the caller specified a block past the
1630  *					end of the file, but this could also be
1631  *					any other error from VNOP_BLKTOOFF().
1632  *
1633  * Note:	Representing the error in band loses some information, but does
1634  *		not occlude a valid offset, since an off_t of -1 is normally
1635  *		used to represent EOF.  If we had a more reliable constant in
1636  *		our header files for it (i.e. explicitly cast to an off_t), we
1637  *		would use it here instead.
1638  */
1639 off_t
ubc_blktooff(vnode_t vp,daddr64_t blkno)1640 ubc_blktooff(vnode_t vp, daddr64_t blkno)
1641 {
1642 	off_t file_offset = -1;
1643 	int error;
1644 
1645 	if (UBCINFOEXISTS(vp)) {
1646 		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1647 		if (error) {
1648 			file_offset = -1;
1649 		}
1650 	}
1651 
1652 	return file_offset;
1653 }
1654 
1655 
1656 /*
1657  * ubc_offtoblk
1658  *
1659  * Convert a given offset in a memory backing object into a block number for a
1660  * given vnode
1661  *
1662  * Parameters:	vp			The vnode in which the offset is
1663  *					located
1664  *		offset			The offset into the backing object
1665  *
1666  * Returns:	!-1			The returned block number
1667  *		-1			There is no ubc_info associated with
1668  *					the vnode
1669  *		-1			An error occurred in the underlying VFS
1670  *					while translating the block to an
1671  *					offset; the most likely cause is that
1672  *					the caller specified a block past the
1673  *					end of the file, but this could also be
1674  *					any other error from VNOP_OFFTOBLK().
1675  *
1676  * Note:	Representing the error in band loses some information, but does
1677  *		not occlude a valid block number, since block numbers exceed
1678  *		the valid range for offsets, due to their relative sizes.  If
1679  *		we had a more reliable constant than -1 in our header files
1680  *		for it (i.e. explicitly cast to an daddr64_t), we would use it
1681  *		here instead.
1682  */
1683 daddr64_t
ubc_offtoblk(vnode_t vp,off_t offset)1684 ubc_offtoblk(vnode_t vp, off_t offset)
1685 {
1686 	daddr64_t blkno = -1;
1687 	int error = 0;
1688 
1689 	if (UBCINFOEXISTS(vp)) {
1690 		error = VNOP_OFFTOBLK(vp, offset, &blkno);
1691 		if (error) {
1692 			blkno = -1;
1693 		}
1694 	}
1695 
1696 	return blkno;
1697 }
1698 
1699 
1700 /*
1701  * ubc_pages_resident
1702  *
1703  * Determine whether or not a given vnode has pages resident via the memory
1704  * object control associated with the ubc_info associated with the vnode
1705  *
1706  * Parameters:	vp			The vnode we want to know about
1707  *
1708  * Returns:	1			Yes
1709  *		0			No
1710  */
1711 int
ubc_pages_resident(vnode_t vp)1712 ubc_pages_resident(vnode_t vp)
1713 {
1714 	kern_return_t           kret;
1715 	boolean_t                       has_pages_resident;
1716 
1717 	if (!UBCINFOEXISTS(vp)) {
1718 		return 0;
1719 	}
1720 
1721 	/*
1722 	 * The following call may fail if an invalid ui_control is specified,
1723 	 * or if there is no VM object associated with the control object.  In
1724 	 * either case, reacting to it as if there were no pages resident will
1725 	 * result in correct behavior.
1726 	 */
1727 	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1728 
1729 	if (kret != KERN_SUCCESS) {
1730 		return 0;
1731 	}
1732 
1733 	if (has_pages_resident == TRUE) {
1734 		return 1;
1735 	}
1736 
1737 	return 0;
1738 }
1739 
1740 /*
1741  * ubc_msync
1742  *
1743  * Clean and/or invalidate a range in the memory object that backs this vnode
1744  *
1745  * Parameters:	vp			The vnode whose associated ubc_info's
1746  *					associated memory object is to have a
1747  *					range invalidated within it
1748  *		beg_off			The start of the range, as an offset
1749  *		end_off			The end of the range, as an offset
1750  *		resid_off		The address of an off_t supplied by the
1751  *					caller; may be set to NULL to ignore
1752  *		flags			See ubc_msync_internal()
1753  *
1754  * Returns:	0			Success
1755  *		!0			Failure; an errno is returned
1756  *
1757  * Implicit Returns:
1758  *		*resid_off, modified	If non-NULL, the  contents are ALWAYS
1759  *					modified; they are initialized to the
1760  *					beg_off, and in case of an I/O error,
1761  *					the difference between beg_off and the
1762  *					current value will reflect what was
1763  *					able to be written before the error
1764  *					occurred.  If no error is returned, the
1765  *					value of the resid_off is undefined; do
1766  *					NOT use it in place of end_off if you
1767  *					intend to increment from the end of the
1768  *					last call and call iteratively.
1769  *
1770  * Notes:	see ubc_msync_internal() for more detailed information.
1771  *
1772  */
1773 errno_t
ubc_msync(vnode_t vp,off_t beg_off,off_t end_off,off_t * resid_off,int flags)1774 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1775 {
1776 	int retval;
1777 	int io_errno = 0;
1778 
1779 	if (resid_off) {
1780 		*resid_off = beg_off;
1781 	}
1782 
1783 	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1784 
1785 	if (retval == 0 && io_errno == 0) {
1786 		return EINVAL;
1787 	}
1788 	return io_errno;
1789 }
1790 
1791 
1792 /*
1793  * ubc_msync_internal
1794  *
1795  * Clean and/or invalidate a range in the memory object that backs this vnode
1796  *
1797  * Parameters:	vp			The vnode whose associated ubc_info's
1798  *					associated memory object is to have a
1799  *					range invalidated within it
1800  *		beg_off			The start of the range, as an offset
1801  *		end_off			The end of the range, as an offset
1802  *		resid_off		The address of an off_t supplied by the
1803  *					caller; may be set to NULL to ignore
1804  *		flags			MUST contain at least one of the flags
1805  *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
1806  *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1807  *					UBC_SYNC may also be specified to cause
1808  *					this function to block until the
1809  *					operation is complete.  The behavior
1810  *					of UBC_SYNC is otherwise undefined.
1811  *		io_errno		The address of an int to contain the
1812  *					errno from a failed I/O operation, if
1813  *					one occurs; may be set to NULL to
1814  *					ignore
1815  *
1816  * Returns:	1			Success
1817  *		0			Failure
1818  *
1819  * Implicit Returns:
1820  *		*resid_off, modified	The contents of this offset MAY be
1821  *					modified; in case of an I/O error, the
1822  *					difference between beg_off and the
1823  *					current value will reflect what was
1824  *					able to be written before the error
1825  *					occurred.
1826  *		*io_errno, modified	The contents of this offset are set to
1827  *					an errno, if an error occurs; if the
1828  *					caller supplies an io_errno parameter,
1829  *					they should be careful to initialize it
1830  *					to 0 before calling this function to
1831  *					enable them to distinguish an error
1832  *					with a valid *resid_off from an invalid
1833  *					one, and to avoid potentially falsely
1834  *					reporting an error, depending on use.
1835  *
1836  * Notes:	If there is no ubc_info associated with the vnode supplied,
1837  *		this function immediately returns success.
1838  *
1839  *		If the value of end_off is less than or equal to beg_off, this
1840  *		function immediately returns success; that is, end_off is NOT
1841  *		inclusive.
1842  *
1843  *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1844  *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1845  *		attempt to block on in-progress I/O by calling this function
1846  *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1847  *		in order to block pending on the I/O already in progress.
1848  *
1849  *		The start offset is truncated to the page boundary and the
1850  *		size is adjusted to include the last page in the range; that
1851  *		is, end_off on exactly a page boundary will not change if it
1852  *		is rounded, and the range of bytes written will be from the
1853  *		truncate beg_off to the rounded (end_off - 1).
1854  */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t    tsize;
	kern_return_t           kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if (!UBCINFOEXISTS(vp)) {
		return 0;
	}
	/* at least one of INVALIDATE/PUSHDIRTY/PUSHALL must be specified */
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
		return 0;
	}
	/* empty range: trivially successful (end_off is exclusive) */
	if (end_off <= beg_off) {
		return 1;
	}

	if (flags & UBC_INVALIDATE) {
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
	}

	if (flags & UBC_SYNC) {
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;
	}

	if (flags & UBC_PUSHDIRTY) {
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
	}

	if (flags & UBC_PUSHALL) {
		/*
		 * then return all the interesting pages in the range (both
		 * dirty and precious) to the pager
		 *
		 * NOTE: checked after UBC_PUSHDIRTY, so PUSHALL wins when
		 * both flags are set.
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;
	}

	/* page-align the range: truncate the start, round up the end */
	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
	    beg_off, tsize,
	    (memory_object_offset_t *)resid_off,
	    io_errno, flush_flags, request_flags,
	    VM_PROT_NO_CHANGE);

	return (kret == KERN_SUCCESS) ? 1 : 0;
}
1915 
1916 
1917 /*
1918  * ubc_map
1919  *
1920  * Explicitly map a vnode that has an associate ubc_info, and add a reference
1921  * to it for the ubc system, if there isn't one already, so it will not be
1922  * recycled while it's in use, and set flags on the ubc_info to indicate that
1923  * we have done this
1924  *
1925  * Parameters:	vp			The vnode to map
1926  *		flags			The mapping flags for the vnode; this
1927  *					will be a combination of one or more of
1928  *					PROT_READ, PROT_WRITE, and PROT_EXEC
1929  *
1930  * Returns:	0			Success
1931  *		EPERM			Permission was denied
1932  *
1933  * Notes:	An I/O reference on the vnode must already be held on entry
1934  *
1935  *		If there is no ubc_info associated with the vnode, this function
1936  *		will return success.
1937  *
1938  *		If a permission error occurs, this function will return
1939  *		failure; all other failures will cause this function to return
1940  *		success.
1941  *
1942  *		IMPORTANT: This is an internal use function, and its symbols
1943  *		are not exported, hence its error checking is not very robust.
1944  *		It is primarily used by:
1945  *
1946  *		o	mmap(), when mapping a file
1947  *		o	When mapping a shared file (a shared library in the
1948  *			shared segment region)
1949  *		o	When loading a program image during the exec process
1950  *
1951  *		...all of these uses ignore the return code, and any fault that
1952  *		results later because of a failure is handled in the fix-up path
1953  *		of the fault handler.  The interface exists primarily as a
1954  *		performance hint.
1955  *
1956  *		Given that third party implementation of the type of interfaces
1957  *		that would use this function, such as alternative executable
1958  *		formats, etc., are unsupported, this function is not exported
1959  *		for general use.
1960  *
1961  *		The extra reference is held until the VM system unmaps the
1962  *		vnode from its own context to maintain a vnode reference in
1963  *		cases like open()/mmap()/close(), which leave the backing
1964  *		object referenced by a mapped memory region in a process
1965  *		address space.
1966  */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;       /* take the ubc vnode ref on first mapping */
	int need_wakeup = 0;    /* wake threads waiting on UI_MAPBUSY */

	/*
	 * This call is non-blocking and does not ever fail but it can
	 * only be made when there is other explicit synchronization
	 * with reclaiming of the vnode which, in this path, is provided
	 * by the "mapping in progress" counter.
	 */
	error = vnode_getalways_from_pager(vp);
	if (error != 0) {
		/* This can't happen */
		panic("vnode_getalways returned %d for vp %p", error, vp);
	}

	if (UBCINFOEXISTS(vp) == 0) {
		/*
		 * The vnode might have started being reclaimed (forced unmount?) while
		 * this call was in progress.
		 * The caller is not expecting an error but is expected to figure out that
		 * the "pager" it used for this vnode is now gone.
		 */
		error = 0;
	} else {
		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/*
		 * Serialize with any concurrent ubc_map()/ubc_unmap(): only one
		 * thread at a time may hold UI_MAPBUSY; others flag UI_MAPWAITING
		 * and sleep until the owner wakes them.
		 */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
			    PRIBIO, "ubc_map", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);
		vnode_unlock(vp);

		/* Notify the filesystem that this vnode is being mapped. */
		error = VNOP_MMAP(vp, flags, vfs_context_current());

		/*
		 * rdar://problem/22587101 required that we stop propagating
		 * EPERM up the stack. Otherwise, we would have to funnel up
		 * the error at all the call sites for memory_object_map().
		 * The risk is in having to undo the map/object/entry state at
		 * all these call sites. It would also affect more than just mmap()
		 * e.g. vm_remap().
		 *
		 *	if (error != EPERM)
		 *              error = 0;
		 */

		error = 0;

		vnode_lock_spin(vp);

		/*
		 * NOTE: error is always 0 here by design (see the rdar note
		 * above), so the mapped flags are set unconditionally.
		 */
		if (error == 0) {
			if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
				need_ref = 1;
			}
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
			if (flags & PROT_WRITE) {
				SET(uip->ui_flags, (UI_WASMAPPEDWRITE | UI_MAPPEDWRITE));
			}
		}
		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup(&uip->ui_flags);
		}

		if (need_ref) {
			/*
			 * Make sure we get a ref as we can't unwind from here
			 */
			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
				panic("%s : VNODE_REF_FORCE failed", __FUNCTION__);
			}
			/*
			 * Vnodes that are on "unreliable" media (like disk
			 * images, network filesystems, 3rd-party filesystems,
			 * and possibly external devices) could see their
			 * contents be changed via the backing store without
			 * triggering copy-on-write, so we can't fully rely
			 * on copy-on-write and might have to resort to
			 * copy-on-read to protect "privileged" processes and
			 * prevent privilege escalation.
			 *
			 * The root filesystem is considered "reliable" because
			 * there's not much point in trying to protect
			 * ourselves from such a vulnerability and the extra
			 * cost of copy-on-read (CPU time and memory pressure)
			 * could result in some serious regressions.
			 */
			if (vp->v_mount != NULL &&
			    ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
			    vnode_on_reliable_media(vp))) {
				/*
				 * This vnode is deemed "reliable" so mark
				 * its VM object as "trusted".
				 */
				memory_object_mark_trusted(uip->ui_control);
			} else {
//				printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
			}
		}
	}
	vnode_put_from_pager(vp);

	return error;
}
2086 
2087 
2088 /*
2089  * ubc_destroy_named
2090  *
2091  * Destroy the named memory object associated with the ubc_info control object
2092  * associated with the designated vnode, if there is a ubc_info associated
2093  * with the vnode, and a control object is associated with it
2094  *
2095  * Parameters:	vp			The designated vnode
2096  *
2097  * Returns:	(void)
2098  *
2099  * Notes:	This function is called on vnode termination for all vnodes,
2100  *		and must therefore not assume that there is a ubc_info that is
2101  *		associated with the vnode, nor that there is a control object
2102  *		associated with the ubc_info.
2103  *
2104  *		If all the conditions necessary are present, this function
 *		calls memory_object_destroy(), which will in turn end up
2106  *		calling ubc_unmap() to release any vnode references that were
2107  *		established via ubc_map().
2108  *
2109  *		IMPORTANT: This is an internal use function that is used
2110  *		exclusively by the internal use function vclean().
2111  */
2112 __private_extern__ void
ubc_destroy_named(vnode_t vp,vm_object_destroy_reason_t reason)2113 ubc_destroy_named(vnode_t vp, vm_object_destroy_reason_t reason)
2114 {
2115 	memory_object_control_t control;
2116 	struct ubc_info *uip;
2117 	kern_return_t kret;
2118 
2119 	if (UBCINFOEXISTS(vp)) {
2120 		uip = vp->v_ubcinfo;
2121 
2122 		/* Terminate the memory object  */
2123 		control = ubc_getobject(vp, UBC_HOLDOBJECT);
2124 		if (control != MEMORY_OBJECT_CONTROL_NULL) {
2125 			kret = memory_object_destroy(control, reason);
2126 			if (kret != KERN_SUCCESS) {
2127 				panic("ubc_destroy_named: memory_object_destroy failed");
2128 			}
2129 		}
2130 	}
2131 }
2132 
2133 
2134 /*
2135  * ubc_isinuse
2136  *
2137  * Determine whether or not a vnode is currently in use by ubc at a level in
2138  * excess of the requested busycount
2139  *
2140  * Parameters:	vp			The vnode to check
2141  *		busycount		The threshold busy count, used to bias
2142  *					the count usually already held by the
2143  *					caller to avoid races
2144  *
2145  * Returns:	1			The vnode is in use over the threshold
2146  *		0			The vnode is not in use over the
2147  *					threshold
2148  *
2149  * Notes:	Because the vnode is only held locked while actually asking
2150  *		the use count, this function only represents a snapshot of the
2151  *		current state of the vnode.  If more accurate information is
2152  *		required, an additional busycount should be held by the caller
2153  *		and a non-zero busycount used.
2154  *
2155  *		If there is no ubc_info associated with the vnode, this
2156  *		function will report that the vnode is not in use by ubc.
2157  */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	/*
	 * A vnode without a ubc_info is, by definition, not in use by ubc;
	 * otherwise defer to the (unlocked-entry) worker.
	 */
	return UBCINFOEXISTS(vp) ? ubc_isinuse_locked(vp, busycount, 0) : 0;
}
2166 
2167 
2168 /*
2169  * ubc_isinuse_locked
2170  *
2171  * Determine whether or not a vnode is currently in use by ubc at a level in
2172  * excess of the requested busycount
2173  *
2174  * Parameters:	vp			The vnode to check
2175  *		busycount		The threshold busy count, used to bias
2176  *					the count usually already held by the
2177  *					caller to avoid races
2178  *		locked			True if the vnode is already locked by
2179  *					the caller
2180  *
2181  * Returns:	1			The vnode is in use over the threshold
2182  *		0			The vnode is not in use over the
2183  *					threshold
2184  *
2185  * Notes:	If the vnode is not locked on entry, it is locked while
2186  *		actually asking the use count.  If this is the case, this
2187  *		function only represents a snapshot of the current state of
2188  *		the vnode.  If more accurate information is required, the
2189  *		vnode lock should be held by the caller, otherwise an
2190  *		additional busycount should be held by the caller and a
2191  *		non-zero busycount used.
2192  *
2193  *		If there is no ubc_info associated with the vnode, this
2194  *		function will report that the vnode is not in use by ubc.
2195  */
2196 int
ubc_isinuse_locked(struct vnode * vp,int busycount,int locked)2197 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2198 {
2199 	int retval = 0;
2200 
2201 
2202 	if (!locked) {
2203 		vnode_lock_spin(vp);
2204 	}
2205 
2206 	if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2207 		retval = 1;
2208 	}
2209 
2210 	if (!locked) {
2211 		vnode_unlock(vp);
2212 	}
2213 	return retval;
2214 }
2215 
2216 
2217 /*
2218  * ubc_unmap
2219  *
2220  * Reverse the effects of a ubc_map() call for a given vnode
2221  *
2222  * Parameters:	vp			vnode to unmap from ubc
2223  *
2224  * Returns:	(void)
2225  *
2226  * Notes:	This is an internal use function used by vnode_pager_unmap().
2227  *		It will attempt to obtain a reference on the supplied vnode,
2228  *		and if it can do so, and there is an associated ubc_info, and
2229  *		the flags indicate that it was mapped via ubc_map(), then the
2230  *		flag is cleared, the mapping removed, and the reference taken
2231  *		by ubc_map() is released.
2232  *
2233  *		IMPORTANT: This MUST only be called by the VM
2234  *		to prevent race conditions.
2235  */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	int     need_rele = 0;    /* drop the ref taken by ubc_map() */
	int     need_wakeup = 0;  /* wake threads waiting on UI_MAPBUSY */
	int     error = 0;

	/*
	 * This call is non-blocking and does not ever fail but it can
	 * only be made when there is other explicit synchronization
	 * with reclaiming of the vnode which, in this path, is provided
	 * by the "mapping in progress" counter.
	 */
	error = vnode_getalways_from_pager(vp);
	if (error != 0) {
		/* This can't happen */
		panic("vnode_getalways returned %d for vp %p", error, vp);
	}

	if (UBCINFOEXISTS(vp) == 0) {
		/*
		 * The vnode might have started being reclaimed (forced unmount?) while
		 * this call was in progress.
		 * The caller is not expecting an error but is expected to figure out that
		 * the "pager" it used for this vnode is now gone and take appropriate
		 * action.
		 */
	} else {
		bool want_fsevent = false;

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/* Serialize with concurrent ubc_map()/ubc_unmap() via UI_MAPBUSY. */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
			    PRIBIO, "ubc_unmap", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
				want_fsevent = true;
			}

			need_rele = 1;

			/*
			 * We want to clear the mapped flags after we've called
			 * VNOP_MNOMAP to avoid certain races and allow
			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
			 */
		}
		vnode_unlock(vp);

		if (need_rele) {
			vfs_context_t ctx = vfs_context_current();

			/* Tell the filesystem the last mapping is going away. */
			(void)VNOP_MNOMAP(vp, ctx);

#if CONFIG_FSE
			/*
			 * Why do we want an fsevent here?  Normally the
			 * content modified fsevent is posted when a file is
			 * closed and only if it's written to via conventional
			 * means.  It's perfectly legal to close a file and
			 * keep your mappings and we don't currently track
			 * whether it was written to via a mapping.
			 * Therefore, we need to post an fsevent here if the
			 * file was mapped writable.  This may result in false
			 * events, i.e. we post a notification when nothing
			 * has really changed.
			 */
			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
				add_fsevent(FSE_CONTENT_MODIFIED_NO_HLINK, ctx,
				    FSE_ARG_VNODE, vp,
				    FSE_ARG_DONE);
			}
#endif

			/* Release the reference taken by ubc_map(). */
			vnode_rele(vp);
		}

		vnode_lock_spin(vp);

		/* Flags are cleared only after VNOP_MNOMAP (see note above). */
		if (need_rele) {
			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
		}

		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup(&uip->ui_flags);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put_from_pager(vp);
}
2343 
2344 
2345 /*
2346  * ubc_page_op
2347  *
2348  * Manipulate individual page state for a vnode with an associated ubc_info
2349  * with an associated memory object control.
2350  *
2351  * Parameters:	vp			The vnode backing the page
2352  *		f_offset		A file offset interior to the page
2353  *		ops			The operations to perform, as a bitmap
2354  *					(see below for more information)
2355  *		phys_entryp		The address of a ppnum_t; may be NULL
2356  *					to ignore
2357  *		flagsp			A pointer to an int to contain flags;
2358  *					may be NULL to ignore
2359  *
2360  * Returns:	KERN_SUCCESS		Success
2361  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2362  *					object associated
2363  *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
2364  *					not physically contiguous
2365  *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
2366  *					physically contiguous
2367  *		KERN_FAILURE		If the page cannot be looked up
2368  *
2369  * Implicit Returns:
2370  *		*phys_entryp (modified)	If phys_entryp is non-NULL and
2371  *					UPL_POP_PHYSICAL
2372  *		*flagsp (modified)	If flagsp is non-NULL and there was
2373  *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
2374  *
2375  * Notes:	For object boundaries, it is considerably more efficient to
2376  *		ensure that f_offset is in fact on a page boundary, as this
2377  *		will avoid internal use of the hash table to identify the
2378  *		page, and would therefore skip a number of early optimizations.
2379  *		Since this is a page operation anyway, the caller should try
2380  *		to pass only a page aligned offset because of this.
2381  *
2382  *		*flagsp may be modified even if this function fails.  If it is
2383  *		modified, it will contain the condition of the page before the
2384  *		requested operation was attempted; these will only include the
2385  *		bitmap flags, and not the PL_POP_PHYSICAL, UPL_POP_DUMP,
2386  *		UPL_POP_SET, or UPL_POP_CLR bits.
2387  *
2388  *		The flags field may contain a specific operation, such as
2389  *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
2390  *
2391  *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
2392  *						*phys_entryp and successful, set
2393  *						*phys_entryp
2394  *		o	UPL_POP_DUMP		Dump the specified page
2395  *
2396  *		Otherwise, it is treated as a bitmap of one or more page
2397  *		operations to perform on the final memory object; allowable
2398  *		bit values are:
2399  *
2400  *		o	UPL_POP_DIRTY		The page is dirty
2401  *		o	UPL_POP_PAGEOUT		The page is paged out
2402  *		o	UPL_POP_PRECIOUS	The page is precious
2403  *		o	UPL_POP_ABSENT		The page is absent
2404  *		o	UPL_POP_BUSY		The page is busy
2405  *
2406  *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
2408  *		modified, exactly ONE of the following bits should be set:
2409  *
2410  *		o	UPL_POP_SET		Set the current bitmap bits
2411  *		o	UPL_POP_CLR		Clear the current bitmap bits
2412  *
 *		Thus to effect a combination of setting and clearing, it may be
2414  *		necessary to call this function twice.  If this is done, the
2415  *		set should be used before the clear, since clearing may trigger
2416  *		a wakeup on the destination page, and if the page is backed by
2417  *		an encrypted swap file, setting will trigger the decryption
2418  *		needed before the wakeup occurs.
2419  */
2420 kern_return_t
ubc_page_op(struct vnode * vp,off_t f_offset,int ops,ppnum_t * phys_entryp,int * flagsp)2421 ubc_page_op(
2422 	struct vnode    *vp,
2423 	off_t           f_offset,
2424 	int             ops,
2425 	ppnum_t *phys_entryp,
2426 	int             *flagsp)
2427 {
2428 	memory_object_control_t         control;
2429 
2430 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2431 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2432 		return KERN_INVALID_ARGUMENT;
2433 	}
2434 
2435 	return memory_object_page_op(control,
2436 	           (memory_object_offset_t)f_offset,
2437 	           ops,
2438 	           phys_entryp,
2439 	           flagsp);
2440 }
2441 
2442 
2443 /*
2444  * ubc_range_op
2445  *
2446  * Manipulate page state for a range of memory for a vnode with an associated
2447  * ubc_info with an associated memory object control, when page level state is
2448  * not required to be returned from the call (i.e. there are no phys_entryp or
2449  * flagsp parameters to this call, and it takes a range which may contain
2450  * multiple pages, rather than an offset interior to a single page).
2451  *
2452  * Parameters:	vp			The vnode backing the page
2453  *		f_offset_beg		A file offset interior to the start page
2454  *		f_offset_end		A file offset interior to the end page
2455  *		ops			The operations to perform, as a bitmap
2456  *					(see below for more information)
2457  *		range			The address of an int; may be NULL to
2458  *					ignore
2459  *
2460  * Returns:	KERN_SUCCESS		Success
2461  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2462  *					object associated
2463  *		KERN_INVALID_OBJECT	If the object is physically contiguous
2464  *
2465  * Implicit Returns:
2466  *		*range (modified)	If range is non-NULL, its contents will
2467  *					be modified to contain the number of
2468  *					bytes successfully operated upon.
2469  *
2470  * Notes:	IMPORTANT: This function cannot be used on a range that
2471  *		consists of physically contiguous pages.
2472  *
2473  *		For object boundaries, it is considerably more efficient to
2474  *		ensure that f_offset_beg and f_offset_end are in fact on page
2475  *		boundaries, as this will avoid internal use of the hash table
2476  *		to identify the page, and would therefore skip a number of
2477  *		early optimizations.  Since this is an operation on a set of
2478  *		pages anyway, the caller should try to pass only a page aligned
2479  *		offsets because of this.
2480  *
2481  *		*range will be modified only if this function succeeds.
2482  *
2483  *		The flags field MUST contain a specific operation; allowable
2484  *		values are:
2485  *
2486  *		o	UPL_ROP_ABSENT	Returns the extent of the range
2487  *					presented which is absent, starting
2488  *					with the start address presented
2489  *
2490  *		o	UPL_ROP_PRESENT	Returns the extent of the range
2491  *					presented which is present (resident),
2492  *					starting with the start address
2493  *					presented
2494  *		o	UPL_ROP_DUMP	Dump the pages which are found in the
2495  *					target object for the target range.
2496  *
2497  *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2498  *		multiple regions in the range, only the first matching region
2499  *		is returned.
2500  */
2501 kern_return_t
ubc_range_op(struct vnode * vp,off_t f_offset_beg,off_t f_offset_end,int ops,int * range)2502 ubc_range_op(
2503 	struct vnode    *vp,
2504 	off_t           f_offset_beg,
2505 	off_t           f_offset_end,
2506 	int             ops,
2507 	int             *range)
2508 {
2509 	memory_object_control_t         control;
2510 
2511 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2512 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2513 		return KERN_INVALID_ARGUMENT;
2514 	}
2515 
2516 	return memory_object_range_op(control,
2517 	           (memory_object_offset_t)f_offset_beg,
2518 	           (memory_object_offset_t)f_offset_end,
2519 	           ops,
2520 	           range);
2521 }
2522 
2523 
2524 /*
2525  * ubc_create_upl
2526  *
2527  * Given a vnode, cause the population of a portion of the vm_object; based on
2528  * the nature of the request, the pages returned may contain valid data, or
2529  * they may be uninitialized.
2530  *
2531  * Parameters:	vp			The vnode from which to create the upl
2532  *		f_offset		The start offset into the backing store
2533  *					represented by the vnode
2534  *		bufsize			The size of the upl to create
2535  *		uplp			Pointer to the upl_t to receive the
2536  *					created upl; MUST NOT be NULL
2537  *		plp			Pointer to receive the internal page
2538  *					list for the created upl; MAY be NULL
2539  *					to ignore
2540  *
2541  * Returns:	KERN_SUCCESS		The requested upl has been created
2542  *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
2543  *					multiple of the page size
2544  *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
2545  *					the vnode, or there is no memory object
2546  *					control associated with the ubc_info
2547  *	memory_object_upl_request:KERN_INVALID_VALUE
2548  *					The supplied upl_flags argument is
2549  *					invalid
2550  * Implicit Returns:
2551  *		*uplp (modified)
2552  *		*plp (modified)		If non-NULL, the value of *plp will be
2553  *					modified to point to the internal page
2554  *					list; this modification may occur even
2555  *					if this function is unsuccessful, in
2556  *					which case the contents may be invalid
2557  *
2558  * Note:	If successful, the returned *uplp MUST subsequently be freed
2559  *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2560  *		ubc_upl_abort(), or ubc_upl_abort_range().
2561  */
2562 kern_return_t
ubc_create_upl_external(struct vnode * vp,off_t f_offset,int bufsize,upl_t * uplp,upl_page_info_t ** plp,int uplflags)2563 ubc_create_upl_external(
2564 	struct vnode    *vp,
2565 	off_t           f_offset,
2566 	int             bufsize,
2567 	upl_t           *uplp,
2568 	upl_page_info_t **plp,
2569 	int             uplflags)
2570 {
2571 	return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2572 }
2573 
kern_return_t
ubc_create_upl_kernel(
	struct vnode    *vp,
	off_t           f_offset,
	int             bufsize,
	upl_t           *uplp,
	upl_page_info_t **plp,
	int             uplflags,
	vm_tag_t tag)
{
	memory_object_control_t         control;
	kern_return_t                   kr;

	/* Initialize outputs so failure paths leave them well-defined. */
	if (plp != NULL) {
		*plp = NULL;
	}
	*uplp = NULL;

	/*
	 * bufsize must be page-aligned and within the UPL size limit.
	 * NOTE(review): 0xfff hard-codes a 4KB alignment mask rather than
	 * using PAGE_MASK — presumably intentional here; confirm for
	 * configurations with a larger page size.
	 */
	if (bufsize & 0xfff) {
		return KERN_INVALID_ARGUMENT;
	}

	if (bufsize > MAX_UPL_SIZE_BYTES) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Canonicalize the caller's UBC request flags into the low-level
	 * UPL flags expected by memory_object_upl_request().  Each of the
	 * three UBC operation kinds keeps only UPL_RET_ONLY_DIRTY from the
	 * caller's flags and ORs in its own fixed set.
	 */
	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
		if (uplflags & UPL_UBC_MSYNC) {
			/* msync: push pages out, cleaning them in place. */
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;
		} else if (uplflags & UPL_UBC_PAGEOUT) {
			/* pageout: like msync, but don't block on busy pages
			 * when only dirty pages were requested. */
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY) {
				uplflags |= UPL_NOBLOCK;
			}

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
			    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			/* pagein: gather only absent pages. */
			uplflags |= UPL_RET_ONLY_ABSENT |
			    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE) {
				uplflags |= UPL_NOBLOCK;
			}
		}
	} else {
		/* Legacy path: no UBC operation flag supplied. */
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
		} else {
			uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		}
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
	if (kr == KERN_SUCCESS && plp != NULL) {
		/* UPL_SET_INTERNAL was forced above, so a page list exists. */
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	}
	return kr;
}
2653 
2654 
2655 /*
2656  * ubc_upl_maxbufsize
2657  *
2658  * Return the maximum bufsize ubc_create_upl( ) will take.
2659  *
2660  * Parameters:	none
2661  *
2662  * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
2663  */
2664 upl_size_t
ubc_upl_maxbufsize(void)2665 ubc_upl_maxbufsize(
2666 	void)
2667 {
2668 	return MAX_UPL_SIZE_BYTES;
2669 }
2670 
2671 /*
2672  * ubc_upl_map
2673  *
 * Map the page list associated with the supplied upl into the kernel virtual
2675  * address space at the virtual address indicated by the dst_addr argument;
2676  * the entire upl is mapped
2677  *
2678  * Parameters:	upl			The upl to map
2679  *		dst_addr		The address at which to map the upl
2680  *
2681  * Returns:	KERN_SUCCESS		The upl has been mapped
2682  *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
2683  *		KERN_FAILURE		The upl is already mapped
2684  *	vm_map_enter:KERN_INVALID_ARGUMENT
2685  *					A failure code from vm_map_enter() due
2686  *					to an invalid argument
2687  */
2688 kern_return_t
ubc_upl_map(upl_t upl,vm_offset_t * dst_addr)2689 ubc_upl_map(
2690 	upl_t           upl,
2691 	vm_offset_t     *dst_addr)
2692 {
2693 	return vm_upl_map(kernel_map, upl, dst_addr);
2694 }
2695 
2696 /*
2697  * ubc_upl_map_range:- similar to ubc_upl_map but the focus is on a range
2698  * of the UPL. Takes an offset, size, and protection so that only a  part
2699  * of the UPL can be mapped with the right protections.
2700  */
2701 kern_return_t
ubc_upl_map_range(upl_t upl,vm_offset_t offset_to_map,vm_size_t size_to_map,vm_prot_t prot_to_map,vm_offset_t * dst_addr)2702 ubc_upl_map_range(
2703 	upl_t           upl,
2704 	vm_offset_t     offset_to_map,
2705 	vm_size_t       size_to_map,
2706 	vm_prot_t       prot_to_map,
2707 	vm_offset_t     *dst_addr)
2708 {
2709 	return vm_upl_map_range(kernel_map, upl, offset_to_map, size_to_map, prot_to_map, dst_addr);
2710 }
2711 
2712 
2713 /*
2714  * ubc_upl_unmap
2715  *
 * Unmap the page list associated with the supplied upl from the kernel virtual
2717  * address space; the entire upl is unmapped.
2718  *
2719  * Parameters:	upl			The upl to unmap
2720  *
2721  * Returns:	KERN_SUCCESS		The upl has been unmapped
2722  *		KERN_FAILURE		The upl is not currently mapped
2723  *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
2724  */
2725 kern_return_t
ubc_upl_unmap(upl_t upl)2726 ubc_upl_unmap(
2727 	upl_t   upl)
2728 {
2729 	return vm_upl_unmap(kernel_map, upl);
2730 }
2731 
2732 /*
2733  * ubc_upl_unmap_range:- similar to ubc_upl_unmap but the focus is
2734  * on part of the UPL that is mapped. The offset and size parameter
2735  * specifies what part of the UPL needs to be unmapped.
2736  *
 * Note: Currently offset & size are unused as we always initiate the unmap from the
2738  * very beginning of the UPL's mapping and track the mapped size in the UPL. But we
2739  * might want to allow unmapping a UPL in the middle, for example, and we can use the
2740  * offset + size parameters for that purpose.
2741  */
2742 kern_return_t
ubc_upl_unmap_range(upl_t upl,vm_offset_t offset_to_unmap,vm_size_t size_to_unmap)2743 ubc_upl_unmap_range(
2744 	upl_t   upl,
2745 	vm_offset_t     offset_to_unmap,
2746 	vm_size_t       size_to_unmap)
2747 {
2748 	return vm_upl_unmap_range(kernel_map, upl, offset_to_unmap, size_to_unmap);
2749 }
2750 
2751 
2752 /*
2753  * ubc_upl_commit
2754  *
2755  * Commit the contents of the upl to the backing store
2756  *
2757  * Parameters:	upl			The upl to commit
2758  *
2759  * Returns:	KERN_SUCCESS		The upl has been committed
2760  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2761  *		KERN_FAILURE		The supplied upl does not represent
2762  *					device memory, and the offset plus the
2763  *					size would exceed the actual size of
2764  *					the upl
2765  *
2766  * Notes:	In practice, the only return value for this function should be
2767  *		KERN_SUCCESS, unless there has been data structure corruption;
2768  *		since the upl is deallocated regardless of success or failure,
2769  *		there's really nothing to do about this other than panic.
2770  *
2771  *		IMPORTANT: Use of this function should not be mixed with use of
2772  *		ubc_upl_commit_range(), due to the unconditional deallocation
2773  *		by this function.
2774  */
2775 kern_return_t
ubc_upl_commit(upl_t upl)2776 ubc_upl_commit(
2777 	upl_t                   upl)
2778 {
2779 	upl_page_info_t *pl;
2780 	kern_return_t   kr;
2781 
2782 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2783 	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2784 	upl_deallocate(upl);
2785 	return kr;
2786 }
2787 
2788 
2789 /*
 * ubc_upl_commit_range
2791  *
2792  * Commit the contents of the specified range of the upl to the backing store
2793  *
2794  * Parameters:	upl			The upl to commit
2795  *		offset			The offset into the upl
2796  *		size			The size of the region to be committed,
2797  *					starting at the specified offset
2798  *		flags			commit type (see below)
2799  *
2800  * Returns:	KERN_SUCCESS		The range has been committed
2801  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2802  *		KERN_FAILURE		The supplied upl does not represent
2803  *					device memory, and the offset plus the
2804  *					size would exceed the actual size of
2805  *					the upl
2806  *
2807  * Notes:	IMPORTANT: If the commit is successful, and the object is now
2808  *		empty, the upl will be deallocated.  Since the caller cannot
2809  *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2810  *		should generally only be used when the offset is 0 and the size
2811  *		is equal to the upl size.
2812  *
 *		The flags argument is a bitmap of flags on the range of pages in
2814  *		the upl to be committed; allowable flags are:
2815  *
2816  *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
2817  *							both empty and has been
2818  *							successfully committed
2819  *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each pages dirty
2820  *							bit; will prevent a
2821  *							later pageout
2822  *		o	UPL_COMMIT_SET_DIRTY		Set each pages dirty
2823  *							bit; will cause a later
2824  *							pageout
2825  *		o	UPL_COMMIT_INACTIVATE		Clear each pages
2826  *							reference bit; the page
2827  *							will not be accessed
2828  *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
2829  *							become busy when an
2830  *							IOMemoryDescriptor is
2831  *							mapped or redirected,
2832  *							and we have to wait for
2833  *							an IOKit driver
2834  *
2835  *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2836  *		not be specified by the caller.
2837  *
2838  *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2839  *		mutually exclusive, and should not be combined.
2840  */
2841 kern_return_t
ubc_upl_commit_range(upl_t upl,upl_offset_t offset,upl_size_t size,int flags)2842 ubc_upl_commit_range(
2843 	upl_t                   upl,
2844 	upl_offset_t            offset,
2845 	upl_size_t              size,
2846 	int                             flags)
2847 {
2848 	upl_page_info_t *pl;
2849 	boolean_t               empty;
2850 	kern_return_t   kr;
2851 
2852 	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2853 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
2854 	}
2855 
2856 	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2857 		return KERN_INVALID_ARGUMENT;
2858 	}
2859 
2860 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2861 
2862 	kr = upl_commit_range(upl, offset, size, flags,
2863 	    pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2864 
2865 	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2866 		upl_deallocate(upl);
2867 	}
2868 
2869 	return kr;
2870 }
2871 
2872 
2873 /*
2874  * ubc_upl_abort_range
2875  *
2876  * Abort the contents of the specified range of the specified upl
2877  *
2878  * Parameters:	upl			The upl to abort
2879  *		offset			The offset into the upl
2880  *		size			The size of the region to be aborted,
2881  *					starting at the specified offset
2882  *		abort_flags		abort type (see below)
2883  *
2884  * Returns:	KERN_SUCCESS		The range has been aborted
2885  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2886  *		KERN_FAILURE		The supplied upl does not represent
2887  *					device memory, and the offset plus the
2888  *					size would exceed the actual size of
2889  *					the upl
2890  *
2891  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2892  *		empty, the upl will be deallocated.  Since the caller cannot
2893  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2894  *		should generally only be used when the offset is 0 and the size
2895  *		is equal to the upl size.
2896  *
2897  *		The abort_flags argument is a bitmap of flags on the range of
2898  *		pages in the upl to be aborted; allowable flags are:
2899  *
2900  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2901  *						empty and has been successfully
2902  *						aborted
2903  *		o	UPL_ABORT_RESTART	The operation must be restarted
2904  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2905  *		o	UPL_ABORT_ERROR		An I/O error occurred
2906  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2907  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2908  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2909  *
2910  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2911  *		not be specified by the caller.  It is intended to fulfill the
2912  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2913  *		ubc_upl_commit_range(), but is never referenced internally.
2914  *
2915  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2916  *		referenced; do not use it.
2917  */
2918 kern_return_t
ubc_upl_abort_range(upl_t upl,upl_offset_t offset,upl_size_t size,int abort_flags)2919 ubc_upl_abort_range(
2920 	upl_t                   upl,
2921 	upl_offset_t            offset,
2922 	upl_size_t              size,
2923 	int                             abort_flags)
2924 {
2925 	kern_return_t   kr;
2926 	boolean_t               empty = FALSE;
2927 
2928 	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2929 		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2930 	}
2931 
2932 	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2933 
2934 	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2935 		upl_deallocate(upl);
2936 	}
2937 
2938 	return kr;
2939 }
2940 
2941 
2942 /*
2943  * ubc_upl_abort
2944  *
2945  * Abort the contents of the specified upl
2946  *
2947  * Parameters:	upl			The upl to abort
2948  *		abort_type		abort type (see below)
2949  *
2950  * Returns:	KERN_SUCCESS		The range has been aborted
2951  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2952  *		KERN_FAILURE		The supplied upl does not represent
2953  *					device memory, and the offset plus the
2954  *					size would exceed the actual size of
2955  *					the upl
2956  *
2957  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2958  *		empty, the upl will be deallocated.  Since the caller cannot
2959  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2960  *		should generally only be used when the offset is 0 and the size
2961  *		is equal to the upl size.
2962  *
2963  *		The abort_type is a bitmap of flags on the range of
2964  *		pages in the upl to be aborted; allowable flags are:
2965  *
2966  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2967  *						empty and has been successfully
2968  *						aborted
2969  *		o	UPL_ABORT_RESTART	The operation must be restarted
2970  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2971  *		o	UPL_ABORT_ERROR		An I/O error occurred
2972  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2973  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2974  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2975  *
2976  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2977  *		not be specified by the caller.  It is intended to fulfill the
2978  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2979  *		ubc_upl_commit_range(), but is never referenced internally.
2980  *
2981  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2982  *		referenced; do not use it.
2983  */
2984 kern_return_t
ubc_upl_abort(upl_t upl,int abort_type)2985 ubc_upl_abort(
2986 	upl_t                   upl,
2987 	int                             abort_type)
2988 {
2989 	kern_return_t   kr;
2990 
2991 	kr = upl_abort(upl, abort_type);
2992 	upl_deallocate(upl);
2993 	return kr;
2994 }
2995 
2996 
2997 /*
2998  * ubc_upl_pageinfo
2999  *
3000  *  Retrieve the internal page list for the specified upl
3001  *
3002  * Parameters:	upl			The upl to obtain the page list from
3003  *
3004  * Returns:	!NULL			The (upl_page_info_t *) for the page
3005  *					list internal to the upl
3006  *		NULL			Error/no page list associated
3007  *
3008  * Notes:	IMPORTANT: The function is only valid on internal objects
3009  *		where the list request was made with the UPL_INTERNAL flag.
3010  *
3011  *		This function is a utility helper function, since some callers
3012  *		may not have direct access to the header defining the macro,
3013  *		due to abstraction layering constraints.
3014  */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t                   upl)
{
	/*
	 * Thin accessor for callers that cannot use the UPL macros
	 * directly (abstraction layering); only meaningful for upls
	 * created with UPL_INTERNAL — otherwise there is no page list.
	 */
	return UPL_GET_INTERNAL_PAGE_LIST(upl);
}
3021 
3022 
3023 int
UBCINFOEXISTS(const struct vnode * vp)3024 UBCINFOEXISTS(const struct vnode * vp)
3025 {
3026 	return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
3027 }
3028 
3029 
void
ubc_upl_range_needed(
	upl_t           upl,
	int             index,
	int             count)
{
	/* Forward to the VM layer's upl_range_needed(); pure pass-through. */
	upl_range_needed(upl, index, count);
}
3038 
3039 boolean_t
ubc_is_mapped(const struct vnode * vp,boolean_t * writable)3040 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
3041 {
3042 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
3043 		return FALSE;
3044 	}
3045 	if (writable) {
3046 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
3047 	}
3048 	return TRUE;
3049 }
3050 
3051 boolean_t
ubc_is_mapped_writable(const struct vnode * vp)3052 ubc_is_mapped_writable(const struct vnode *vp)
3053 {
3054 	boolean_t writable;
3055 	return ubc_is_mapped(vp, &writable) && writable;
3056 }
3057 
3058 boolean_t
ubc_was_mapped(const struct vnode * vp,boolean_t * writable)3059 ubc_was_mapped(const struct vnode *vp, boolean_t *writable)
3060 {
3061 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPED)) {
3062 		return FALSE;
3063 	}
3064 	if (writable) {
3065 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPEDWRITE);
3066 	}
3067 	return TRUE;
3068 }
3069 
3070 boolean_t
ubc_was_mapped_writable(const struct vnode * vp)3071 ubc_was_mapped_writable(const struct vnode *vp)
3072 {
3073 	boolean_t writable;
3074 	return ubc_was_mapped(vp, &writable) && writable;
3075 }
3076 
3077 
3078 /*
3079  * CODE SIGNING
3080  */
/* Accounting for loaded code-signature blobs (updated atomically). */
static atomic_size_t cs_blob_size = 0;               /* total bytes currently resident */
static atomic_uint_fast32_t cs_blob_count = 0;       /* blobs currently resident */
static atomic_size_t cs_blob_size_peak = 0;          /* high-water mark of cs_blob_size */
static atomic_size_t cs_blob_size_max = 0;           /* largest single blob ever seen */
static atomic_uint_fast32_t cs_blob_count_peak = 0;  /* high-water mark of cs_blob_count */

/* Read-only sysctls exposing the counters above under the vm.* namespace. */
SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
3092 
3093 /*
3094  * Function: csblob_parse_teamid
3095  *
3096  * Description: This function returns a pointer to the team id
3097  *               stored within the codedirectory of the csblob.
3098  *               If the codedirectory predates team-ids, it returns
3099  *               NULL.
3100  *               This does not copy the name but returns a pointer to
3101  *               it within the CD. Subsequently, the CD must be
3102  *               available when this is used.
3103  */
3104 
3105 static const char *
csblob_parse_teamid(struct cs_blob * csblob)3106 csblob_parse_teamid(struct cs_blob *csblob)
3107 {
3108 	const CS_CodeDirectory *cd;
3109 
3110 	cd = csblob->csb_cd;
3111 
3112 	if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3113 		return NULL;
3114 	}
3115 
3116 	if (cd->teamOffset == 0) {
3117 		return NULL;
3118 	}
3119 
3120 	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
3121 	if (cs_debug > 1) {
3122 		printf("found team-id %s in cdblob\n", name);
3123 	}
3124 
3125 	return name;
3126 }
3127 
3128 kern_return_t
ubc_cs_blob_allocate(vm_offset_t * blob_addr_p,vm_size_t * blob_size_p)3129 ubc_cs_blob_allocate(
3130 	vm_offset_t     *blob_addr_p,
3131 	vm_size_t       *blob_size_p)
3132 {
3133 	kern_return_t   kr = KERN_FAILURE;
3134 	vm_size_t       allocation_size = 0;
3135 
3136 	if (!blob_addr_p || !blob_size_p) {
3137 		return KERN_INVALID_ARGUMENT;
3138 	}
3139 	allocation_size = *blob_size_p;
3140 
3141 	if (ubc_cs_blob_pagewise_allocate(allocation_size) == true) {
3142 		/* Round up to page size */
3143 		allocation_size = round_page(allocation_size);
3144 
3145 		/* Allocate page-wise */
3146 		kr = kmem_alloc(
3147 			kernel_map,
3148 			blob_addr_p,
3149 			allocation_size,
3150 			KMA_KOBJECT | KMA_DATA | KMA_ZERO,
3151 			VM_KERN_MEMORY_SECURITY);
3152 	} else {
3153 		*blob_addr_p = (vm_offset_t)kalloc_data_tag(
3154 			allocation_size,
3155 			Z_WAITOK | Z_ZERO,
3156 			VM_KERN_MEMORY_SECURITY);
3157 
3158 		assert(*blob_addr_p != 0);
3159 		kr = KERN_SUCCESS;
3160 	}
3161 
3162 	if (kr == KERN_SUCCESS) {
3163 		*blob_size_p = allocation_size;
3164 	}
3165 
3166 	return kr;
3167 }
3168 
3169 void
ubc_cs_blob_deallocate(vm_offset_t blob_addr,vm_size_t blob_size)3170 ubc_cs_blob_deallocate(
3171 	vm_offset_t     blob_addr,
3172 	vm_size_t       blob_size)
3173 {
3174 	if (ubc_cs_blob_pagewise_allocate(blob_size) == true) {
3175 		kmem_free(kernel_map, blob_addr, blob_size);
3176 	} else {
3177 		kfree_data(blob_addr, blob_size);
3178 	}
3179 }
3180 
3181 /*
3182  * Some codesigned files use a lowest common denominator page size of
3183  * 4KiB, but can be used on systems that have a runtime page size of
3184  * 16KiB. Since faults will only occur on 16KiB ranges in
3185  * cs_validate_range(), we can convert the original Code Directory to
3186  * a multi-level scheme where groups of 4 hashes are combined to form
3187  * a new hash, which represents 16KiB in the on-disk file.  This can
3188  * reduce the wired memory requirement for the Code Directory by
3189  * 75%.
3190  */
3191 static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob * blob __unused)3192 ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
3193 {
3194 	const CS_CodeDirectory *cd;
3195 
3196 #if CODE_SIGNING_MONITOR
3197 	// TODO: <rdar://problem/30954826>
3198 	if (csm_enabled() == true) {
3199 		return FALSE;
3200 	}
3201 #endif
3202 
3203 	/*
3204 	 * Only applies to binaries that ship as part of the OS,
3205 	 * primarily the shared cache.
3206 	 */
3207 	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3208 		return FALSE;
3209 	}
3210 
3211 	/*
3212 	 * If the runtime page size matches the code signing page
3213 	 * size, there is no work to do.
3214 	 */
3215 	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3216 		return FALSE;
3217 	}
3218 
3219 	cd = blob->csb_cd;
3220 
3221 	/*
3222 	 * There must be a valid integral multiple of hashes
3223 	 */
3224 	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3225 		return FALSE;
3226 	}
3227 
3228 	/*
3229 	 * Scatter lists must also have ranges that have an integral number of hashes
3230 	 */
3231 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3232 		const SC_Scatter *scatter = (const SC_Scatter*)
3233 		    ((const char*)cd + ntohl(cd->scatterOffset));
3234 		/* iterate all scatter structs to make sure they are all aligned */
3235 		do {
3236 			uint32_t sbase = ntohl(scatter->base);
3237 			uint32_t scount = ntohl(scatter->count);
3238 
3239 			/* last scatter? */
3240 			if (scount == 0) {
3241 				break;
3242 			}
3243 
3244 			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3245 				return FALSE;
3246 			}
3247 
3248 			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3249 				return FALSE;
3250 			}
3251 
3252 			scatter++;
3253 		} while (1);
3254 	}
3255 
3256 	/* Covered range must be a multiple of the new page size */
3257 	if (ntohl(cd->codeLimit) & PAGE_MASK) {
3258 		return FALSE;
3259 	}
3260 
3261 	/* All checks pass */
3262 	return TRUE;
3263 }
3264 
3265 /*
3266  * Reconstruct a cs_blob with the code signature fields. This helper function
3267  * is useful because a lot of things often change the base address of the code
3268  * signature blob, which requires reconstructing some of the other pointers
3269  * within.
3270  */
3271 static errno_t
ubc_cs_blob_reconstruct(struct cs_blob * cs_blob,const vm_address_t signature_addr,const vm_address_t signature_size,const vm_offset_t code_directory_offset)3272 ubc_cs_blob_reconstruct(
3273 	struct cs_blob *cs_blob,
3274 	const vm_address_t signature_addr,
3275 	const vm_address_t signature_size,
3276 	const vm_offset_t code_directory_offset)
3277 {
3278 	const CS_CodeDirectory *code_directory = NULL;
3279 
3280 	/* Setup the signature blob address */
3281 	cs_blob->csb_mem_kaddr = (void*)signature_addr;
3282 	cs_blob->csb_mem_size = signature_size;
3283 
3284 	/* Setup the code directory in the blob */
3285 	code_directory = (const CS_CodeDirectory*)(signature_addr + code_directory_offset);
3286 	cs_blob->csb_cd = code_directory;
3287 
3288 	/* Setup the XML entitlements */
3289 	cs_blob->csb_entitlements_blob = csblob_find_blob_bytes(
3290 		(uint8_t*)signature_addr,
3291 		signature_size,
3292 		CSSLOT_ENTITLEMENTS,
3293 		CSMAGIC_EMBEDDED_ENTITLEMENTS);
3294 
3295 	/* Setup the DER entitlements */
3296 	cs_blob->csb_der_entitlements_blob = csblob_find_blob_bytes(
3297 		(uint8_t*)signature_addr,
3298 		signature_size,
3299 		CSSLOT_DER_ENTITLEMENTS,
3300 		CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3301 
3302 	return 0;
3303 }
3304 
3305 /*
3306  * Given a validated cs_blob, we reformat the structure to only include
3307  * the blobs which are required by the kernel for our current platform.
3308  * This saves significant memory with agile signatures.
3309  *
3310  * To support rewriting the code directory, potentially through
3311  * multilevel hashes, we provide a mechanism to allocate a code directory
3312  * of a specified size and zero it out --> caller can fill it in.
3313  *
3314  * We don't need to perform a lot of overflow checks as the assumption
3315  * here is that the cs_blob has already been validated.
3316  */
static errno_t
ubc_cs_reconstitute_code_signature(
	const struct cs_blob * const blob,
	vm_address_t * const ret_mem_kaddr,
	vm_size_t * const ret_mem_size,
	vm_size_t code_directory_size,
	CS_CodeDirectory ** const code_directory
	)
{
	vm_address_t new_blob_addr = 0;
	vm_size_t new_blob_size = 0;
	vm_size_t new_code_directory_size = 0;
	const CS_GenericBlob *best_code_directory = NULL;
	const CS_GenericBlob *first_code_directory = NULL;
	const CS_GenericBlob *der_entitlements_blob = NULL;
	const CS_GenericBlob *entitlements_blob = NULL;
	const CS_GenericBlob *cms_blob = NULL;
	const CS_GenericBlob *launch_constraint_self = NULL;
	const CS_GenericBlob *launch_constraint_parent = NULL;
	const CS_GenericBlob *launch_constraint_responsible = NULL;
	const CS_GenericBlob *library_constraint = NULL;
	CS_SuperBlob *superblob = NULL;
	uint32_t num_blobs = 0;
	uint32_t blob_index = 0;
	uint32_t blob_offset = 0;
	kern_return_t ret;
	int err;

	if (!blob) {
		if (cs_debug > 1) {
			printf("CODE SIGNING: CS Blob passed in is NULL\n");
		}
		return EINVAL;
	}

	best_code_directory = (const CS_GenericBlob*)blob->csb_cd;
	if (!best_code_directory) {
		/* This case can never happen, and it is a sign of bad things */
		panic("CODE SIGNING: Validated CS Blob has no code directory");
	}

	/*
	 * code_directory_size == 0 means "copy the best code directory
	 * verbatim"; a non-zero size means the caller wants a zeroed code
	 * directory of that size to fill in afterwards (see below).
	 */
	new_code_directory_size = code_directory_size;
	if (new_code_directory_size == 0) {
		new_code_directory_size = ntohl(best_code_directory->length);
	}

	/*
	 * A code signature can contain multiple code directories, each of which contains hashes
	 * of pages based on a hashing algorithm. The kernel selects which hashing algorithm is
	 * the strongest, and consequently, marks one of these code directories as the best
	 * matched one. More often than not, the best matched one is _not_ the first one.
	 *
	 * However, the CMS blob which cryptographically verifies the code signature is only
	 * signed against the first code directory. Therefore, if the CMS blob is present, we also
	 * need the first code directory to be able to verify it. Given this, we organize the
	 * new cs_blob as following order:
	 *
	 * 1. best code directory
	 * 2. DER encoded entitlements blob (if present)
	 * 3. launch constraint self (if present)
	 * 4. launch constraint parent (if present)
	 * 5. launch constraint responsible (if present)
	 * 6. library constraint (if present)
	 * 7. entitlements blob (if present)
	 * 8. cms blob (if present)
	 * 9. first code directory (if not already the best match, and if cms blob is present)
	 *
	 * This order is chosen deliberately, as later on, we expect to get rid of the CMS blob
	 * and the first code directory once their verification is complete.
	 */

	/* Storage for the super blob header */
	new_blob_size += sizeof(CS_SuperBlob);

	/* Guaranteed storage for the best code directory */
	new_blob_size += sizeof(CS_BlobIndex);
	new_blob_size += new_code_directory_size;
	num_blobs += 1;

	/* Conditional storage for the DER entitlements blob */
	der_entitlements_blob = blob->csb_der_entitlements_blob;
	if (der_entitlements_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(der_entitlements_blob->length);
		num_blobs += 1;
	}

	/* Conditional storage for the launch constraints self blob */
	launch_constraint_self = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_SELF,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_self) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(launch_constraint_self->length);
		num_blobs += 1;
	}

	/* Conditional storage for the launch constraints parent blob */
	launch_constraint_parent = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_parent) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(launch_constraint_parent->length);
		num_blobs += 1;
	}

	/* Conditional storage for the launch constraints responsible blob */
	launch_constraint_responsible = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_responsible) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(launch_constraint_responsible->length);
		num_blobs += 1;
	}

	/* Conditional storage for the library constraints blob */
	library_constraint = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LIBRARY_CONSTRAINT,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (library_constraint) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(library_constraint->length);
		num_blobs += 1;
	}

	/* Conditional storage for the entitlements blob */
	entitlements_blob = blob->csb_entitlements_blob;
	if (entitlements_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(entitlements_blob->length);
		num_blobs += 1;
	}

	/* Conditional storage for the CMS blob */
	cms_blob = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
	if (cms_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(cms_blob->length);
		num_blobs += 1;
	}

	/*
	 * Conditional storage for the first code directory.
	 * This is only needed if a CMS blob exists and the best code directory isn't already
	 * the first one. It is an error if we find a CMS blob but do not find a first code directory.
	 */
	if (cms_blob) {
		first_code_directory = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);
		if (first_code_directory == best_code_directory) {
			/* We don't need the first code directory anymore, since the best one is already it */
			first_code_directory = NULL;
		} else if (first_code_directory) {
			new_blob_size += sizeof(CS_BlobIndex);
			new_blob_size += ntohl(first_code_directory->length);
			num_blobs += 1;
		} else {
			printf("CODE SIGNING: Invalid CS Blob: found CMS blob but not a first code directory\n");
			return EINVAL;
		}
	}

	/*
	 * The blob size could be rounded up to page size here, so we keep a copy
	 * of the actual superblob length as well.
	 */
	vm_size_t new_blob_allocation_size = new_blob_size;
	ret = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_allocation_size);
	if (ret != KERN_SUCCESS) {
		printf("CODE SIGNING: Failed to allocate memory for new code signing blob: %d\n", ret);
		return ENOMEM;
	}

	/*
	 * Fill out the superblob header and then all the blobs in the order listed
	 * above.
	 */
	superblob = (CS_SuperBlob*)new_blob_addr;
	superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
	superblob->length = htonl((uint32_t)new_blob_size);
	superblob->count = htonl(num_blobs);

	/* Payload data starts right after the superblob index table. */
	blob_index = 0;
	blob_offset = sizeof(CS_SuperBlob) + (num_blobs * sizeof(CS_BlobIndex));

	/* Best code directory */
	superblob->index[blob_index].offset = htonl(blob_offset);
	if (first_code_directory) {
		/* The first CD will reclaim the CSSLOT_CODEDIRECTORY slot below. */
		superblob->index[blob_index].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES);
	} else {
		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
	}

	if (code_directory_size > 0) {
		/* We zero out the code directory, as we expect the caller to fill it in */
		memset((void*)(new_blob_addr + blob_offset), 0, new_code_directory_size);
	} else {
		memcpy((void*)(new_blob_addr + blob_offset), best_code_directory, new_code_directory_size);
	}

	if (code_directory) {
		/* Hand back a pointer to the (new) code directory inside the superblob. */
		*code_directory = (CS_CodeDirectory*)(new_blob_addr + blob_offset);
	}
	blob_offset += new_code_directory_size;

	/* DER entitlements blob */
	if (der_entitlements_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_DER_ENTITLEMENTS);

		memcpy((void*)(new_blob_addr + blob_offset), der_entitlements_blob, ntohl(der_entitlements_blob->length));
		blob_offset += ntohl(der_entitlements_blob->length);
	}

	/* Launch constraints self blob */
	if (launch_constraint_self) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_SELF);

		memcpy(
			(void*)(new_blob_addr + blob_offset),
			launch_constraint_self,
			ntohl(launch_constraint_self->length));

		blob_offset += ntohl(launch_constraint_self->length);
	}

	/* Launch constraints parent blob */
	if (launch_constraint_parent) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_PARENT);

		memcpy(
			(void*)(new_blob_addr + blob_offset),
			launch_constraint_parent,
			ntohl(launch_constraint_parent->length));

		blob_offset += ntohl(launch_constraint_parent->length);
	}

	/* Launch constraints responsible blob */
	if (launch_constraint_responsible) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE);

		memcpy(
			(void*)(new_blob_addr + blob_offset),
			launch_constraint_responsible,
			ntohl(launch_constraint_responsible->length));

		blob_offset += ntohl(launch_constraint_responsible->length);
	}

	/* library constraints blob */
	if (library_constraint) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_LIBRARY_CONSTRAINT);

		memcpy(
			(void*)(new_blob_addr + blob_offset),
			library_constraint,
			ntohl(library_constraint->length));

		blob_offset += ntohl(library_constraint->length);
	}

	/* Entitlements blob */
	if (entitlements_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_ENTITLEMENTS);

		memcpy((void*)(new_blob_addr + blob_offset), entitlements_blob, ntohl(entitlements_blob->length));
		blob_offset += ntohl(entitlements_blob->length);
	}

	/* CMS blob */
	if (cms_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_SIGNATURESLOT);
		memcpy((void*)(new_blob_addr + blob_offset), cms_blob, ntohl(cms_blob->length));
		blob_offset += ntohl(cms_blob->length);
	}

	/* First code directory */
	if (first_code_directory) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
		memcpy((void*)(new_blob_addr + blob_offset), first_code_directory, ntohl(first_code_directory->length));
		blob_offset += ntohl(first_code_directory->length);
	}

	/*
	 * We only validate the blob in case we copied in the best code directory.
	 * In case the code directory size we were passed in wasn't 0, we memset the best
	 * code directory to 0 and expect the caller to fill it in. In the same spirit, we
	 * expect the caller to validate the code signature after they fill in the code
	 * directory.
	 */
	if (code_directory_size == 0) {
		const CS_CodeDirectory *validated_code_directory = NULL;
		const CS_GenericBlob *validated_entitlements_blob = NULL;
		const CS_GenericBlob *validated_der_entitlements_blob = NULL;

		ret = cs_validate_csblob(
			(const uint8_t *)superblob,
			new_blob_size,
			&validated_code_directory,
			&validated_entitlements_blob,
			&validated_der_entitlements_blob);

		if (ret) {
			printf("unable to validate reconstituted cs_blob: %d\n", ret);
			err = EINVAL;
			goto fail;
		}
	}

	/* Success: hand back the new superblob address and allocation size. */
	if (ret_mem_kaddr) {
		*ret_mem_kaddr = new_blob_addr;
	}
	if (ret_mem_size) {
		*ret_mem_size = new_blob_allocation_size;
	}

	return 0;

fail:
	ubc_cs_blob_deallocate(new_blob_addr, new_blob_allocation_size);
	return err;
}
3664 
3665 /*
3666  * We use this function to clear out unnecessary bits from the code signature
3667  * blob which are no longer needed. We free these bits and give them back to
3668  * the kernel. This is needed since reconstitution includes extra data which is
3669  * needed only for verification but has no point in keeping afterwards.
3670  *
3671  * This results in significant memory reduction, especially for 3rd party apps
3672  * since we also get rid of the CMS blob.
3673  */
static errno_t
ubc_cs_reconstitute_code_signature_2nd_stage(
	struct cs_blob *blob
	)
{
	kern_return_t ret = KERN_FAILURE;
	const CS_GenericBlob *launch_constraint_self = NULL;
	const CS_GenericBlob *launch_constraint_parent = NULL;
	const CS_GenericBlob *launch_constraint_responsible = NULL;
	const CS_GenericBlob *library_constraint = NULL;
	CS_SuperBlob *superblob = NULL;
	uint32_t num_blobs = 0;
	vm_size_t last_needed_blob_offset = 0;
	vm_offset_t code_directory_offset = 0;

	/*
	 * Ordering of blobs we need to keep:
	 * 1. Code directory
	 * 2. DER encoded entitlements (if present)
	 * 3. Launch constraints self (if present)
	 * 4. Launch constraints parent (if present)
	 * 5. Launch constraints responsible (if present)
	 * 6. Library constraints (if present)
	 *
	 * We need to clear out the remaining page after these blobs end, and fix up
	 * the superblob for the changes. Things gets a little more complicated for
	 * blobs which may not have been kmem_allocated. For those, we simply just
	 * allocate the new required space and copy into it.
	 */

	if (blob == NULL) {
		printf("NULL blob passed in for 2nd stage reconstitution\n");
		return EINVAL;
	}
	/* Only blobs that went through 1st stage reconstitution qualify */
	assert(blob->csb_reconstituted == true);

	/* Ensure we're not page-wise allocated when in this function */
	assert(ubc_cs_blob_pagewise_allocate(blob->csb_mem_size) == false);

	if (!blob->csb_cd) {
		/* This case can never happen, and it is a sign of bad things */
		panic("validated cs_blob has no code directory");
	}
	superblob = (CS_SuperBlob*)blob->csb_mem_kaddr;

	/* The code directory is always retained; slot 0 holds its offset */
	num_blobs = 1;
	last_needed_blob_offset = ntohl(superblob->index[0].offset) + ntohl(blob->csb_cd->length);

	/*
	 * NOTE(review): the running-offset computation below assumes the retained
	 * blobs are laid out contiguously and in the order listed in the comment
	 * above — confirm 1st stage reconstitution guarantees this layout.
	 */

	/* Check for DER entitlements */
	if (blob->csb_der_entitlements_blob) {
		num_blobs += 1;
		last_needed_blob_offset += ntohl(blob->csb_der_entitlements_blob->length);
	}

	/* Check for launch constraints self */
	launch_constraint_self = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_SELF,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_self) {
		num_blobs += 1;
		last_needed_blob_offset += ntohl(launch_constraint_self->length);
	}

	/* Check for launch constraints parent */
	launch_constraint_parent = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_parent) {
		num_blobs += 1;
		last_needed_blob_offset += ntohl(launch_constraint_parent->length);
	}

	/* Check for launch constraints responsible */
	launch_constraint_responsible = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_responsible) {
		num_blobs += 1;
		last_needed_blob_offset += ntohl(launch_constraint_responsible->length);
	}

	/* Check for library constraint */
	library_constraint = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LIBRARY_CONSTRAINT,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (library_constraint) {
		num_blobs += 1;
		last_needed_blob_offset += ntohl(library_constraint->length);
	}

	/* Fix up the superblob header to describe only the retained blobs */
	superblob->count = htonl(num_blobs);
	superblob->length = htonl((uint32_t)last_needed_blob_offset);

	/*
	 * There is a chance that the code directory is marked within the superblob as an
	 * alternate code directory. This happens when the first code directory isn't the
	 * best one chosen by the kernel, so to be able to access both the first and the best,
	 * we save the best one as an alternate one. Since we're getting rid of the first one
	 * here, we mark the best one as the first one.
	 */
	superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);

	vm_address_t new_superblob = 0;
	vm_size_t new_superblob_size = last_needed_blob_offset;

	ret = ubc_cs_blob_allocate(&new_superblob, &new_superblob_size);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for 2nd stage reconstitution: %d\n", ret);
		return ENOMEM;
	}
	/* Non-pagewise allocations should come back exactly-sized */
	assert(new_superblob_size == last_needed_blob_offset);

	/* Calculate the code directory offset */
	code_directory_offset = (vm_offset_t)blob->csb_cd - (vm_offset_t)blob->csb_mem_kaddr;

	/* Copy in the updated superblob into the new memory */
	memcpy((void*)new_superblob, superblob, new_superblob_size);

	/* Free the old code signature and old memory */
	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);

	/* Reconstruct critical fields in the blob object */
	ubc_cs_blob_reconstruct(
		blob,
		new_superblob,
		new_superblob_size,
		code_directory_offset);

	/* XML entitlements should've been removed */
	assert(blob->csb_entitlements_blob == NULL);

	/* Re-validate the shrunken signature before declaring success */
	const CS_CodeDirectory *validated_code_directory = NULL;
	const CS_GenericBlob *validated_entitlements_blob = NULL;
	const CS_GenericBlob *validated_der_entitlements_blob = NULL;

	ret = cs_validate_csblob(
		(const uint8_t*)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		&validated_code_directory,
		&validated_entitlements_blob,
		&validated_der_entitlements_blob);
	if (ret) {
		printf("unable to validate code signature after 2nd stage reconstitution: %d\n", ret);
		/*
		 * NOTE(review): the new allocation already backs blob->csb_mem_kaddr
		 * (via ubc_cs_blob_reconstruct above), so it is freed by the caller's
		 * blob cleanup path, not here.
		 */
		return EINVAL;
	}

	return 0;
}
3830 
3831 static int
ubc_cs_convert_to_multilevel_hash(struct cs_blob * blob)3832 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3833 {
3834 	const CS_CodeDirectory  *old_cd, *cd;
3835 	CS_CodeDirectory        *new_cd;
3836 	const CS_GenericBlob *entitlements;
3837 	const CS_GenericBlob *der_entitlements;
3838 	vm_offset_t     new_blob_addr;
3839 	vm_size_t       new_blob_size;
3840 	vm_size_t       new_cdsize;
3841 	int                             error;
3842 
3843 	uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3844 
3845 	if (cs_debug > 1) {
3846 		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3847 		    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3848 	}
3849 
3850 	old_cd = blob->csb_cd;
3851 
3852 	/* Up to the hashes, we can copy all data */
3853 	new_cdsize  = ntohl(old_cd->hashOffset);
3854 	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3855 
3856 	error = ubc_cs_reconstitute_code_signature(blob, &new_blob_addr, &new_blob_size, new_cdsize, &new_cd);
3857 	if (error != 0) {
3858 		printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3859 		return error;
3860 	}
3861 	entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3862 	der_entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3863 
3864 	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3865 
3866 	/* Update fields in the Code Directory structure */
3867 	new_cd->length = htonl((uint32_t)new_cdsize);
3868 
3869 	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3870 	nCodeSlots >>= hashes_per_new_hash_shift;
3871 	new_cd->nCodeSlots = htonl(nCodeSlots);
3872 
3873 	new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
3874 
3875 	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3876 		SC_Scatter *scatter = (SC_Scatter*)
3877 		    ((char *)new_cd + ntohl(new_cd->scatterOffset));
3878 		/* iterate all scatter structs to scale their counts */
3879 		do {
3880 			uint32_t scount = ntohl(scatter->count);
3881 			uint32_t sbase  = ntohl(scatter->base);
3882 
3883 			/* last scatter? */
3884 			if (scount == 0) {
3885 				break;
3886 			}
3887 
3888 			scount >>= hashes_per_new_hash_shift;
3889 			scatter->count = htonl(scount);
3890 
3891 			sbase >>= hashes_per_new_hash_shift;
3892 			scatter->base = htonl(sbase);
3893 
3894 			scatter++;
3895 		} while (1);
3896 	}
3897 
3898 	/* For each group of hashes, hash them together */
3899 	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3900 	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3901 
3902 	uint32_t hash_index;
3903 	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3904 		union cs_hash_union     mdctx;
3905 
3906 		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3907 		const unsigned char *src = src_base + hash_index * source_hash_len;
3908 		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3909 
3910 		blob->csb_hashtype->cs_init(&mdctx);
3911 		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3912 		blob->csb_hashtype->cs_final(dst, &mdctx);
3913 	}
3914 
3915 	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements, &der_entitlements);
3916 	if (error != 0) {
3917 		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3918 		    error);
3919 
3920 		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3921 		return error;
3922 	}
3923 
3924 	/* New Code Directory is ready for use, swap it out in the blob structure */
3925 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3926 
3927 	blob->csb_mem_size = new_blob_size;
3928 	blob->csb_mem_kaddr = (void *)new_blob_addr;
3929 	blob->csb_cd = cd;
3930 	blob->csb_entitlements_blob = NULL;
3931 
3932 	blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
3933 	blob->csb_reconstituted = true;
3934 
3935 	/* The blob has some cached attributes of the Code Directory, so update those */
3936 
3937 	blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
3938 
3939 	blob->csb_hash_pageshift = PAGE_SHIFT;
3940 	blob->csb_end_offset = ntohl(cd->codeLimit);
3941 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3942 		const SC_Scatter *scatter = (const SC_Scatter*)
3943 		    ((const char*)cd + ntohl(cd->scatterOffset));
3944 		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3945 	} else {
3946 		blob->csb_start_offset = 0;
3947 	}
3948 
3949 	return 0;
3950 }
3951 
/*
 * Release all resources owned by a (writable) cs_blob: entitlements object,
 * code-signing-monitor registration, and the backing signature memory.
 * The blob structure itself is not freed; csb_mem_kaddr/csb_mem_size are
 * cleared so a later double-cleanup is harmless.
 */
static void
cs_blob_cleanup(struct cs_blob *blob)
{
	if (blob->csb_entitlements != NULL) {
		/* Invalidate before dropping the reference so AMFI stops using it */
		amfi->OSEntitlements_invalidate(blob->csb_entitlements);
		osobject_release(blob->csb_entitlements);
		blob->csb_entitlements = NULL;
	}

#if CODE_SIGNING_MONITOR
	if (blob->csb_csm_obj != NULL) {
		/* Unconditionally remove any profiles we may have associated */
		csm_disassociate_provisioning_profile(blob->csb_csm_obj);

		kern_return_t kr = csm_unregister_code_signature(blob->csb_csm_obj);
		if (kr == KERN_SUCCESS) {
			/*
			 * If the code signature was monitor managed, the monitor will have freed it
			 * itself in the unregistration call. It means we do not need to free the data
			 * over here.
			 */
			if (blob->csb_csm_managed) {
				blob->csb_mem_kaddr = NULL;
				blob->csb_mem_size = 0;
			}
		} else if (kr == KERN_ABORTED) {
			/*
			 * The code-signing-monitor refused to unregister the code signature. It means
			 * whatever memory was backing the code signature may not have been released, and
			 * attempting to free it down below will not be successful. As a result, all we
			 * can do is prevent the kernel from touching the data.
			 */
			blob->csb_mem_kaddr = NULL;
			blob->csb_mem_size = 0;
		}
	}

	/* Unconditionally remove references to the monitor */
	blob->csb_csm_obj = NULL;
	blob->csb_csm_managed = false;
#endif

	/* Free any signature memory still owned by the kernel */
	if (blob->csb_mem_kaddr) {
		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
	}
	blob->csb_mem_kaddr = NULL;
	blob->csb_mem_size = 0;
}
4000 
4001 static void
cs_blob_ro_free(struct cs_blob * blob)4002 cs_blob_ro_free(struct cs_blob *blob)
4003 {
4004 	struct cs_blob tmp;
4005 
4006 	if (blob != NULL) {
4007 		/*
4008 		 * cs_blob_cleanup clears fields, so we need to pass it a
4009 		 * mutable copy.
4010 		 */
4011 		tmp = *blob;
4012 		cs_blob_cleanup(&tmp);
4013 
4014 		zfree_ro(ZONE_ID_CS_BLOB, blob);
4015 	}
4016 }
4017 
4018 /*
4019  * Free a cs_blob previously created by cs_blob_create_validated.
4020  */
void
cs_blob_free(
	struct cs_blob *blob)
{
	/* Blobs are allocated from the read-only zone; delegate to the RO path */
	cs_blob_ro_free(blob);
}
4027 
/*
 * Validate the raw code signature at *addr and initialize the caller-provided
 * blob structure from it: chosen code directory, entitlement blob pointers,
 * hash type, page shift, start/end offsets and the cdhash.
 *
 * Ownership of the memory at *addr transfers to the blob (*addr is cleared);
 * on validation failure the memory is released via cs_blob_cleanup and an
 * errno is returned. On success, *ret_cd (if non-NULL) receives the chosen
 * code directory.
 */
static int
cs_blob_init_validated(
	vm_address_t * const addr,
	vm_size_t size,
	struct cs_blob *blob,
	CS_CodeDirectory const ** const ret_cd)
{
	int error = EINVAL;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	const CS_GenericBlob *der_entitlements = NULL;
	union cs_hash_union mdctx;
	size_t length;

	bzero(blob, sizeof(*blob));

	/* fill in the new blob */
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_kaddr = (void *)*addr;
	blob->csb_flags = 0;
	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
	blob->csb_platform_binary = 0;
	blob->csb_platform_path = 0;
	blob->csb_teamid = NULL;
#if CONFIG_SUPPLEMENTAL_SIGNATURES
	blob->csb_supplement_teamid = NULL;
#endif
	blob->csb_entitlements_blob = NULL;
	blob->csb_der_entitlements_blob = NULL;
	blob->csb_entitlements = NULL;
#if CODE_SIGNING_MONITOR
	blob->csb_csm_obj = NULL;
	blob->csb_csm_managed = false;
#endif
	blob->csb_reconstituted = false;
	blob->csb_validation_category = CS_VALIDATION_CATEGORY_INVALID;

	/* Transfer ownership. Even on error, this function will deallocate */
	*addr = 0;

	/*
	 * Validate the blob's contents
	 */
	length = (size_t) size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    length, &cd, &entitlements, &der_entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		/*
		 * The vnode checker can't make the rest of this function
		 * succeed if csblob validation failed, so bail */
		goto out;
	} else {
		const unsigned char *md_base;
		uint8_t hash[CS_HASH_MAX_SIZE];
		int md_size;
		vm_offset_t hash_pagemask;

		blob->csb_cd = cd;
		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
		blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
		blob->csb_hashtype = cs_find_md(cd->hashType);
		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
			panic("validated CodeDirectory but unsupported type");
		}

		blob->csb_hash_pageshift = cd->pageSize;
		hash_pagemask = (1U << cd->pageSize) - 1;
		blob->csb_hash_firstlevel_pageshift = 0;
		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		/* codeLimit rounded up to a hash-page boundary */
		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
		if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			/* First scatter entry's base determines where coverage starts */
			const SC_Scatter *scatter = (const SC_Scatter*)
			    ((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
		} else {
			blob->csb_start_offset = 0;
		}
		/* compute the blob's cdhash */
		md_base = (const unsigned char *) cd;
		md_size = ntohl(cd->length);

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
		blob->csb_hashtype->cs_final(hash, &mdctx);

		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
		/* Cache the linkage hash if the CD advertises one large enough */
		blob->csb_linkage_hashtype = NULL;
		if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
		    ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
			blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);

			if (blob->csb_linkage_hashtype != NULL) {
				memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
				    CS_CDHASH_LEN);
			}
		}
#endif
	}

	error = 0;

out:
	if (error != 0) {
		/* Releases the transferred memory; caller gets no blob or CD */
		cs_blob_cleanup(blob);
		blob = NULL;
		cd = NULL;
	}

	if (ret_cd != NULL) {
		*ret_cd = cd;
	}

	return error;
}
4148 
4149 /*
4150  * Validate the code signature blob, create a struct cs_blob wrapper
4151  * and return it together with a pointer to the chosen code directory
4152  * and entitlements blob.
4153  *
4154  * Note that this takes ownership of the memory as addr, mainly because
4155  * this function can actually replace the passed in blob with another
4156  * one, e.g. when performing multilevel hashing optimization.
4157  */
4158 int
cs_blob_create_validated(vm_address_t * const addr,vm_size_t size,struct cs_blob ** const ret_blob,CS_CodeDirectory const ** const ret_cd)4159 cs_blob_create_validated(
4160 	vm_address_t * const            addr,
4161 	vm_size_t                       size,
4162 	struct cs_blob ** const         ret_blob,
4163 	CS_CodeDirectory const ** const     ret_cd)
4164 {
4165 	struct cs_blob blob = {};
4166 	struct cs_blob *ro_blob;
4167 	int error;
4168 
4169 	if (ret_blob) {
4170 		*ret_blob = NULL;
4171 	}
4172 
4173 	if ((error = cs_blob_init_validated(addr, size, &blob, ret_cd)) != 0) {
4174 		return error;
4175 	}
4176 
4177 	if (ret_blob != NULL) {
4178 		ro_blob = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4179 		zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, &blob);
4180 		*ret_blob = ro_blob;
4181 	}
4182 
4183 	return error;
4184 }
4185 
#if CONFIG_SUPPLEMENTAL_SIGNATURES
/*
 * Free a supplemental-signature cs_blob, including its copied team
 * identifier string. NULL is a no-op.
 */
static void
cs_blob_supplement_free(struct cs_blob * const blob)
{
	if (blob == NULL) {
		return;
	}

	if (blob->csb_supplement_teamid != NULL) {
		/* Team ID was allocated as a NUL-terminated string; free it first */
		void *tid = blob->csb_supplement_teamid;
		vm_size_t tid_size = strlen(blob->csb_supplement_teamid) + 1;
		kfree_data(tid, tid_size);
	}

	cs_blob_ro_free(blob);
}
#endif
4202 
4203 static void
ubc_cs_blob_adjust_statistics(struct cs_blob const * blob)4204 ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
4205 {
4206 	/* Note that the atomic ops are not enough to guarantee
4207 	 * correctness: If a blob with an intermediate size is inserted
4208 	 * concurrently, we can lose a peak value assignment. But these
4209 	 * statistics are only advisory anyway, so we're not going to
4210 	 * employ full locking here. (Consequently, we are also okay with
4211 	 * relaxed ordering of those accesses.)
4212 	 */
4213 
4214 	unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
4215 	if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
4216 		os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
4217 	}
4218 
4219 	size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
4220 
4221 	if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
4222 		os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
4223 	}
4224 	if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
4225 		os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
4226 	}
4227 }
4228 
/*
 * Set csb_cpu_type on a blob that lives in read-only zone memory; writes
 * must go through the zalloc_ro field-update interface.
 */
static void
cs_blob_set_cpu_type(struct cs_blob *blob, cpu_type_t cputype)
{
	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_type, &cputype);
}
4234 
/* Out-of-line panic helper for cs_blob_require; __abortlike: never returns */
__abortlike
static void
panic_cs_blob_backref_mismatch(struct cs_blob *blob, struct vnode *vp)
{
	panic("cs_blob vnode backref mismatch: blob=%p, vp=%p, "
	    "blob->csb_vnode=%p", blob, vp, blob->csb_vnode);
}
4242 
/*
 * Assert that `blob` is a genuine element of the read-only cs_blob zone and,
 * when `vp` is non-NULL, that the blob's vnode back-reference matches it.
 * Panics on any mismatch.
 */
void
cs_blob_require(struct cs_blob *blob, vnode_t vp)
{
	zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), blob);

	if (vp != NULL && __improbable(blob->csb_vnode != vp)) {
		panic_cs_blob_backref_mismatch(blob, vp);
	}
}
4252 
4253 #if CODE_SIGNING_MONITOR
4254 
4255 /**
4256  * Independently verify the authenticity of the code signature through the monitor
4257  * environment. This is required as otherwise the monitor won't allow associations
4258  * of the code signature with address spaces.
4259  *
4260  * Once we've verified the code signature, we no longer need to keep around any
4261  * provisioning profiles we may have registered with it. AMFI associates profiles
4262  * with the monitor during its validation (which happens before the monitor's).
4263  */
static errno_t
verify_code_signature_monitor(
	struct cs_blob *cs_blob)
{
	kern_return_t ret = KERN_DENIED;

	/*
	 * KERN_NOT_SUPPORTED is tolerated throughout: it indicates the monitor
	 * is not enforcing code signing, which is not an error for the kernel.
	 */
	ret = csm_verify_code_signature(cs_blob->csb_csm_obj, &cs_blob->csb_csm_trust_level);
	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
		printf("unable to verify code signature with monitor: %d\n", ret);
		return EPERM;
	}

	/* Profiles are only needed for verification; drop them now (see above) */
	ret = csm_disassociate_provisioning_profile(cs_blob->csb_csm_obj);
	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND) && (ret != KERN_NOT_SUPPORTED)) {
		printf("unable to disassociate profile from code signature: %d\n", ret);
		return EPERM;
	}

	/* Associate the OSEntitlements kernel object with the monitor */
	ret = csm_associate_os_entitlements(cs_blob->csb_csm_obj, cs_blob->csb_entitlements);
	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
		printf("unable to associate OSEntitlements with monitor: %d\n", ret);
		return EPERM;
	}

	return 0;
}
4291 
4292 /**
4293  * Register the code signature with the code signing monitor environment. This
4294  * will effectively make the blob data immutable, either because the blob memory
4295  * will be allocated and managed directory by the monitor, or because the monitor
4296  * will lockdown the memory associated with the blob.
4297  */
4298 static errno_t
register_code_signature_monitor(struct vnode * vnode,struct cs_blob * cs_blob,vm_offset_t code_directory_offset)4299 register_code_signature_monitor(
4300 	struct vnode *vnode,
4301 	struct cs_blob *cs_blob,
4302 	vm_offset_t code_directory_offset)
4303 {
4304 	kern_return_t ret = KERN_DENIED;
4305 	vm_address_t monitor_signature_addr = 0;
4306 	void *monitor_sig_object = NULL;
4307 	const char *vnode_path_ptr = NULL;
4308 
4309 	/*
4310 	 * Attempt to resolve the path for this vnode and pass it in to the code
4311 	 * signing monitor during registration.
4312 	 */
4313 	int vnode_path_len = MAXPATHLEN;
4314 	char *vnode_path = kalloc_data(vnode_path_len, Z_WAITOK);
4315 
4316 	/*
4317 	 * Taking a reference on the vnode recursively can sometimes lead to a
4318 	 * deadlock on the system. Since we already have a vnode pointer, it means
4319 	 * the caller performed a vnode lookup, which implicitly takes a reference
4320 	 * on the vnode. However, there is more than just having a reference on a
4321 	 * vnode which is important. vnode's also have an iocount, and we must only
4322 	 * access a vnode which has an iocount of greater than 0. Thankfully, all
4323 	 * the conditions which lead to calling this function ensure that this
4324 	 * vnode is safe to access here.
4325 	 *
4326 	 * For more details: rdar://105819068.
4327 	 */
4328 	errno_t error = vn_getpath(vnode, vnode_path, &vnode_path_len);
4329 	if (error == 0) {
4330 		vnode_path_ptr = vnode_path;
4331 	}
4332 
4333 	ret = csm_register_code_signature(
4334 		(vm_address_t)cs_blob->csb_mem_kaddr,
4335 		cs_blob->csb_mem_size,
4336 		code_directory_offset,
4337 		vnode_path_ptr,
4338 		&monitor_sig_object,
4339 		&monitor_signature_addr);
4340 
4341 	kfree_data(vnode_path, MAXPATHLEN);
4342 	vnode_path_ptr = NULL;
4343 
4344 	if (ret == KERN_SUCCESS) {
4345 		/* Reconstruct the cs_blob if the monitor used its own allocation */
4346 		if (monitor_signature_addr != (vm_address_t)cs_blob->csb_mem_kaddr) {
4347 			vm_address_t monitor_signature_size = cs_blob->csb_mem_size;
4348 
4349 			/* Free the old memory for the blob */
4350 			ubc_cs_blob_deallocate(
4351 				(vm_address_t)cs_blob->csb_mem_kaddr,
4352 				cs_blob->csb_mem_size);
4353 
4354 			/* Reconstruct critical fields in the blob object */
4355 			ubc_cs_blob_reconstruct(
4356 				cs_blob,
4357 				monitor_signature_addr,
4358 				monitor_signature_size,
4359 				code_directory_offset);
4360 
4361 			/* Mark the signature as monitor managed */
4362 			cs_blob->csb_csm_managed = true;
4363 		}
4364 	} else if (ret != KERN_NOT_SUPPORTED) {
4365 		printf("unable to register code signature with monitor: %d\n", ret);
4366 		return EPERM;
4367 	}
4368 
4369 	/* Save the monitor handle for the signature object -- may be NULL */
4370 	cs_blob->csb_csm_obj = monitor_sig_object;
4371 
4372 	return 0;
4373 }
4374 
4375 #endif /* CODE_SIGNING_MONITOR */
4376 
4377 static errno_t
validate_main_binary_check(struct cs_blob * csblob,cs_blob_add_flags_t csblob_add_flags)4378 validate_main_binary_check(
4379 	struct cs_blob *csblob,
4380 	cs_blob_add_flags_t csblob_add_flags)
4381 {
4382 #if XNU_TARGET_OS_OSX
4383 	(void)csblob;
4384 	(void)csblob_add_flags;
4385 	return 0;
4386 #else
4387 	const CS_CodeDirectory *first_cd = NULL;
4388 	const CS_CodeDirectory *alt_cd = NULL;
4389 	uint64_t exec_seg_flags = 0;
4390 	uint32_t slot = CSSLOT_CODEDIRECTORY;
4391 
4392 	/* Nothing to enforce if we're allowing main binaries */
4393 	if ((csblob_add_flags & CS_BLOB_ADD_ALLOW_MAIN_BINARY) != 0) {
4394 		return 0;
4395 	}
4396 
4397 	first_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4398 	if ((first_cd != NULL) && (ntohl(first_cd->version) >= CS_SUPPORTSEXECSEG)) {
4399 		exec_seg_flags |= ntohll(first_cd->execSegFlags);
4400 	}
4401 
4402 	for (uint32_t i = 0; i < CSSLOT_ALTERNATE_CODEDIRECTORY_MAX; i++) {
4403 		slot = CSSLOT_ALTERNATE_CODEDIRECTORIES + i;
4404 		alt_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4405 		if ((alt_cd == NULL) || (ntohl(alt_cd->version) < CS_SUPPORTSEXECSEG)) {
4406 			continue;
4407 		}
4408 		exec_seg_flags |= ntohll(alt_cd->execSegFlags);
4409 	}
4410 
4411 	if ((exec_seg_flags & CS_EXECSEG_MAIN_BINARY) != 0) {
4412 		return EBADEXEC;
4413 	}
4414 	return 0;
4415 #endif /* XNU_TARGET_OS_OSX */
4416 }
4417 
4418 /**
4419  * Accelerate entitlements for a code signature object. When we have a code
4420  * signing monitor, this acceleration is done within the monitor which then
4421  * passes back a CoreEntitlements query context the kernel can use. When we
4422  * don't have a code signing monitor, we accelerate the queries within the
4423  * kernel memory itself.
4424  *
4425  * This function must be called when the storage for the code signature can
4426  * no longer change.
4427  */
static errno_t
accelerate_entitlement_queries(
	struct cs_blob *cs_blob)
{
	kern_return_t ret = KERN_NOT_SUPPORTED;

#if CODE_SIGNING_MONITOR
	CEQueryContext_t ce_ctx = NULL;
	const char *signing_id = NULL;

	/* Preferred path: let the monitor accelerate the entitlements */
	ret = csm_accelerate_entitlements(cs_blob->csb_csm_obj, &ce_ctx);
	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
		printf("unable to accelerate entitlements through the monitor: %d\n", ret);
		return EPERM;
	}

	if (ret == KERN_SUCCESS) {
		/* Call cannot fail at this stage */
		ret = csm_acquire_signing_identifier(cs_blob->csb_csm_obj, &signing_id);
		assert(ret == KERN_SUCCESS);

		/* Adjust the OSEntitlements context with AMFI */
		ret = amfi->OSEntitlements.adjustContextWithMonitor(
			cs_blob->csb_entitlements,
			ce_ctx,
			cs_blob->csb_csm_obj,
			signing_id,
			cs_blob->csb_flags);
		if (ret != KERN_SUCCESS) {
			printf("unable to adjust OSEntitlements context with monitor: %d\n", ret);
			return EPERM;
		}

		return 0;
	}
#endif

	/*
	 * If we reach here, then either we don't have a code signing monitor, or
	 * the code signing monitor isn't enabled for code signing, in which case,
	 * AMFI is going to accelerate the entitlements context and adjust its
	 * context on its own.
	 */
	assert(ret == KERN_NOT_SUPPORTED);

	ret = amfi->OSEntitlements.adjustContextWithoutMonitor(
		cs_blob->csb_entitlements,
		cs_blob);

	if (ret != KERN_SUCCESS) {
		printf("unable to adjust OSEntitlements context without monitor: %d\n", ret);
		return EPERM;
	}

	return 0;
}
4484 
4485 /**
4486  * Ensure and validate that some security critical code signing blobs haven't
4487  * been stripped off from the code signature. This can happen if an attacker
4488  * chose to load a code signature sans these critical blobs, or if there is a
4489  * bug in reconstitution logic which remove these blobs from the code signature.
4490  */
4491 static errno_t
validate_auxiliary_signed_blobs(struct cs_blob * cs_blob)4492 validate_auxiliary_signed_blobs(
4493 	struct cs_blob *cs_blob)
4494 {
4495 	struct cs_blob_identifier {
4496 		uint32_t cs_slot;
4497 		uint32_t cs_magic;
4498 	};
4499 
4500 	const struct cs_blob_identifier identifiers[] = {
4501 		{CSSLOT_LAUNCH_CONSTRAINT_SELF, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4502 		{CSSLOT_LAUNCH_CONSTRAINT_PARENT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4503 		{CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4504 		{CSSLOT_LIBRARY_CONSTRAINT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT}
4505 	};
4506 	const uint32_t num_identifiers = sizeof(identifiers) / sizeof(identifiers[0]);
4507 
4508 	for (uint32_t i = 0; i < num_identifiers; i++) {
4509 		errno_t err = csblob_find_special_slot_blob(
4510 			cs_blob,
4511 			identifiers[i].cs_slot,
4512 			identifiers[i].cs_magic,
4513 			NULL,
4514 			NULL);
4515 
4516 		if (err != 0) {
4517 			printf("unable to validate security-critical blob: %d [%u|%u]\n",
4518 			    err, identifiers[i].cs_slot, identifiers[i].cs_magic);
4519 
4520 			return EPERM;
4521 		}
4522 	}
4523 
4524 	return 0;
4525 }
4526 
4527 /**
4528  * Setup multi-level hashing for the code signature. This isn't supported on most
4529  * shipping devices, but on ones where it is, it can result in significant savings
4530  * of memory from the code signature standpoint.
4531  *
4532  * Multi-level hashing is used to condense the code directory hashes in order to
4533  * improve memory consumption. We take four 4K page hashes, and condense them into
4534  * a single 16K hash, hence reducing the space consumed by the code directory by
4535  * about ~75%.
4536  */
4537 static errno_t
setup_multilevel_hashing(struct cs_blob * cs_blob)4538 setup_multilevel_hashing(
4539 	struct cs_blob *cs_blob)
4540 {
4541 	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
4542 	errno_t err = -1;
4543 
4544 	/*
4545 	 * When we have a code signing monitor, we do not support multi-level hashing
4546 	 * since the code signature data is expected to be locked within memory which
4547 	 * cannot be written to by the kernel.
4548 	 *
4549 	 * Even when the code signing monitor isn't explicitly enabled, there are other
4550 	 * reasons for not performing multi-level hashing. For instance, Rosetta creates
4551 	 * issues with multi-level hashing on Apple Silicon Macs.
4552 	 */
4553 	code_signing_configuration(&monitor_type, NULL);
4554 	if (monitor_type != CS_MONITOR_TYPE_NONE) {
4555 		return 0;
4556 	}
4557 
4558 	/* We need to check if multi-level hashing is supported for this blob */
4559 	if (ubc_cs_supports_multilevel_hash(cs_blob) == false) {
4560 		return 0;
4561 	}
4562 
4563 	err = ubc_cs_convert_to_multilevel_hash(cs_blob);
4564 	if (err != 0) {
4565 		printf("unable to setup multi-level hashing: %d\n", err);
4566 		return err;
4567 	}
4568 
4569 	assert(cs_blob->csb_reconstituted == true);
4570 	return 0;
4571 }
4572 
4573 /**
4574  * Once code signature validation is complete, we can remove even more blobs from the
4575  * code signature as they are no longer needed. This goes on to conserve even more
4576  * system memory.
4577  */
static errno_t
reconstitute_code_signature_2nd_stage(
	struct cs_blob *cs_blob)
{
	/*
	 * 'ret' doubles as a sentinel: it stays KERN_NOT_SUPPORTED unless a code
	 * signing monitor is compiled in and csm_reconstitute_code_signature runs.
	 * The check below relies on this to decide whether to fall back to the
	 * in-kernel 2nd stage path.
	 */
	kern_return_t ret = KERN_NOT_SUPPORTED;
	errno_t err = EPERM;

	/* If we never reconstituted before, we won't be reconstituting again */
	if (cs_blob->csb_reconstituted == false) {
		return 0;
	}

#if CODE_SIGNING_MONITOR
	/*
	 * When we have a code signing monitor, the code signature is immutable until the
	 * monitor decides to unlock parts of it. Therefore, 2nd stage reconstitution takes
	 * place in the monitor when we have a monitor available.
	 *
	 * If the monitor isn't enforcing code signing (in which case the code signature is
	 * NOT immutable), then we perform 2nd stage reconstitution within the kernel itself.
	 */
	vm_address_t unneeded_addr = 0;
	vm_size_t unneeded_size = 0;

	ret = csm_reconstitute_code_signature(
		cs_blob->csb_csm_obj,
		&unneeded_addr,
		&unneeded_size);

	if ((ret == KERN_SUCCESS) && unneeded_addr && unneeded_size) {
		/* Free the unneeded part of the blob */
		kmem_free(kernel_map, unneeded_addr, unneeded_size);

		/* Adjust the size in the blob object */
		cs_blob->csb_mem_size -= unneeded_size;
	}
#endif

	if (ret == KERN_SUCCESS) {
		goto success;
	} else if (ret != KERN_NOT_SUPPORTED) {
		/*
		 * A monitor environment is available, and it failed in performing 2nd stage
		 * reconstitution. This is a fatal issue for code signing validation.
		 */
		printf("unable to reconstitute code signature through monitor: %d\n", ret);
		return EPERM;
	}

	/* No monitor available if we reached here */
	err = ubc_cs_reconstitute_code_signature_2nd_stage(cs_blob);
	if (err != 0) {
		return err;
	}

success:
	/*
	 * Regardless of whether we are performing 2nd stage reconstitution in the monitor
	 * or in the kernel, we remove references to XML entitlements from the blob here.
	 * None of the 2nd stage reconstitution code ever keeps these around, and they have
	 * been explicitly deprecated and disallowed.
	 */
	cs_blob->csb_entitlements_blob = NULL;

	return 0;
}
4644 
4645 /**
4646  * A code signature blob often contains blob which aren't needed in the kernel. Since
4647  * the code signature is wired into kernel memory for the time it is used, it behooves
4648  * us to remove any blobs we have no need for in order to conserve memory.
4649  *
4650  * Some platforms support copying the entire SuperBlob stored in kernel memory into
4651  * userspace memory through the "csops" system call. There is an expectation that when
4652  * this happens, all the blobs which were a part of the code signature are copied in
4653  * to userspace memory. As a result, these platforms cannot reconstitute the code
4654  * signature since, or rather, these platforms cannot remove blobs from the signature,
4655  * thereby making reconstitution useless.
4656  */
4657 static errno_t
reconstitute_code_signature(struct cs_blob * cs_blob)4658 reconstitute_code_signature(
4659 	struct cs_blob *cs_blob)
4660 {
4661 	CS_CodeDirectory *code_directory = NULL;
4662 	vm_address_t signature_addr = 0;
4663 	vm_size_t signature_size = 0;
4664 	vm_offset_t code_directory_offset = 0;
4665 	bool platform_supports_reconstitution = false;
4666 
4667 #if CONFIG_CODE_SIGNATURE_RECONSTITUTION
4668 	platform_supports_reconstitution = true;
4669 #endif
4670 
4671 	/*
4672 	 * We can skip reconstitution if the code signing monitor isn't available or not
4673 	 * enabled. But if we do have a monitor, then reconsitution becomes required, as
4674 	 * there is an expectation of performing 2nd stage reconstitution through the
4675 	 * monitor itself.
4676 	 */
4677 	if (platform_supports_reconstitution == false) {
4678 #if CODE_SIGNING_MONITOR
4679 		if (csm_enabled() == true) {
4680 			printf("reconstitution required when code signing monitor is enabled\n");
4681 			return EPERM;
4682 		}
4683 #endif
4684 		return 0;
4685 	}
4686 
4687 	errno_t err = ubc_cs_reconstitute_code_signature(
4688 		cs_blob,
4689 		&signature_addr,
4690 		&signature_size,
4691 		0,
4692 		&code_directory);
4693 
4694 	if (err != 0) {
4695 		printf("unable to reconstitute code signature: %d\n", err);
4696 		return err;
4697 	}
4698 
4699 	/* Calculate the code directory offset */
4700 	code_directory_offset = (vm_offset_t)code_directory - signature_addr;
4701 
4702 	/* Reconstitution allocates new memory -- free the old one */
4703 	ubc_cs_blob_deallocate((vm_address_t)cs_blob->csb_mem_kaddr, cs_blob->csb_mem_size);
4704 
4705 	/* Reconstruct critical fields in the blob object */
4706 	ubc_cs_blob_reconstruct(
4707 		cs_blob,
4708 		signature_addr,
4709 		signature_size,
4710 		code_directory_offset);
4711 
4712 	/* Mark the object as reconstituted */
4713 	cs_blob->csb_reconstituted = true;
4714 
4715 	return 0;
4716 }
4717 
/**
 * Validate a code signature blob and attach it to a vnode.
 *
 * The blob at *addr is structurally validated, optionally reconstituted
 * (1st and 2nd stage), checked against MACF policy and any code signing
 * monitor, converted to multi-level hashing where supported, and finally
 * published as a read-only cs_blob at the head of uip->cs_blobs.
 *
 * Returns 0 on success (including when an identical blob was already
 * attached — the internal EAGAIN is mapped back to 0), or an errno on
 * failure. On failure the temporary blob and the RO allocation are freed.
 * Note: 'flags' is marked __unused but is consumed under CONFIG_MACF;
 * 'platform' is likewise only passed to mac_vnode_check_signature.
 */
int
ubc_cs_blob_add(
	struct vnode    *vp,
	uint32_t        platform,
	cpu_type_t      cputype,
	cpu_subtype_t   cpusubtype,
	off_t           base_offset,
	vm_address_t    *addr,
	vm_size_t       size,
	struct image_params *imgp,
	__unused int    flags,
	struct cs_blob  **ret_blob,
	cs_blob_add_flags_t csblob_add_flags)
{
	ptrauth_generic_signature_t cs_blob_sig = {0};
	struct ubc_info *uip = NULL;
	struct cs_blob tmp_blob = {0};
	struct cs_blob *blob_ro = NULL;
	struct cs_blob *oblob = NULL;
	CS_CodeDirectory const *cd = NULL;
	off_t blob_start_offset = 0;
	off_t blob_end_offset = 0;
	boolean_t record_mtime = false;
	kern_return_t kr = KERN_DENIED;
	errno_t error = -1;

#if HAS_APPLE_PAC
	void *signed_entitlements = NULL;
#if CODE_SIGNING_MONITOR
	void *signed_monitor_obj = NULL;
#endif
#endif

	if (ret_blob) {
		*ret_blob = NULL;
	}

	/*
	 * Create the struct cs_blob abstract data type which will get attached to
	 * the vnode object. This function also validates the structural integrity
	 * of the code signature blob being passed in.
	 *
	 * We initialize a temporary blob whose contents are then copied into an RO
	 * blob which we allocate from the read-only allocator.
	 */
	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
	if (error != 0) {
		printf("unable to create a validated cs_blob object: %d\n", error);
		return error;
	}

	tmp_blob.csb_cpu_type = cputype;
	tmp_blob.csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
	tmp_blob.csb_base_offset = base_offset;

	/* Perform 1st stage reconstitution */
	error = reconstitute_code_signature(&tmp_blob);
	if (error != 0) {
		goto out;
	}

	/*
	 * There is a strong design pattern we have to follow carefully within this
	 * function. Since we're storing the struct cs_blob within RO-allocated
	 * memory, it is immutable to modifications from within the kernel itself.
	 *
	 * However, before the contents of the blob are transferred to the immutable
	 * cs_blob, they are kept on the stack. In order to protect against a kernel
	 * R/W attacker, we must protect this stack variable. Most importantly, any
	 * code paths which can block for a while must compute a PAC signature over
	 * the stack variable, then perform the blocking operation, and then ensure
	 * that the PAC signature over the stack variable is still valid to ensure
	 * that an attacker did not overwrite contents of the blob by introducing a
	 * maliciously long blocking operation, giving them the time required to go
	 * and overwrite the contents of the blob.
	 *
	 * The most important fields to protect here are the OSEntitlements and the
	 * code signing monitor object references. For these ones, we keep around
	 * extra signed pointers diversified against the read-only blobs' memory
	 * and then update the stack variable with these before updating the full
	 * read-only blob.
	 */

	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
	assert(blob_ro != NULL);

	tmp_blob.csb_ro_addr = blob_ro;
	tmp_blob.csb_vnode = vp;

	/* AMFI needs to see the current blob state at the RO address */
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

#if CODE_SIGNING_MONITOR
	error = register_code_signature_monitor(
		vp,
		&tmp_blob,
		(vm_offset_t)tmp_blob.csb_cd - (vm_offset_t)tmp_blob.csb_mem_kaddr);

	if (error != 0) {
		goto out;
	}

#if HAS_APPLE_PAC
	/*
	 * Pre-sign the monitor object pointer diversified against its final
	 * location in the RO blob; installed into tmp_blob just before the final
	 * RO update below.
	 */
	signed_monitor_obj = ptrauth_sign_unauthenticated(
		tmp_blob.csb_csm_obj,
		ptrauth_key_process_independent_data,
		ptrauth_blend_discriminator(&blob_ro->csb_csm_obj,
		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_csm_obj")));
#endif /* HAS_APPLE_PAC */

#endif /* CODE_SIGNING_MONITOR */

	/*
	 * Ensure that we're honoring the main binary policy check on platforms which
	 * require it. We perform this check at this stage to ensure the blob we're
	 * looking at has been locked down by a code signing monitor if the system
	 * has one.
	 */
	error = validate_main_binary_check(&tmp_blob, csblob_add_flags);
	if (error != 0) {
		printf("failed to verify main binary policy: %d\n", error);
		goto out;
	}

#if CONFIG_MACF
	unsigned int cs_flags = tmp_blob.csb_flags;
	unsigned int signer_type = tmp_blob.csb_signer_type;

	error = mac_vnode_check_signature(
		vp,
		&tmp_blob,
		imgp,
		&cs_flags,
		&signer_type,
		flags,
		platform);

	if (error != 0) {
		printf("validation of code signature failed through MACF policy: %d\n", error);
		goto out;
	}

#if HAS_APPLE_PAC
	/* Same pre-signing scheme as the monitor object, for the entitlements */
	signed_entitlements = ptrauth_sign_unauthenticated(
		tmp_blob.csb_entitlements,
		ptrauth_key_process_independent_data,
		ptrauth_blend_discriminator(&blob_ro->csb_entitlements,
		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements")));
#endif

	tmp_blob.csb_flags = cs_flags;
	tmp_blob.csb_signer_type = signer_type;

	if (tmp_blob.csb_flags & CS_PLATFORM_BINARY) {
		tmp_blob.csb_platform_binary = 1;
		tmp_blob.csb_platform_path = !!(tmp_blob.csb_flags & CS_PLATFORM_PATH);
		tmp_blob.csb_teamid = NULL;
	} else {
		tmp_blob.csb_platform_binary = 0;
		tmp_blob.csb_platform_path = 0;
	}

	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !tmp_blob.csb_platform_binary) {
		printf("dyld simulator runtime is not apple signed: proc: %d\n",
		    proc_getpid(current_proc()));

		error = EPERM;
		goto out;
	}
#endif /* CONFIG_MACF */

#if CODE_SIGNING_MONITOR
	error = verify_code_signature_monitor(&tmp_blob);
	if (error != 0) {
		goto out;
	}
#endif

	/* Perform 2nd stage reconstitution */
	error = reconstitute_code_signature_2nd_stage(&tmp_blob);
	if (error != 0) {
		goto out;
	}

	/* Setup any multi-level hashing for the code signature */
	error = setup_multilevel_hashing(&tmp_blob);
	if (error != 0) {
		goto out;
	}

	/* Ensure security critical auxiliary blobs still exist */
	error = validate_auxiliary_signed_blobs(&tmp_blob);
	if (error != 0) {
		goto out;
	}

	/*
	 * Accelerate the entitlement queries for this code signature. This must
	 * be done only after we know that the code signature pointers within the
	 * struct cs_blob aren't going to be shifted around anymore, which is why
	 * this acceleration is done after setting up multilevel hashing, since
	 * that is the last part of signature validation which can shift the code
	 * signature around.
	 */
	error = accelerate_entitlement_queries(&tmp_blob);
	if (error != 0) {
		goto out;
	}

	/*
	 * Parse and set the Team ID for this code signature. This only needs to
	 * happen when the signature isn't marked as platform. Like above, this
	 * has to happen after we know the pointers within struct cs_blob aren't
	 * going to be shifted anymore.
	 */
	if ((tmp_blob.csb_flags & CS_PLATFORM_BINARY) == 0) {
		tmp_blob.csb_teamid = csblob_parse_teamid(&tmp_blob);
	}

	/*
	 * Validate the code signing blob's coverage. Ideally, we can just do this
	 * in the beginning, right after structural validation, however, multilevel
	 * hashing can change some offets.
	 */
	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
	if (blob_start_offset >= blob_end_offset) {
		error = EINVAL;
		goto out;
	} else if (blob_start_offset < 0 || blob_end_offset <= 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The vnode_lock, linked list traversal, and marking of the memory object as
	 * signed can all be blocking operations. Compute a PAC over the tmp_blob.
	 */
	cs_blob_sig = ptrauth_utils_sign_blob_generic(
		&tmp_blob,
		sizeof(tmp_blob),
		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
		PTRAUTH_ADDR_DIVERSIFY);

	vnode_lock(vp);
	if (!UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = ubc_get_cs_blobs(vp);
	    oblob != NULL;
	    oblob = oblob->csb_next) {
		off_t oblob_start_offset, oblob_end_offset;

		if (tmp_blob.csb_signer_type != oblob->csb_signer_type) {  // signer type needs to be the same for slices
			vnode_unlock(vp);
			error = EALREADY;
			goto out;
		} else if (tmp_blob.csb_platform_binary) {  //platform binary needs to be the same for app slices
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (tmp_blob.csb_teamid) {  //teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, tmp_blob.csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else {  // non teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}

		oblob_start_offset = (oblob->csb_base_offset +
		    oblob->csb_start_offset);
		oblob_end_offset = (oblob->csb_base_offset +
		    oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* no conflict with this existing blob */
		} else {
			/* conflict ! */
			if (blob_start_offset == oblob_start_offset &&
			    blob_end_offset == oblob_end_offset &&
			    tmp_blob.csb_mem_size == oblob->csb_mem_size &&
			    tmp_blob.csb_flags == oblob->csb_flags &&
			    (tmp_blob.csb_cpu_type == CPU_TYPE_ANY ||
			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
			    tmp_blob.csb_cpu_type == oblob->csb_cpu_type) &&
			    !bcmp(tmp_blob.csb_cdhash,
			    oblob->csb_cdhash,
			    CS_CDHASH_LEN)) {
				/*
				 * We already have this blob:
				 * we'll return success but
				 * throw away the new blob.
				 */
				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					/*
					 * The old blob matches this one
					 * but doesn't have any CPU type.
					 * Update it with whatever the caller
					 * provided this time.
					 */
					cs_blob_set_cpu_type(oblob, cputype);
				}

				/* The signature is still accepted, so update the
				 * generation count. */
				uip->cs_add_gen = cs_blob_generation_count;

				vnode_unlock(vp);
				if (ret_blob) {
					*ret_blob = oblob;
				}
				error = EAGAIN;
				goto out;
			} else {
				/* different blob: reject the new one */
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/* set the generation count for cs_blobs */
	uip->cs_add_gen = cs_blob_generation_count;

	/* Authenticate the PAC signature after blocking operation */
	ptrauth_utils_auth_blob_generic(
		&tmp_blob,
		sizeof(tmp_blob),
		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
		PTRAUTH_ADDR_DIVERSIFY,
		cs_blob_sig);

	/* Update the system statistics for code signatures blobs */
	ubc_cs_blob_adjust_statistics(&tmp_blob);

	/* Update the list pointer to reference other blobs for this vnode */
	tmp_blob.csb_next = uip->cs_blobs;

#if HAS_APPLE_PAC
	/*
	 * Update all the critical pointers in the blob with the RO diversified
	 * values before updating the read-only blob with the full contents of
	 * the struct cs_blob. We need to use memcpy here as otherwise a simple
	 * assignment will cause the compiler to re-sign using the stack variable
	 * as the address diversifier.
	 */
	memcpy((void*)&tmp_blob.csb_entitlements, &signed_entitlements, sizeof(void*));
#if CODE_SIGNING_MONITOR
	memcpy((void*)&tmp_blob.csb_csm_obj, &signed_monitor_obj, sizeof(void*));
#endif
#endif
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	/* Add a fence to ensure writes to the blob are visible on all threads */
	os_atomic_thread_fence(seq_cst);

	/*
	 * Add the cs_blob to the front of the list of blobs for this vnode. We
	 * add to the front of the list, and we never remove a blob from the list
	 * which means ubc_cs_get_blobs can return whatever the top of the list
	 * is, while still keeping the list valid. Useful for if we validate a
	 * page while adding in a new blob for this vnode.
	 */
	uip->cs_blobs = blob_ro;

	/* Make sure to reload pointer from uip to double check */
	if (uip->cs_blobs->csb_next) {
		zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), uip->cs_blobs->csb_next);
	}

	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		    "loaded %s signatures for file (%s) "
		    "range 0x%llx:0x%llx flags 0x%x\n",
		    proc_getpid(p), p->p_comm,
		    blob_ro->csb_cpu_type == -1 ? "detached" : "embedded",
		    name,
		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
		    blob_ro->csb_base_offset + blob_ro->csb_end_offset,
		    blob_ro->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	if (ret_blob) {
		*ret_blob = blob_ro;
	}

	error = 0;      /* success ! */

out:
	if (error) {
		if (error != EAGAIN) {
			printf("check_signature[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
		}

		cs_blob_cleanup(&tmp_blob);
		if (blob_ro) {
			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
		}
	}

	if (error == EAGAIN) {
		/*
		 * See above:  error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
	}

	return error;
}
5169 
5170 #if CONFIG_SUPPLEMENTAL_SIGNATURES
/**
 * Attach a supplemental code signature to vnode 'vp', linked against a code
 * signature already attached to 'orig_vp'. The supplement's linkage cdhash
 * must match a CS_VALID blob on the original vnode; platform/team identity
 * is inherited from that matched blob. Only one supplement per vnode is
 * supported (uip->cs_blob_supplement). Returns 0 on success (including when
 * the identical supplement is already attached), or an errno on failure.
 */
int
ubc_cs_blob_add_supplement(
	struct vnode    *vp,
	struct vnode    *orig_vp,
	off_t           base_offset,
	vm_address_t    *addr,
	vm_size_t       size,
	struct cs_blob  **ret_blob)
{
	kern_return_t           kr;
	struct ubc_info         *uip, *orig_uip;
	int                     error;
	struct cs_blob          tmp_blob;
	struct cs_blob          *orig_blob;
	struct cs_blob          *blob_ro = NULL;
	CS_CodeDirectory const *cd;
	off_t                   blob_start_offset, blob_end_offset;

	if (ret_blob) {
		*ret_blob = NULL;
	}

	/* Create the struct cs_blob wrapper that will be attached to the vnode.
	 * Validates the passed in blob in the process. */
	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);

	if (error != 0) {
		printf("malformed code signature supplement blob: %d\n", error);
		return error;
	}

	/* -1 CPU type marks this blob as detached (not tied to a slice) */
	tmp_blob.csb_cpu_type = -1;
	tmp_blob.csb_base_offset = base_offset;

	tmp_blob.csb_reconstituted = false;

	vnode_lock(orig_vp);
	if (!UBCINFOEXISTS(orig_vp)) {
		vnode_unlock(orig_vp);
		error = ENOENT;
		goto out;
	}

	orig_uip = orig_vp->v_ubcinfo;

	/* check that the supplement's linked cdhash matches a cdhash of
	 * the target image.
	 */

	if (tmp_blob.csb_linkage_hashtype == NULL) {
		proc_t p;
		const char *iname = vnode_getname_printable(vp);
		p = current_proc();

		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
		    "is not a supplemental.\n",
		    proc_getpid(p), p->p_comm, iname);

		error = EINVAL;

		vnode_putname_printable(iname);
		vnode_unlock(orig_vp);
		goto out;
	}
	bool found_but_not_valid = false;
	for (orig_blob = ubc_get_cs_blobs(orig_vp); orig_blob != NULL;
	    orig_blob = orig_blob->csb_next) {
		if (orig_blob->csb_hashtype == tmp_blob.csb_linkage_hashtype &&
		    memcmp(orig_blob->csb_cdhash, tmp_blob.csb_linkage, CS_CDHASH_LEN) == 0) {
			// Found match!
			found_but_not_valid = ((orig_blob->csb_flags & CS_VALID) != CS_VALID);
			break;
		}
	}

	if (orig_blob == NULL || found_but_not_valid) {
		// Not found, or found but the original blob is no longer valid.

		proc_t p;
		const char *iname = vnode_getname_printable(vp);
		p = current_proc();

		error = (orig_blob == NULL) ? ESRCH : EPERM;

		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
		    "does not match any attached cdhash (error: %d).\n",
		    proc_getpid(p), p->p_comm, iname, error);

		vnode_putname_printable(iname);
		vnode_unlock(orig_vp);
		goto out;
	}

	vnode_unlock(orig_vp);

	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
	tmp_blob.csb_ro_addr = blob_ro;
	tmp_blob.csb_vnode = vp;

	/* AMFI needs to see the current blob state at the RO address. */
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	// validate the signature against policy!
#if CONFIG_MACF
	unsigned int signer_type = tmp_blob.csb_signer_type;
	error = mac_vnode_check_supplemental_signature(vp, &tmp_blob, orig_vp, orig_blob, &signer_type);

	tmp_blob.csb_signer_type = signer_type;

	if (error) {
		if (cs_debug) {
			printf("check_supplemental_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
		}
		goto out;
	}
#endif

	// We allowed the supplemental signature blob so
	// copy the platform bit or team-id from the linked signature and whether or not the original is developer code
	tmp_blob.csb_platform_binary = 0;
	tmp_blob.csb_platform_path = 0;
	if (orig_blob->csb_platform_binary == 1) {
		tmp_blob.csb_platform_binary = orig_blob->csb_platform_binary;
		tmp_blob.csb_platform_path = orig_blob->csb_platform_path;
	} else if (orig_blob->csb_teamid != NULL) {
		/* Supplement owns its own copy of the team ID string */
		vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
		tmp_blob.csb_supplement_teamid = kalloc_data(teamid_size, Z_WAITOK);
		if (tmp_blob.csb_supplement_teamid == NULL) {
			error = ENOMEM;
			goto out;
		}
		strlcpy(tmp_blob.csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
	}
	tmp_blob.csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);

	// Validate the blob's coverage
	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;

	if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	vnode_lock(vp);
	if (!UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	struct cs_blob *existing = uip->cs_blob_supplement;
	if (existing != NULL) {
		if (tmp_blob.csb_hashtype == existing->csb_hashtype &&
		    memcmp(tmp_blob.csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
			error = EAGAIN; // non-fatal
		} else {
			error = EALREADY; // fatal
		}

		vnode_unlock(vp);
		goto out;
	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}


	/* We still adjust statistics even for supplemental blobs, as they
	 * consume memory just the same. */
	ubc_cs_blob_adjust_statistics(&tmp_blob);
	/* Unlike regular cs_blobs, we only ever support one supplement. */
	tmp_blob.csb_next = NULL;
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	os_atomic_thread_fence(seq_cst); // Fence to prevent reordering here
	uip->cs_blob_supplement = blob_ro;

	/* Make sure to reload pointer from uip to double check */
	if (__improbable(uip->cs_blob_supplement->csb_next)) {
		panic("csb_next does not match expected NULL value");
	}

	vnode_unlock(vp);


	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		    "loaded supplemental signature for file (%s) "
		    "range 0x%llx:0x%llx\n",
		    proc_getpid(p), p->p_comm,
		    name,
		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
		    blob_ro->csb_base_offset + blob_ro->csb_end_offset);
		vnode_putname_printable(name);
	}

	if (ret_blob) {
		*ret_blob = blob_ro;
	}

	error = 0; // Success!
out:
	if (error) {
		if (cs_debug) {
			printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
		}

		cs_blob_cleanup(&tmp_blob);
		if (blob_ro) {
			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
		}
	}

	if (error == EAGAIN) {
		/* We were asked to add an existing blob.
		 * We cleaned up and ignore the attempt. */
		error = 0;
	}

	return error;
}
5403 #endif
5404 
5405 
5406 
5407 void
csvnode_print_debug(struct vnode * vp)5408 csvnode_print_debug(struct vnode *vp)
5409 {
5410 	const char      *name = NULL;
5411 	struct ubc_info *uip;
5412 	struct cs_blob *blob;
5413 
5414 	name = vnode_getname_printable(vp);
5415 	if (name) {
5416 		printf("csvnode: name: %s\n", name);
5417 		vnode_putname_printable(name);
5418 	}
5419 
5420 	vnode_lock_spin(vp);
5421 
5422 	if (!UBCINFOEXISTS(vp)) {
5423 		blob = NULL;
5424 		goto out;
5425 	}
5426 
5427 	uip = vp->v_ubcinfo;
5428 	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
5429 		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
5430 		    (unsigned long)blob->csb_start_offset,
5431 		    (unsigned long)blob->csb_end_offset,
5432 		    blob->csb_flags,
5433 		    blob->csb_platform_binary ? "yes" : "no",
5434 		    blob->csb_platform_path ? "yes" : "no",
5435 		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
5436 	}
5437 
5438 out:
5439 	vnode_unlock(vp);
5440 }
5441 
5442 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5443 struct cs_blob *
ubc_cs_blob_get_supplement(struct vnode * vp,off_t offset)5444 ubc_cs_blob_get_supplement(
5445 	struct vnode    *vp,
5446 	off_t           offset)
5447 {
5448 	struct cs_blob *blob;
5449 	off_t offset_in_blob;
5450 
5451 	vnode_lock_spin(vp);
5452 
5453 	if (!UBCINFOEXISTS(vp)) {
5454 		blob = NULL;
5455 		goto out;
5456 	}
5457 
5458 	blob = vp->v_ubcinfo->cs_blob_supplement;
5459 
5460 	if (blob == NULL) {
5461 		// no supplemental blob
5462 		goto out;
5463 	}
5464 
5465 
5466 	if (offset != -1) {
5467 		offset_in_blob = offset - blob->csb_base_offset;
5468 		if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
5469 			// not actually covered by this blob
5470 			blob = NULL;
5471 		}
5472 	}
5473 
5474 out:
5475 	vnode_unlock(vp);
5476 
5477 	return blob;
5478 }
5479 #endif
5480 
5481 struct cs_blob *
ubc_cs_blob_get(struct vnode * vp,cpu_type_t cputype,cpu_subtype_t cpusubtype,off_t offset)5482 ubc_cs_blob_get(
5483 	struct vnode    *vp,
5484 	cpu_type_t      cputype,
5485 	cpu_subtype_t   cpusubtype,
5486 	off_t           offset)
5487 {
5488 	struct cs_blob  *blob;
5489 	off_t offset_in_blob;
5490 
5491 	vnode_lock_spin(vp);
5492 
5493 	if (!UBCINFOEXISTS(vp)) {
5494 		blob = NULL;
5495 		goto out;
5496 	}
5497 
5498 	for (blob = ubc_get_cs_blobs(vp);
5499 	    blob != NULL;
5500 	    blob = blob->csb_next) {
5501 		if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
5502 			break;
5503 		}
5504 		if (offset != -1) {
5505 			offset_in_blob = offset - blob->csb_base_offset;
5506 			if (offset_in_blob >= blob->csb_start_offset &&
5507 			    offset_in_blob < blob->csb_end_offset) {
5508 				/* our offset is covered by this blob */
5509 				break;
5510 			}
5511 		}
5512 	}
5513 
5514 out:
5515 	vnode_unlock(vp);
5516 
5517 	return blob;
5518 }
5519 
void
ubc_cs_free_and_vnode_unlock(
	vnode_t vp)
{
	/*
	 * Tear down the vnode's code-signing state if it has been marked
	 * invalid (UI_CSBLOBINVALID).  The caller must hold the vnode
	 * lock; it is released on every path out of this function.  The
	 * blob chain is detached under the lock but freed after unlocking,
	 * so the free work happens outside the vnode lock.
	 */
	struct ubc_info *uip = vp->v_ubcinfo;
	struct cs_blob  *cs_blobs, *blob, *next_blob;

	if (!(uip->ui_flags & UI_CSBLOBINVALID)) {
		/* nothing marked invalid: just honor the unlock contract */
		vnode_unlock(vp);
		return;
	}

	uip->ui_flags &= ~UI_CSBLOBINVALID;

	/* detach the blob chain while still holding the vnode lock */
	cs_blobs = uip->cs_blobs;
	uip->cs_blobs = NULL;

#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip );
#endif

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	/* likewise detach the supplemental blob, if any, under the lock */
	struct cs_blob  *cs_blob_supplement = uip->cs_blob_supplement;
	uip->cs_blob_supplement = NULL;
#endif

	/* drop the lock before freeing the detached blobs */
	vnode_unlock(vp);

	for (blob = cs_blobs;
	    blob != NULL;
	    blob = next_blob) {
		next_blob = blob->csb_next;
		/* keep the global blob count/size accounting in sync */
		os_atomic_add(&cs_blob_count, -1, relaxed);
		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
		cs_blob_ro_free(blob);
	}

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	if (cs_blob_supplement != NULL) {
		os_atomic_add(&cs_blob_count, -1, relaxed);
		os_atomic_add(&cs_blob_size, -cs_blob_supplement->csb_mem_size, relaxed);
		cs_blob_supplement_free(cs_blob_supplement);
	}
#endif
}
5565 
5566 static void
ubc_cs_free(struct ubc_info * uip)5567 ubc_cs_free(
5568 	struct ubc_info *uip)
5569 {
5570 	struct cs_blob  *blob, *next_blob;
5571 
5572 	for (blob = uip->cs_blobs;
5573 	    blob != NULL;
5574 	    blob = next_blob) {
5575 		next_blob = blob->csb_next;
5576 		os_atomic_add(&cs_blob_count, -1, relaxed);
5577 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5578 		cs_blob_ro_free(blob);
5579 	}
5580 #if CHECK_CS_VALIDATION_BITMAP
5581 	ubc_cs_validation_bitmap_deallocate( uip );
5582 #endif
5583 	uip->cs_blobs = NULL;
5584 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5585 	if (uip->cs_blob_supplement != NULL) {
5586 		blob = uip->cs_blob_supplement;
5587 		os_atomic_add(&cs_blob_count, -1, relaxed);
5588 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5589 		cs_blob_supplement_free(uip->cs_blob_supplement);
5590 		uip->cs_blob_supplement = NULL;
5591 	}
5592 #endif
5593 }
5594 
5595 /* check cs blob generation on vnode
5596  * returns:
5597  *    0         : Success, the cs_blob attached is current
5598  *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
5599  */
5600 int
ubc_cs_generation_check(struct vnode * vp)5601 ubc_cs_generation_check(
5602 	struct vnode    *vp)
5603 {
5604 	int retval = ENEEDAUTH;
5605 
5606 	vnode_lock_spin(vp);
5607 
5608 	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
5609 		retval = 0;
5610 	}
5611 
5612 	vnode_unlock(vp);
5613 	return retval;
5614 }
5615 
int
ubc_cs_blob_revalidate(
	struct vnode    *vp,
	struct cs_blob *blob,
	struct image_params *imgp,
	int flags,
	uint32_t platform
	)
{
	/*
	 * Re-validate an already-attached cs_blob: re-parse the blob from
	 * its in-memory copy, re-run the MACF signature policy check, and
	 * refresh the vnode's cached generation count.
	 *
	 * Returns 0 on success, EPERM for a previously invalidated blob,
	 * EAGAIN when the caller should re-read and re-attach the
	 * signature, or the error from blob parsing / policy evaluation.
	 */
	int error = 0;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	const CS_GenericBlob *der_entitlements = NULL;
	size_t size;
	assert(vp != NULL);
	assert(blob != NULL);

	if ((blob->csb_flags & CS_VALID) == 0) {
		// If the blob attached to the vnode was invalidated, don't try to revalidate it
		// Blob invalidation only occurs when the file that the blob is attached to is
		// opened for writing, giving us a signal that the file is modified.
		printf("CODESIGNING: can not re-validate a previously invalidated blob, reboot or create a new file.\n");
		error = EPERM;
		goto out;
	}

	/* re-parse and sanity-check the raw in-memory blob */
	size = blob->csb_mem_size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    size, &cd, &entitlements, &der_entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}

	/* recompute flags from the code directory, forcing CS_VALID back on */
	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;

	if (blob->csb_reconstituted) {
		/*
		 * Code signatures that have been modified after validation
		 * cannot be revalidated inline from their in-memory blob.
		 *
		 * That's okay, though, because the only path left that relies
		 * on revalidation of existing in-memory blobs is the legacy
		 * detached signature database path, which only exists on macOS,
		 * which does not do reconstitution of any kind.
		 */
		if (cs_debug) {
			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
		}

		/*
		 * EAGAIN tells the caller that they may reread the code
		 * signature and try attaching it again, which is the same
		 * thing they would do if there was no cs_blob yet in the
		 * first place.
		 *
		 * Conveniently, after ubc_cs_blob_add did a successful
		 * validation, it will detect that a matching cs_blob (cdhash,
		 * offset, arch etc.) already exists, and return success
		 * without re-adding a cs_blob to the vnode.
		 */
		return EAGAIN;
	}

	/* callout to mac_vnode_check_signature */
#if CONFIG_MACF
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
	}
#else
	(void)flags;
	(void)signer_type;
#endif

	/* update generation number if success */
	vnode_lock_spin(vp);
	/* publish the (possibly policy-adjusted) flags and signer type */
	struct cs_signer_info signer_info = {
		.csb_flags = cs_flags,
		.csb_signer_type = signer_type
	};
	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_signer_info, &signer_info);
	if (UBCINFOEXISTS(vp)) {
		if (error == 0) {
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		} else {
			/* zero generation forces a future re-authentication */
			vp->v_ubcinfo->cs_add_gen = 0;
		}
	}

	vnode_unlock(vp);

out:
	return error;
}
5714 
5715 void
cs_blob_reset_cache()5716 cs_blob_reset_cache()
5717 {
5718 	/* incrementing odd no by 2 makes sure '0' is never reached. */
5719 	OSAddAtomic(+2, &cs_blob_generation_count);
5720 	printf("Reseting cs_blob cache from all vnodes. \n");
5721 }
5722 
5723 struct cs_blob *
ubc_get_cs_blobs(struct vnode * vp)5724 ubc_get_cs_blobs(
5725 	struct vnode    *vp)
5726 {
5727 	struct ubc_info *uip;
5728 	struct cs_blob  *blobs;
5729 
5730 	/*
5731 	 * No need to take the vnode lock here.  The caller must be holding
5732 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5733 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5734 	 * goes away.  And we only modify "blobs" by adding to the head of the
5735 	 * list.
5736 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5737 	 * part of a forced unmount.  In the case of a code-signature validation
5738 	 * during a page fault, the "paging_in_progress" reference on the VM
5739 	 * object guarantess that the vnode pager (and the ubc_info) won't go
5740 	 * away during the fault.
5741 	 * Other callers need to protect against vnode reclaim by holding the
5742 	 * vnode lock, for example.
5743 	 */
5744 
5745 	if (!UBCINFOEXISTS(vp)) {
5746 		blobs = NULL;
5747 		goto out;
5748 	}
5749 
5750 	uip = vp->v_ubcinfo;
5751 	blobs = uip->cs_blobs;
5752 	if (blobs != NULL) {
5753 		cs_blob_require(blobs, vp);
5754 	}
5755 
5756 out:
5757 	return blobs;
5758 }
5759 
5760 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5761 struct cs_blob *
ubc_get_cs_supplement(struct vnode * vp)5762 ubc_get_cs_supplement(
5763 	struct vnode    *vp)
5764 {
5765 	struct ubc_info *uip;
5766 	struct cs_blob  *blob;
5767 
5768 	/*
5769 	 * No need to take the vnode lock here.  The caller must be holding
5770 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5771 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5772 	 * goes away.
5773 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5774 	 * part of a forced unmount.  In the case of a code-signature validation
5775 	 * during a page fault, the "paging_in_progress" reference on the VM
5776 	 * object guarantess that the vnode pager (and the ubc_info) won't go
5777 	 * away during the fault.
5778 	 * Other callers need to protect against vnode reclaim by holding the
5779 	 * vnode lock, for example.
5780 	 */
5781 
5782 	if (!UBCINFOEXISTS(vp)) {
5783 		blob = NULL;
5784 		goto out;
5785 	}
5786 
5787 	uip = vp->v_ubcinfo;
5788 	blob = uip->cs_blob_supplement;
5789 	if (blob != NULL) {
5790 		cs_blob_require(blob, vp);
5791 	}
5792 
5793 out:
5794 	return blob;
5795 }
5796 #endif
5797 
5798 
5799 void
ubc_get_cs_mtime(struct vnode * vp,struct timespec * cs_mtime)5800 ubc_get_cs_mtime(
5801 	struct vnode    *vp,
5802 	struct timespec *cs_mtime)
5803 {
5804 	struct ubc_info *uip;
5805 
5806 	if (!UBCINFOEXISTS(vp)) {
5807 		cs_mtime->tv_sec = 0;
5808 		cs_mtime->tv_nsec = 0;
5809 		return;
5810 	}
5811 
5812 	uip = vp->v_ubcinfo;
5813 	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
5814 	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
5815 }
5816 
/* diagnostic counters: pages with no hash found / pages whose hash mismatched */
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
/*
 * cs_validate_hash
 *
 * Validate one code-signing chunk (1 << csb_hash_pageshift bytes) of
 * "data" at file offset "page_offset" against the hash recorded in the
 * blob chain "blobs".
 *
 * Returns TRUE when an expected hash was found and compared (match or
 * mismatch), FALSE when no blob covers the chunk.  On return:
 *   *bytes_processed : chunk size that was hashed (set only when an
 *                      expected hash was found)
 *   *tainted         : ORed with CS_VALIDATE_TAINTED on hash mismatch
 *                      and CS_VALIDATE_NX for a partial chunk past the
 *                      code limit.
 */
static boolean_t
cs_validate_hash(
	struct cs_blob          *blobs,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	vm_size_t               *bytes_processed,
	unsigned                *tainted)
{
	union cs_hash_union     mdctx;
	struct cs_hash const    *hashtype = NULL;
	unsigned char           actual_hash[CS_HASH_MAX_SIZE];
	unsigned char           expected_hash[CS_HASH_MAX_SIZE];
	boolean_t               found_hash;
	struct cs_blob          *blob;
	const CS_CodeDirectory  *cd;
	const unsigned char     *hash;
	boolean_t               validated;
	off_t                   offset; /* page offset in the file */
	size_t                  size;
	off_t                   codeLimit = 0;
	const char              *lower_bound, *upper_bound;
	vm_offset_t             kaddr, blob_addr;

	/* retrieve the expected hash */
	found_hash = FALSE;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* blob data has been released */
		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
		if (kaddr == 0) {
			continue;
		}

		/* bounds of the in-memory blob, used to bound the hash lookup */
		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		cd = blob->csb_cd;
		if (cd != NULL) {
			/* all CD's that have been injected is already validated */

			hashtype = blob->csb_hashtype;
			if (hashtype == NULL) {
				panic("unknown hash type ?");
			}
			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
				panic("hash size too large");
			}
			if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
				panic("offset not aligned to cshash boundary");
			}

			codeLimit = ntohl(cd->codeLimit);

			/* look up the expected hash for this chunk index */
			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
			    hashtype->cs_size,
			    lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash, hashtype->cs_size);
				found_hash = TRUE;
			}

			break;
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			    "mobj %p off 0x%llx: no hash to validate !?\n",
			    pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {
		*tainted = 0;

		size = (1U << blob->csb_hash_pageshift);
		*bytes_processed = size;

		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			/* only hash the bytes below the code limit; flag as NX */
			size = (size_t) (codeLimit & (size - 1));
			*tainted |= CS_VALIDATE_NX;
		}

		hashtype->cs_init(&mdctx);

		if (blob->csb_hash_firstlevel_pageshift) {
			/*
			 * Two-level hashing: hash each first-level sub-chunk,
			 * then feed the sub-digests into the cumulative hash.
			 */
			const unsigned char *partial_data = (const unsigned char *)data;
			size_t i;
			for (i = 0; i < size;) {
				union cs_hash_union     partialctx;
				unsigned char partial_digest[CS_HASH_MAX_SIZE];
				size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));

				hashtype->cs_init(&partialctx);
				hashtype->cs_update(&partialctx, partial_data, partial_size);
				hashtype->cs_final(partial_digest, &partialctx);

				/* Update cumulative multi-level hash */
				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
				partial_data = partial_data + partial_size;
				i += partial_size;
			}
		} else {
			hashtype->cs_update(&mdctx, data, size);
		}
		hashtype->cs_final(actual_hash, &mdctx);

		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				    pager, page_offset, size,
				    asha1[0], asha1[1], asha1[2],
				    asha1[3], asha1[4],
				    esha1[0], esha1[1], esha1[2],
				    esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "SHA1 OK\n",
				    pager, page_offset, size);
			}
		}
		validated = TRUE;
	}

	return validated;
}
5980 
/*
 * cs_validate_range
 *
 * Validate "dsize" bytes of "data" at file offset "page_offset" by
 * walking the range in code-signing-chunk-sized subranges via
 * cs_validate_hash().  Returns TRUE only when every subrange validated;
 * *tainted accumulates the CS_VALIDATE_* bits of all subranges.
 */
boolean_t
cs_validate_range(
	struct vnode    *vp,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	vm_size_t               dsize,
	unsigned                *tainted)
{
	vm_size_t offset_in_range;
	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */

	struct cs_blob *blobs = ubc_get_cs_blobs(vp);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	/* translated processes may fall back to a supplemental signature */
	if (blobs == NULL && proc_is_translated(current_proc())) {
		struct cs_blob *supp = ubc_get_cs_supplement(vp);

		if (supp != NULL) {
			blobs = supp;
		} else {
			return FALSE;
		}
	}
#endif

#if DEVELOPMENT || DEBUG
	code_signing_config_t cs_config = 0;

	/*
	 * This exemption is specifically useful for systems which want to avoid paying
	 * the cost of verifying the integrity of pages, since that is done by computing
	 * hashes, which can take some time.
	 */
	code_signing_configuration(NULL, &cs_config);
	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
		*tainted = 0;

		/* Return early to avoid paying the cost of hashing */
		return true;
	}
#endif

	*tainted = 0;

	for (offset_in_range = 0;
	    offset_in_range < dsize;
	    /* offset_in_range updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;

		subrange_validated = cs_validate_hash(blobs,
		    pager,
		    page_offset + offset_in_range,
		    (const void *)((const char *)data + offset_in_range),
		    &bytes_processed,
		    &subrange_tainted);

		*tainted |= subrange_tainted;

		if (bytes_processed == 0) {
			/* Cannot make forward progress, so return an error */
			all_subranges_validated = FALSE;
			break;
		} else if (subrange_validated == FALSE) {
			all_subranges_validated = FALSE;
			/* Keep going to detect other types of failures in subranges */
		}

		offset_in_range += bytes_processed;
	}

	return all_subranges_validated;
}
6056 
/*
 * cs_validate_page
 *
 * Validate one VM page at "page_offset", walking it in 4K subranges so
 * both 4K and 16K code-signing granularities are handled.  Results are
 * reported as bitmasks with one bit per 4K chunk (or the VMP_CS_ALL_*
 * values when the entire page was processed at once):
 *   *validated_p : chunks whose hash was found and matched
 *   *tainted_p   : chunks whose hash mismatched
 *   *nx_p        : chunks extending past the code limit
 */
void
cs_validate_page(
	struct vnode            *vp,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	int                     *validated_p,
	int                     *tainted_p,
	int                     *nx_p)
{
	vm_size_t offset_in_page;
	struct cs_blob *blobs;

	blobs = ubc_get_cs_blobs(vp);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	/* translated processes may fall back to a supplemental signature */
	if (blobs == NULL && proc_is_translated(current_proc())) {
		struct cs_blob *supp = ubc_get_cs_supplement(vp);

		if (supp != NULL) {
			blobs = supp;
		}
	}
#endif

#if DEVELOPMENT || DEBUG
	code_signing_config_t cs_config = 0;

	/*
	 * This exemption is specifically useful for systems which want to avoid paying
	 * the cost of verifying the integrity of pages, since that is done by computing
	 * hashes, which can take some time.
	 */
	code_signing_configuration(NULL, &cs_config);
	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
		*validated_p = VMP_CS_ALL_TRUE;
		*tainted_p = VMP_CS_ALL_FALSE;
		*nx_p = VMP_CS_ALL_FALSE;

		/* Return early to avoid paying the cost of hashing */
		return;
	}
#endif

	*validated_p = VMP_CS_ALL_FALSE;
	*tainted_p = VMP_CS_ALL_FALSE;
	*nx_p = VMP_CS_ALL_FALSE;

	for (offset_in_page = 0;
	    offset_in_page < PAGE_SIZE;
	    /* offset_in_page updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;
		int sub_bit;

		subrange_validated = cs_validate_hash(blobs,
		    pager,
		    page_offset + offset_in_page,
		    (const void *)((const char *)data + offset_in_page),
		    &bytes_processed,
		    &subrange_tainted);

		if (bytes_processed == 0) {
			/* 4k chunk not code-signed: try next one */
			offset_in_page += FOURK_PAGE_SIZE;
			continue;
		}
		if (offset_in_page == 0 &&
		    bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
			/* all processed: no 4k granularity */
			if (subrange_validated) {
				*validated_p = VMP_CS_ALL_TRUE;
			}
			if (subrange_tainted & CS_VALIDATE_TAINTED) {
				*tainted_p = VMP_CS_ALL_TRUE;
			}
			if (subrange_tainted & CS_VALIDATE_NX) {
				*nx_p = VMP_CS_ALL_TRUE;
			}
			break;
		}
		/* we only handle 4k or 16k code-signing granularity... */
		assertf(bytes_processed <= FOURK_PAGE_SIZE,
		    "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
		    vp, blobs, (uint64_t)page_offset,
		    (uint64_t)offset_in_page, (uint64_t)bytes_processed);
		/* record results in the bit for this 4k chunk */
		sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
		if (subrange_validated) {
			*validated_p |= sub_bit;
		}
		if (subrange_tainted & CS_VALIDATE_TAINTED) {
			*tainted_p |= sub_bit;
		}
		if (subrange_tainted & CS_VALIDATE_NX) {
			*nx_p |= sub_bit;
		}
		/* go to next 4k chunk */
		offset_in_page += FOURK_PAGE_SIZE;
	}

	return;
}
6160 
6161 int
ubc_cs_getcdhash(vnode_t vp,off_t offset,unsigned char * cdhash,uint8_t * type)6162 ubc_cs_getcdhash(
6163 	vnode_t         vp,
6164 	off_t           offset,
6165 	unsigned char   *cdhash,
6166 	uint8_t         *type)
6167 {
6168 	struct cs_blob  *blobs, *blob;
6169 	off_t           rel_offset;
6170 	int             ret;
6171 
6172 	vnode_lock(vp);
6173 
6174 	blobs = ubc_get_cs_blobs(vp);
6175 	for (blob = blobs;
6176 	    blob != NULL;
6177 	    blob = blob->csb_next) {
6178 		/* compute offset relative to this blob */
6179 		rel_offset = offset - blob->csb_base_offset;
6180 		if (rel_offset >= blob->csb_start_offset &&
6181 		    rel_offset < blob->csb_end_offset) {
6182 			/* this blob does cover our "offset" ! */
6183 			break;
6184 		}
6185 	}
6186 
6187 	if (blob == NULL) {
6188 		/* we didn't find a blob covering "offset" */
6189 		ret = EBADEXEC; /* XXX any better error ? */
6190 	} else {
6191 		/* get the CDHash of that blob */
6192 		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
6193 
6194 		/* get the type of the CDHash */
6195 		if (type != NULL) {
6196 			*type = blob->csb_cd->hashType;
6197 		}
6198 
6199 		ret = 0;
6200 	}
6201 
6202 	vnode_unlock(vp);
6203 
6204 	return ret;
6205 }
6206 
6207 boolean_t
ubc_cs_is_range_codesigned(vnode_t vp,mach_vm_offset_t start,mach_vm_size_t size)6208 ubc_cs_is_range_codesigned(
6209 	vnode_t                 vp,
6210 	mach_vm_offset_t        start,
6211 	mach_vm_size_t          size)
6212 {
6213 	struct cs_blob          *csblob;
6214 	mach_vm_offset_t        blob_start;
6215 	mach_vm_offset_t        blob_end;
6216 
6217 	if (vp == NULL) {
6218 		/* no file: no code signature */
6219 		return FALSE;
6220 	}
6221 	if (size == 0) {
6222 		/* no range: no code signature */
6223 		return FALSE;
6224 	}
6225 	if (start + size < start) {
6226 		/* overflow */
6227 		return FALSE;
6228 	}
6229 
6230 	csblob = ubc_cs_blob_get(vp, -1, -1, start);
6231 	if (csblob == NULL) {
6232 		return FALSE;
6233 	}
6234 
6235 	/*
6236 	 * We currently check if the range is covered by a single blob,
6237 	 * which should always be the case for the dyld shared cache.
6238 	 * If we ever want to make this routine handle other cases, we
6239 	 * would have to iterate if the blob does not cover the full range.
6240 	 */
6241 	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
6242 	    csblob->csb_start_offset);
6243 	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
6244 	    csblob->csb_end_offset);
6245 	if (blob_start > start || blob_end < (start + size)) {
6246 		/* range not fully covered by this code-signing blob */
6247 		return FALSE;
6248 	}
6249 
6250 	return TRUE;
6251 }
6252 
6253 #if CHECK_CS_VALIDATION_BITMAP
6254 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
6255 extern  boolean_t       root_fs_upgrade_try;
6256 
6257 /*
6258  * Should we use the code-sign bitmap to avoid repeated code-sign validation?
6259  * Depends:
6260  * a) Is the target vnode on the root filesystem?
6261  * b) Has someone tried to mount the root filesystem read-write?
6262  * If answers are (a) yes AND (b) no, then we can use the bitmap.
6263  */
6264 #define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
6265 kern_return_t
ubc_cs_validation_bitmap_allocate(vnode_t vp)6266 ubc_cs_validation_bitmap_allocate(
6267 	vnode_t         vp)
6268 {
6269 	kern_return_t   kr = KERN_SUCCESS;
6270 	struct ubc_info *uip;
6271 	char            *target_bitmap;
6272 	vm_object_size_t        bitmap_size;
6273 
6274 	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
6275 		kr = KERN_INVALID_ARGUMENT;
6276 	} else {
6277 		uip = vp->v_ubcinfo;
6278 
6279 		if (uip->cs_valid_bitmap == NULL) {
6280 			bitmap_size = stob(uip->ui_size);
6281 			target_bitmap = (char*) kalloc_data((vm_size_t)bitmap_size, Z_WAITOK | Z_ZERO);
6282 			if (target_bitmap == 0) {
6283 				kr = KERN_NO_SPACE;
6284 			} else {
6285 				kr = KERN_SUCCESS;
6286 			}
6287 			if (kr == KERN_SUCCESS) {
6288 				uip->cs_valid_bitmap = (void*)target_bitmap;
6289 				uip->cs_valid_bitmap_size = bitmap_size;
6290 			}
6291 		}
6292 	}
6293 	return kr;
6294 }
6295 
6296 kern_return_t
ubc_cs_check_validation_bitmap(vnode_t vp,memory_object_offset_t offset,int optype)6297 ubc_cs_check_validation_bitmap(
6298 	vnode_t                 vp,
6299 	memory_object_offset_t          offset,
6300 	int                     optype)
6301 {
6302 	kern_return_t   kr = KERN_SUCCESS;
6303 
6304 	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
6305 		kr = KERN_INVALID_ARGUMENT;
6306 	} else {
6307 		struct ubc_info *uip = vp->v_ubcinfo;
6308 		char            *target_bitmap = uip->cs_valid_bitmap;
6309 
6310 		if (target_bitmap == NULL) {
6311 			kr = KERN_INVALID_ARGUMENT;
6312 		} else {
6313 			uint64_t        bit, byte;
6314 			bit = atop_64( offset );
6315 			byte = bit >> 3;
6316 
6317 			if (byte > uip->cs_valid_bitmap_size) {
6318 				kr = KERN_INVALID_ARGUMENT;
6319 			} else {
6320 				if (optype == CS_BITMAP_SET) {
6321 					target_bitmap[byte] |= (1 << (bit & 07));
6322 					kr = KERN_SUCCESS;
6323 				} else if (optype == CS_BITMAP_CLEAR) {
6324 					target_bitmap[byte] &= ~(1 << (bit & 07));
6325 					kr = KERN_SUCCESS;
6326 				} else if (optype == CS_BITMAP_CHECK) {
6327 					if (target_bitmap[byte] & (1 << (bit & 07))) {
6328 						kr = KERN_SUCCESS;
6329 					} else {
6330 						kr = KERN_FAILURE;
6331 					}
6332 				}
6333 			}
6334 		}
6335 	}
6336 	return kr;
6337 }
6338 
6339 void
ubc_cs_validation_bitmap_deallocate(struct ubc_info * uip)6340 ubc_cs_validation_bitmap_deallocate(
6341 	struct ubc_info *uip)
6342 {
6343 	if (uip->cs_valid_bitmap != NULL) {
6344 		kfree_data(uip->cs_valid_bitmap, (vm_size_t)uip->cs_valid_bitmap_size);
6345 		uip->cs_valid_bitmap = NULL;
6346 	}
6347 }
6348 #else
kern_return_t
ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
{
	/* validation bitmap disabled at build time: nothing to allocate */
	return KERN_INVALID_ARGUMENT;
}
6354 
kern_return_t
ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype)
{
	/* validation bitmap disabled at build time: no bits to operate on */
	return KERN_INVALID_ARGUMENT;
}
6363 
void
ubc_cs_validation_bitmap_deallocate(__unused struct ubc_info *uip)
{
	/* validation bitmap disabled at build time: nothing to free */
	return;
}
6369 #endif /* CHECK_CS_VALIDATION_BITMAP */
6370 
6371 #if CODE_SIGNING_MONITOR
6372 
kern_return_t
cs_associate_blob_with_mapping(
	void                    *pmap,
	vm_map_offset_t         start,
	vm_map_size_t           size,
	vm_object_offset_t      offset,
	void                    *blobs_p)
{
	/*
	 * Find the cs_blob in the "blobs_p" chain whose validated range
	 * fully contains [offset, offset + size) and hand its code-signing
	 * monitor object to csm_associate_code_signature() for the mapping
	 * [start, start + size) in "pmap".
	 * Returns KERN_NOT_SUPPORTED when the monitor is disabled,
	 * KERN_CODESIGN_ERROR when no suitable blob is found, otherwise
	 * the monitor's result.
	 */
	off_t                   blob_start_offset, blob_end_offset;
	kern_return_t           kr;
	struct cs_blob          *blobs, *blob;
	vm_offset_t             kaddr;
	void                    *monitor_sig_obj = NULL;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	blobs = (struct cs_blob *)blobs_p;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		blob_start_offset = (blob->csb_base_offset +
		    blob->csb_start_offset);
		blob_end_offset = (blob->csb_base_offset +
		    blob->csb_end_offset);
		/* require the blob to cover the entire requested range */
		if ((off_t) offset < blob_start_offset ||
		    (off_t) offset >= blob_end_offset ||
		    (off_t) (offset + size) <= blob_start_offset ||
		    (off_t) (offset + size) > blob_end_offset) {
			continue;
		}

		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
		if (kaddr == 0) {
			/* blob data has been released */
			continue;
		}

		monitor_sig_obj = blob->csb_csm_obj;
		if (monitor_sig_obj == NULL) {
			continue;
		}

		break;
	}

	if (monitor_sig_obj != NULL) {
		/* blob_start_offset is valid here: set on the matching blob */
		vm_offset_t segment_offset = offset - blob_start_offset;
		kr = csm_associate_code_signature(pmap, monitor_sig_obj, start, size, segment_offset);
	} else {
		kr = KERN_CODESIGN_ERROR;
	}

	return kr;
}
6430 
6431 #endif /* CODE_SIGNING_MONITOR */
6432