xref: /xnu-8792.61.2/bsd/kern/ubc_subr.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *	File:	ubc_subr.c
30  *	Author:	Umesh Vaishampayan [[email protected]]
31  *		05-Aug-1999	umeshv	Created.
32  *
33  *	Functions related to Unified Buffer cache.
34  *
35  * Caller of UBC functions MUST have a valid reference on the vnode.
36  *
37  */
38 
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/lock.h>
43 #include <sys/mman.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56 #include <sys/reboot.h>
57 #include <sys/code_signing.h>
58 
59 #include <mach/mach_types.h>
60 #include <mach/memory_object_types.h>
61 #include <mach/memory_object_control.h>
62 #include <mach/vm_map.h>
63 #include <mach/mach_vm.h>
64 #include <mach/upl.h>
65 
66 #include <kern/kern_types.h>
67 #include <kern/kalloc.h>
68 #include <kern/zalloc.h>
69 #include <kern/thread.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_protos.h> /* last */
73 
74 #include <libkern/crypto/sha1.h>
75 #include <libkern/crypto/sha2.h>
76 #include <libkern/libkern.h>
77 
78 #include <security/mac_framework.h>
79 #include <stdbool.h>
80 #include <stdatomic.h>
81 #include <libkern/amfi/amfi.h>
82 
83 /* XXX These should be in a BSD accessible Mach header, but aren't. */
84 extern kern_return_t memory_object_pages_resident(memory_object_control_t,
85     boolean_t *);
86 extern kern_return_t    memory_object_signed(memory_object_control_t control,
87     boolean_t is_signed);
88 extern boolean_t        memory_object_is_signed(memory_object_control_t);
89 extern void             memory_object_mark_trusted(
90 	memory_object_control_t         control);
91 
92 /* XXX Same for those. */
93 
94 extern void Debugger(const char *message);
95 
#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
/* DIAGNOSTIC builds: a failed assertion panics instead of being compiled out. */
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
105 
/* Forward declarations for this file's internal helpers. */
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);

/* Allocation zone for per-vnode ubc_info structures; memory is zeroed on free. */
ZONE_DEFINE_TYPE(ubc_info_zone, "ubc_info zone", struct ubc_info,
    ZC_ZFREE_CLEARMEM);
/* Generation counter for cs_blob state; starts at 1 so 0 can mean "unset". */
static uint32_t cs_blob_generation_count = 1;

/*
 * CODESIGNING
 * Routines to navigate code signing data structures in the kernel...
 */

/* cs_blob allocations live in a read-only zone and are zeroed on free. */
ZONE_DEFINE_ID(ZONE_ID_CS_BLOB, "cs_blob zone", struct cs_blob,
    ZC_READONLY | ZC_ZFREE_CLEARMEM);

extern int cs_debug;

/* Code signatures may use 4K pages even when the native page size differs. */
#define PAGE_SHIFT_4K           (12)
129 
130 static boolean_t
cs_valid_range(const void * start,const void * end,const void * lower_bound,const void * upper_bound)131 cs_valid_range(
132 	const void *start,
133 	const void *end,
134 	const void *lower_bound,
135 	const void *upper_bound)
136 {
137 	if (upper_bound < lower_bound ||
138 	    end < start) {
139 		return FALSE;
140 	}
141 
142 	if (start < lower_bound ||
143 	    end > upper_bound) {
144 		return FALSE;
145 	}
146 
147 	return TRUE;
148 }
149 
/* Message-digest primitive signatures shared by all supported hash types. */
typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

/*
 * Descriptor for one code-signing hash algorithm: its CS_HASHTYPE_* code,
 * the stored (possibly truncated) and native digest sizes, and the digest
 * primitives used to recompute page and special-slot hashes.
 */
struct cs_hash {
	uint8_t             cs_type;    /* type code as per code signing */
	size_t              cs_size;    /* size of effective hash (may be truncated) */
	size_t              cs_digest_size;/* size of native hash */
	cs_md_init          cs_init;
	cs_md_update        cs_update;
	cs_md_final         cs_final;
};
162 
163 uint8_t
cs_hash_type(struct cs_hash const * const cs_hash)164 cs_hash_type(
165 	struct cs_hash const * const cs_hash)
166 {
167 	return cs_hash->cs_type;
168 }
169 
/* SHA-1: legacy hash type, still accepted (ranked lowest by hash_rank()). */
static const struct cs_hash cs_hash_sha1 = {
	.cs_type = CS_HASHTYPE_SHA1,
	.cs_size = CS_SHA1_LEN,
	.cs_digest_size = SHA_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA1Init,
	.cs_update = (cs_md_update)SHA1Update,
	.cs_final = (cs_md_final)SHA1Final,
};
#if CRYPTO_SHA2
/* SHA-256 with the full 32-byte digest stored per slot. */
static const struct cs_hash cs_hash_sha256 = {
	.cs_type = CS_HASHTYPE_SHA256,
	.cs_size = SHA256_DIGEST_LENGTH,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
/* SHA-256 computed natively but stored truncated to CS_SHA256_TRUNCATED_LEN. */
static const struct cs_hash cs_hash_sha256_truncate = {
	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
	.cs_size = CS_SHA256_TRUNCATED_LEN,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
/* SHA-384: largest digest supported; ranked highest by hash_rank(). */
static const struct cs_hash cs_hash_sha384 = {
	.cs_type = CS_HASHTYPE_SHA384,
	.cs_size = SHA384_DIGEST_LENGTH,
	.cs_digest_size = SHA384_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA384_Init,
	.cs_update = (cs_md_update)SHA384_Update,
	.cs_final = (cs_md_final)SHA384_Final,
};
#endif
204 
205 static struct cs_hash const *
cs_find_md(uint8_t type)206 cs_find_md(uint8_t type)
207 {
208 	if (type == CS_HASHTYPE_SHA1) {
209 		return &cs_hash_sha1;
210 #if CRYPTO_SHA2
211 	} else if (type == CS_HASHTYPE_SHA256) {
212 		return &cs_hash_sha256;
213 	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
214 		return &cs_hash_sha256_truncate;
215 	} else if (type == CS_HASHTYPE_SHA384) {
216 		return &cs_hash_sha384;
217 #endif
218 	}
219 	return NULL;
220 }
221 
/*
 * Scratch space large enough to hold any supported digest context;
 * the active member is selected by the cs_hash descriptor in use.
 */
union cs_hash_union {
	SHA1_CTX                sha1ctxt;
	SHA256_CTX              sha256ctx;
	SHA384_CTX              sha384ctx;
};
227 
228 
/*
 * Choose among different hash algorithms.
 * Higher is better, 0 => don't use at all.
 * (hash_rank() returns array index + 1, so SHA-384 ranks highest.)
 */
static const uint32_t hashPriorities[] = {
	CS_HASHTYPE_SHA1,
	CS_HASHTYPE_SHA256_TRUNCATED,
	CS_HASHTYPE_SHA256,
	CS_HASHTYPE_SHA384,
};
239 
240 static unsigned int
hash_rank(const CS_CodeDirectory * cd)241 hash_rank(const CS_CodeDirectory *cd)
242 {
243 	uint32_t type = cd->hashType;
244 	unsigned int n;
245 
246 	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
247 		if (hashPriorities[n] == type) {
248 			return n + 1;
249 		}
250 	}
251 	return 0;       /* not supported */
252 }
253 
254 
/*
 * hashes
 *
 * Locate the stored hash for a given code page in a code directory,
 * handling both the flat hash array and the optional scatter table
 * (present when version >= CS_SUPPORTSSCATTER and scatterOffset != 0).
 *
 * Parameters:	cd		Validated code directory
 *		page		Zero-based index of the code page
 *		hash_len	Size in bytes of one stored hash
 *		lower_bound	Lowest valid address of the signature buffer
 *		upper_bound	Highest valid address of the signature buffer
 *
 * Returns:	pointer to the hash, or NULL when the page is not covered
 *		or any computed address falls outside the buffer.
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	uint32_t page,
	size_t hash_len,
	const char *lower_bound,
	const char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter*)
		    ((const char*)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex = 0, scount, sbase = 0;
		/* iterate all scatter structs */
		do {
			/* each scatter entry must stay within the code directory */
			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
				if (cs_debug) {
					printf("CODE SIGNING: Scatter extends past Code Directory\n");
				}
				return NULL;
			}

			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? (count == 0 terminates the array) */
			if (scount == 0) {
				return NULL;
			}

			/* bases must strictly increase after the first entry */
			if ((hashindex > 0) && (new_base <= sbase)) {
				if (cs_debug) {
					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
					    sbase, new_base);
				}
				return NULL;    /* unordered scatter array */
			}
			sbase = new_base;

			/* this scatter beyond page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase + scount >= page) {
				/* Found the scatter struct that is
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
				    hashindex * hash_len;
				/* top = address of first hash after this scatter */
				top = base + scount * hash_len;
				if (!cs_valid_range(base, top, lower_bound,
				    upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}

				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex += scount;
			scatter++;
		} while (1);

		hash = base + (page - sbase) * hash_len;
	} else {
		/* flat layout: one hash per page starting at hashOffset */
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * hash_len;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * hash_len;
	}

	/* final bounds check on the single hash handed back */
	if (!cs_valid_range(hash, hash + hash_len,
	    lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}
351 
/*
 * cs_validate_codedirectory
 *
 * Validate pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer, and that
 * the hash algorithm and page size are supported.
 *
 * Parameters:	cd			Pointer to code directory buffer
 *		length			Length of buffer
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */

static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
	struct cs_hash const *hashtype;

	if (length < sizeof(*cd)) {
		return EBADEXEC;
	}
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
		return EBADEXEC;
	}
	/* only 4K and native page-size signatures are accepted */
	if ((cd->pageSize != PAGE_SHIFT_4K) && (cd->pageSize != PAGE_SHIFT)) {
		printf("disallowing unsupported code signature page shift: %u\n", cd->pageSize);
		return EBADEXEC;
	}
	hashtype = cs_find_md(cd->hashType);
	if (hashtype == NULL) {
		return EBADEXEC;
	}

	/* declared per-slot hash size must match the algorithm's stored size */
	if (cd->hashSize != hashtype->cs_size) {
		return EBADEXEC;
	}

	if (length < ntohl(cd->hashOffset)) {
		return EBADEXEC;
	}

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
		return EBADEXEC;
	}

	/* check that codeslots fits in the buffer */
	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
		return EBADEXEC;
	}

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
		if (length < ntohl(cd->scatterOffset)) {
			return EBADEXEC;
		}

		const SC_Scatter *scatter = (const SC_Scatter *)
		    (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer, since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while (1) {
			/* check that the end of each scatter buffer is within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
				return EBADEXEC;
			}
			uint32_t scount = ntohl(scatter->count);
			/* a zero count terminates the scatter array */
			if (scount == 0) {
				break;
			}
			/* reject uint32 overflow of the accumulated page total */
			if (nPages + scount < nPages) {
				return EBADEXEC;
			}
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots)) {
			return EBADEXEC;
		}
#endif
	}

	if (length < ntohl(cd->identOffset)) {
		return EBADEXEC;
	}

	/* identifier is NUL terminated string */
	if (cd->identOffset) {
		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
			return EBADEXEC;
		}
	}

	/* team identifier is NUL terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset)) {
			return EBADEXEC;
		}

		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
			return EBADEXEC;
		}
	}

	/* linkage is variable length binary data */
	if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
		const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
		const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);

		/* overflow- and bounds-check the [ptr, ptr_end) window */
		if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
			return EBADEXEC;
		}
	}


	return 0;
}
479 
480 /*
481  *
482  */
483 
484 static int
cs_validate_blob(const CS_GenericBlob * blob,size_t length)485 cs_validate_blob(const CS_GenericBlob *blob, size_t length)
486 {
487 	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
488 		return EBADEXEC;
489 	}
490 	return 0;
491 }
492 
/*
 * cs_validate_csblob
 *
 * Validate a superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 *
 * Parameters:	addr			Pointer to code signature
 *		blob_size		Length of buffer
 *		rcd			returns pointer to best code directory
 *		rentitlements		returns pointer to XML entitlements
 *					sub-blob, if any
 *		rder_entitlements	returns pointer to DER entitlements
 *					sub-blob, if any
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */

static int
cs_validate_csblob(
	const uint8_t *addr,
	const size_t blob_size,
	const CS_CodeDirectory **rcd,
	const CS_GenericBlob **rentitlements,
	const CS_GenericBlob **rder_entitlements)
{
	const CS_GenericBlob *blob;
	int error;
	size_t length;
	const CS_GenericBlob *self_constraint = NULL;
	const CS_GenericBlob *parent_constraint = NULL;
	const CS_GenericBlob *responsible_proc_constraint = NULL;

	*rcd = NULL;
	*rentitlements = NULL;
	*rder_entitlements = NULL;

	blob = (const CS_GenericBlob *)(const void *)addr;

	length = blob_size;
	error = cs_validate_blob(blob, length);
	if (error) {
		return error;
	}
	/* cs_validate_blob() proved the declared length fits; trust it from here */
	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb;
		uint32_t n, count;
		const CS_CodeDirectory *best_cd = NULL;
		unsigned int best_rank = 0;
#if XNU_PLATFORM_WatchOS
		const CS_CodeDirectory *sha1_cd = NULL;
#endif

		if (length < sizeof(CS_SuperBlob)) {
			return EBADEXEC;
		}

		sb = (const CS_SuperBlob *)blob;
		count = ntohl(sb->count);

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
			return EBADEXEC;
		}

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			uint32_t type = ntohl(blobIndex->type);
			uint32_t offset = ntohl(blobIndex->offset);
			if (length < offset) {
				return EBADEXEC;
			}

			const CS_GenericBlob *subBlob =
			    (const CS_GenericBlob *)(const void *)(addr + offset);

			size_t subLength = length - offset;

			if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
				return error;
			}
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, that is also returned */
			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
					return error;
				}
				/* highest-ranked hash type wins; a tie means a duplicated type */
				unsigned int rank = hash_rank(candidate);
				if (cs_debug > 3) {
					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
				}
				if (best_cd == NULL || rank > best_rank) {
					best_cd = candidate;
					best_rank = rank;

					if (cs_debug > 2) {
						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
					}
					*rcd = best_cd;
				} else if (best_cd != NULL && rank == best_rank) {
					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
					return EBADEXEC;
				}
#if XNU_PLATFORM_WatchOS
				if (candidate->hashType == CS_HASHTYPE_SHA1) {
					if (sha1_cd != NULL) {
						printf("multiple sha1 CodeDirectories in signature; rejecting\n");
						return EBADEXEC;
					}
					sha1_cd = candidate;
				}
#endif
			} else if (type == CSSLOT_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rentitlements != NULL) {
					printf("multiple entitlements blobs\n");
					return EBADEXEC;
				}
				*rentitlements = subBlob;
			} else if (type == CSSLOT_DER_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_DER_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rder_entitlements != NULL) {
					printf("multiple der entitlements blobs\n");
					return EBADEXEC;
				}
				*rder_entitlements = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_SELF) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (self_constraint != NULL) {
					printf("multiple self constraint blobs\n");
					return EBADEXEC;
				}
				self_constraint = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_PARENT) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (parent_constraint != NULL) {
					printf("multiple parent constraint blobs\n");
					return EBADEXEC;
				}
				parent_constraint = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (responsible_proc_constraint != NULL) {
					printf("multiple responsible process constraint blobs\n");
					return EBADEXEC;
				}
				responsible_proc_constraint = subBlob;
			}
		}

#if XNU_PLATFORM_WatchOS
		/* To keep watchOS fast enough, we have to resort to sha1 for
		 * some code.
		 *
		 * At the time of writing this comment, known sha1 attacks are
		 * collision attacks (not preimage or second preimage
		 * attacks), which do not apply to platform binaries since
		 * they have a fixed hash in the trust cache.  Given this
		 * property, we only prefer sha1 code directories for adhoc
		 * signatures, which always have to be in a trust cache to be
		 * valid (can-load-cdhash does not exist for watchOS). Those
		 * are, incidentally, also the platform binaries, for which we
		 * care about the performance hit that sha256 would bring us.
		 *
		 * Platform binaries may still contain a (not chosen) sha256
		 * code directory, which keeps software updates that switch to
		 * sha256-only small.
		 */

		if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
			if (sha1_cd->flags != (*rcd)->flags) {
				printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
				    (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
				*rcd = NULL;
				return EBADEXEC;
			}

			*rcd = sha1_cd;
		}
#endif
	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
		/* a "raw" code directory with no superblob wrapper */
		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
			return error;
		}
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	/* a signature without any code directory is invalid */
	if (*rcd == NULL) {
		return EBADEXEC;
	}

	return 0;
}
703 
/*
 * csblob_find_blob_bytes
 *
 * Find a blob from the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use csblob_find_blob() instead.
 *
 * Will also find a "raw" code directory if it is stored as well as
 * searching the superblob.
 *
 * Parameters:	addr			Pointer to code signature
 *		length			Length of buffer
 *		type			type of blob to find
 *		magic			the magic number for that blob
 *
 * Returns:	pointer			Success
 *		NULL			Buffer not found
 */

const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

	/* addr + length must not wrap the address space */
	if ((addr + length) < addr) {
		panic("CODE SIGNING: CS Blob length overflow for addr: %p", addr);
	}

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		for (n = 0; n < count; n++) {
			if (ntohl(sb->index[n].type) != type) {
				continue;
			}
			uint32_t offset = ntohl(sb->index[n].offset);
			/* sub-blob header must fit inside the buffer */
			if (length - sizeof(const CS_GenericBlob) < offset) {
				return NULL;
			}
			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
			if (ntohl(blob->magic) != magic) {
				continue;
			}
			/* reject sub-blobs whose declared length wraps or escapes the buffer */
			if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
				panic("CODE SIGNING: CS Blob length overflow for blob at: %p", blob);
			} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
				continue;
			}
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
	    && magic == CSMAGIC_CODEDIRECTORY) {
		/* "raw" code directory without a superblob wrapper */
		if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
			panic("CODE SIGNING: CS Blob length overflow for code directory blob at: %p", blob);
		} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
			return NULL;
		}
		return blob;
	}
	return NULL;
}
766 
767 
768 const CS_GenericBlob *
csblob_find_blob(struct cs_blob * csblob,uint32_t type,uint32_t magic)769 csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
770 {
771 	if ((csblob->csb_flags & CS_VALID) == 0) {
772 		return NULL;
773 	}
774 	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
775 }
776 
777 static const uint8_t *
find_special_slot(const CS_CodeDirectory * cd,size_t slotsize,uint32_t slot)778 find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
779 {
780 	/* there is no zero special slot since that is the first code slot */
781 	if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
782 		return NULL;
783 	}
784 
785 	return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
786 }
787 
/* All-zero digest used to recognize "absent" special-slot hashes. */
static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
789 
/*
 * csblob_find_special_slot_blob
 *
 * Locate the sub-blob stored at the given special slot and verify it
 * against the hash recorded in the code directory's special-slot array.
 *
 * Parameters:	csblob		Signature to search
 *		slot		CSSLOT_* special slot index
 *		magic		Expected magic of the sub-blob
 *		out_start	Optionally returns the verified blob
 *		out_length	Optionally returns its length
 *
 * Returns:	0		Success (a legitimately absent blob, with a
 *				missing or all-zero slot hash, also returns 0
 *				with NULL/0 outputs)
 *		EBADEXEC	Blob/hash presence mismatch or digest mismatch
 */
static int
csblob_find_special_slot_blob(struct cs_blob* csblob, uint32_t slot, uint32_t magic, const CS_GenericBlob **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *blob;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	if (out_start) {
		*out_start = NULL;
	}
	if (out_length) {
		*out_length = 0;
	}

	/* need a known hash type whose native digest fits our scratch buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	blob = csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, slot, magic);

	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, slot);

	if (embedded_hash == NULL) {
		/* no slot hash recorded: the blob must not exist either */
		if (blob) {
			return EBADEXEC;
		}
		return 0;
	} else if (blob == NULL) {
		/* slot exists but blob is missing: hash must be all zeroes */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* recompute the blob's digest and compare against the slot hash */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, blob, ntohl(blob->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}
	if (out_start) {
		*out_start = blob;
	}
	if (out_length) {
		*out_length = ntohl(blob->length);
	}

	return 0;
}
845 
/*
 * csblob_get_entitlements
 *
 * Fetch the XML entitlements blob from a cs_blob after verifying it
 * against the CSSLOT_ENTITLEMENTS hash in the code directory.  On
 * success, *out_start/*out_length describe the blob; both stay
 * NULL/0 when the signature legitimately carries no entitlements.
 *
 * Returns:	0		Success (possibly with no entitlements)
 *		EBADEXEC	Presence mismatch or digest mismatch
 */
int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	/* need a known hash type whose native digest fits our scratch buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	/* only trust the cached entitlements pointer on a CS_VALID blob */
	if ((csblob->csb_flags & CS_VALID) == 0) {
		entitlements = NULL;
	} else {
		entitlements = csblob->csb_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		/* no slot hash recorded: the blob must not exist either */
		if (entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (entitlements == NULL) {
		/* slot exists but blob is missing: hash must be all zeroes */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* recompute the blob's digest and compare against the slot hash */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = __DECONST(void *, entitlements);
	*out_length = ntohl(entitlements->length);

	return 0;
}
897 
898 const CS_GenericBlob*
csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)899 csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)
900 {
901 	if ((csblob->csb_flags & CS_VALID) == 0) {
902 		return NULL;
903 	}
904 
905 	return csblob->csb_der_entitlements_blob;
906 }
907 
/*
 * csblob_get_der_entitlements
 *
 * Fetch the DER entitlements blob from a cs_blob after verifying it
 * against the CSSLOT_DER_ENTITLEMENTS hash in the code directory.  On
 * success, *out_start/*out_length describe the blob; both stay
 * NULL/0 when the signature legitimately carries no DER entitlements.
 *
 * Returns:	0		Success (possibly with no DER entitlements)
 *		EBADEXEC	Presence mismatch or digest mismatch
 */
int
csblob_get_der_entitlements(struct cs_blob *csblob, const CS_GenericBlob **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *der_entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	/* need a known hash type whose native digest fits our scratch buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	/* only trust the cached DER entitlements pointer on a CS_VALID blob */
	if ((csblob->csb_flags & CS_VALID) == 0) {
		der_entitlements = NULL;
	} else {
		der_entitlements = csblob->csb_der_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_DER_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		/* no slot hash recorded: the blob must not exist either */
		if (der_entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (der_entitlements == NULL) {
		/* slot exists but blob is missing: hash must be all zeroes */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* recompute the blob's digest and compare against the slot hash */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, der_entitlements, ntohl(der_entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = der_entitlements;
	*out_length = ntohl(der_entitlements->length);

	return 0;
}
959 
960 /*
961  * Register a provisioning profile with a cs_blob.
962  */
963 int
csblob_register_profile(struct cs_blob __unused * csblob,void __unused * profile_addr,vm_size_t __unused profile_size)964 csblob_register_profile(
965 	struct cs_blob __unused *csblob,
966 	void __unused *profile_addr,
967 	vm_size_t __unused profile_size)
968 {
969 #if CODE_SIGNING_MONITOR
970 	/* Profiles only need to be registered for monitor environments */
971 	assert(profile_addr != NULL);
972 	assert(profile_size != 0);
973 	assert(csblob != NULL);
974 
975 	/* This is the old registration interface -- create a fake UUID */
976 	uuid_t fake_uuid = {0};
977 	uuid_generate(fake_uuid);
978 
979 	kern_return_t kr = register_provisioning_profile(
980 		fake_uuid,
981 		profile_addr, profile_size);
982 
983 	if (kr != KERN_SUCCESS) {
984 		if (kr == KERN_ALREADY_IN_SET) {
985 			panic("CODE SIGNGING: duplicate UUIDs not expected on this interface");
986 		}
987 		return EPERM;
988 	}
989 
990 	/* Associate the profile with the monitor's signature object */
991 	kr = associate_provisioning_profile(
992 		csblob->csb_pmap_cs_entry,
993 		fake_uuid);
994 
995 	if (kr != KERN_SUCCESS) {
996 		return EPERM;
997 	}
998 
999 	return 0;
1000 #else
1001 	return 0;
1002 #endif /* CODE_SIGNING_MONITOR */
1003 }
1004 
1005 int
csblob_register_profile_uuid(struct cs_blob __unused * csblob,const uuid_t __unused profile_uuid,void __unused * profile_addr,vm_size_t __unused profile_size)1006 csblob_register_profile_uuid(
1007 	struct cs_blob __unused *csblob,
1008 	const uuid_t __unused profile_uuid,
1009 	void __unused *profile_addr,
1010 	vm_size_t __unused profile_size)
1011 {
1012 #if CODE_SIGNING_MONITOR
1013 	/* Profiles only need to be registered for monitor environments */
1014 	assert(profile_addr != NULL);
1015 	assert(profile_size != 0);
1016 	assert(csblob != NULL);
1017 
1018 	kern_return_t kr = register_provisioning_profile(
1019 		profile_uuid,
1020 		profile_addr, profile_size);
1021 
1022 	if ((kr != KERN_SUCCESS) && (kr != KERN_ALREADY_IN_SET)) {
1023 		return EPERM;
1024 	}
1025 
1026 	/* Associate the profile with the monitor's signature object */
1027 	kr = associate_provisioning_profile(
1028 		csblob->csb_pmap_cs_entry,
1029 		profile_uuid);
1030 
1031 	if (kr != KERN_SUCCESS) {
1032 		return EPERM;
1033 	}
1034 
1035 	return 0;
1036 #else
1037 	return 0;
1038 #endif /* CODE_SIGNING_MONITOR */
1039 }
1040 
1041 /*
1042  * CODESIGNING
1043  * End of routines to navigate code signing data structures in the kernel.
1044  */
1045 
1046 
1047 
1048 /*
1049  * ubc_info_init
1050  *
1051  * Allocate and attach an empty ubc_info structure to a vnode
1052  *
1053  * Parameters:	vp			Pointer to the vnode
1054  *
1055  * Returns:	0			Success
1056  *	vnode_size:ENOMEM		Not enough space
1057  *	vnode_size:???			Other error from vnode_getattr
1058  *
1059  */
int
ubc_info_init(struct vnode *vp)
{
	/* Unsized variant: the internal routine will query the size itself */
	return ubc_info_init_internal(vp, 0, 0);
}
1065 
1066 
1067 /*
1068  * ubc_info_init_withsize
1069  *
1070  * Allocate and attach a sized ubc_info structure to a vnode
1071  *
1072  * Parameters:	vp			Pointer to the vnode
1073  *		filesize		The size of the file
1074  *
1075  * Returns:	0			Success
1076  *	vnode_size:ENOMEM		Not enough space
1077  *	vnode_size:???			Other error from vnode_getattr
1078  */
1079 int
ubc_info_init_withsize(struct vnode * vp,off_t filesize)1080 ubc_info_init_withsize(struct vnode *vp, off_t filesize)
1081 {
1082 	return ubc_info_init_internal(vp, 1, filesize);
1083 }
1084 
1085 
1086 /*
1087  * ubc_info_init_internal
1088  *
1089  * Allocate and attach a ubc_info structure to a vnode
1090  *
1091  * Parameters:	vp			Pointer to the vnode
1092  *		withfsize{0,1}		Zero if the size should be obtained
1093  *					from the vnode; otherwise, use filesize
1094  *		filesize		The size of the file, if withfsize == 1
1095  *
1096  * Returns:	0			Success
1097  *	vnode_size:ENOMEM		Not enough space
1098  *	vnode_size:???			Other error from vnode_getattr
1099  *
1100  * Notes:	We call a blocking zalloc(), and the zone was created as an
1101  *		expandable and collectable zone, so if no memory is available,
1102  *		it is possible for zalloc() to block indefinitely.  zalloc()
1103  *		may also panic if the zone of zones is exhausted, since it's
1104  *		NOT expandable.
1105  *
1106  *		We unconditionally call vnode_pager_setup(), even if this is
1107  *		a reuse of a ubc_info; in that case, we should probably assert
1108  *		that it does not already have a pager association, but do not.
1109  *
1110  *		Since memory_object_create_named() can only fail from receiving
1111  *		an invalid pager argument, the explicit check and panic is
1112  *		merely precautionary.
1113  */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	struct ubc_info *uip;
	void *  pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {
		/* Z_WAITOK: this allocation may block indefinitely */
		uip = zalloc_flags(ubc_info_zone, Z_WAITOK | Z_ZERO);

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use bogus size.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
	    (memory_object_size_t)uip->ui_size, &control);
	/* drop the ref vnode_pager_setup() returned (see comment above) */
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS) {
		/* can only fail on an invalid pager argument; precautionary */
		panic("ubc_info_init: memory_object_create_named returned %d", kret);
	}

	assert(control);
	uip->ui_control = control;      /* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error) {
			uip->ui_size = 0;
		}
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */

	return error;
}
1199 
1200 
1201 /*
1202  * ubc_info_free
1203  *
1204  * Free a ubc_info structure
1205  *
1206  * Parameters:	uip			A pointer to the ubc_info to free
1207  *
1208  * Returns:	(void)
1209  *
1210  * Notes:	If there is a credential that has subsequently been associated
1211  *		with the ubc_info via a call to ubc_setcred(), the reference
1212  *		to the credential is dropped.
1213  *
1214  *		It's actually impossible for a ubc_info.ui_control to take the
1215  *		value MEMORY_OBJECT_CONTROL_NULL.
1216  */
1217 static void
ubc_info_free(struct ubc_info * uip)1218 ubc_info_free(struct ubc_info *uip)
1219 {
1220 	if (IS_VALID_CRED(uip->ui_ucred)) {
1221 		kauth_cred_unref(&uip->ui_ucred);
1222 	}
1223 
1224 	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
1225 		memory_object_control_deallocate(uip->ui_control);
1226 	}
1227 
1228 	cluster_release(uip);
1229 	ubc_cs_free(uip);
1230 
1231 	zfree(ubc_info_zone, uip);
1232 	return;
1233 }
1234 
1235 
void
ubc_info_deallocate(struct ubc_info *uip)
{
	/* Currently a straight pass-through to ubc_info_free() */
	ubc_info_free(uip);
}
1241 
1242 /*
1243  * ubc_setsize_ex
1244  *
 * Tell the VM that the size of the file represented by the vnode has
1246  * changed
1247  *
1248  * Parameters:	vp	   The vp whose backing file size is
1249  *					   being changed
1250  *				nsize  The new size of the backing file
1251  *				opts   Options
1252  *
1253  * Returns:	EINVAL for new size < 0
1254  *			ENOENT if no UBC info exists
1255  *          EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1256  *          Other errors (mapped to errno_t) returned by VM functions
1257  *
1258  * Notes:   This function will indicate success if the new size is the
1259  *		    same or larger than the old size (in this case, the
1260  *		    remainder of the file will require modification or use of
1261  *		    an existing upl to access successfully).
1262  *
1263  *		    This function will fail if the new file size is smaller,
1264  *		    and the memory region being invalidated was unable to
1265  *		    actually be invalidated and/or the last page could not be
1266  *		    flushed, if the new size is not aligned to a page
1267  *		    boundary.  This is usually indicative of an I/O error.
1268  */
errno_t
ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;    /* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0) {
		return EINVAL;
	}

	if (!UBCINFOEXISTS(vp)) {
		return ENOENT;
	}

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	/* caller asked us not to shrink (which would re-enter the FS) */
	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
		return EAGAIN;
	}

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {   /* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t           upl;
		upl_page_info_t *pl;

		/*
		 * new EOF ends up in the middle of a page
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);

		if (kret != KERN_SUCCESS) {
			panic("ubc_setsize: ubc_create_upl (error = %d)", kret);
		}

		if (upl_valid_page(pl, 0)) {
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
		}

		/* release the UPL; the zeroing above modified the page in place */
		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		/* the partially valid EOF page must survive; flush after it */
		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int     flags;

		if (lastpg == 0) {
			/* the whole range is going away: flush everything */
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		} else {
			flags = MEMORY_OBJECT_DATA_FLUSH;
		}
		/*
		 * invalidate the pages beyond the new EOF page
		 *
		 */
		kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
		    MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS) {
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
		}
	}
	return mach_to_bsd_errno(kret);
}
1363 
1364 // Returns true for success
1365 int
ubc_setsize(vnode_t vp,off_t nsize)1366 ubc_setsize(vnode_t vp, off_t nsize)
1367 {
1368 	return ubc_setsize_ex(vp, nsize, 0) == 0;
1369 }
1370 
1371 /*
1372  * ubc_getsize
1373  *
 * Get the size of the file associated with the specified vnode
1375  *
1376  * Parameters:	vp			The vnode whose size is of interest
1377  *
1378  * Returns:	0			There is no ubc_info associated with
1379  *					this vnode, or the size is zero
1380  *		!0			The size of the file
1381  *
1382  * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associated with a zero
1384  *		length file, and a vnode with no associated ubc_info.  The
1385  *		caller therefore needs to not care, or needs to ensure that
1386  *		they have previously successfully called ubc_info_init() or
1387  *		ubc_info_init_withsize().
1388  */
1389 off_t
ubc_getsize(struct vnode * vp)1390 ubc_getsize(struct vnode *vp)
1391 {
1392 	/* people depend on the side effect of this working this way
1393 	 * as they call this for directory
1394 	 */
1395 	if (!UBCINFOEXISTS(vp)) {
1396 		return (off_t)0;
1397 	}
1398 	return vp->v_ubcinfo->ui_size;
1399 }
1400 
1401 
1402 /*
1403  * ubc_umount
1404  *
1405  * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1406  * mount point
1407  *
1408  * Parameters:	mp			The mount point
1409  *
1410  * Returns:	0			Success
1411  *
1412  * Notes:	There is no failure indication for this function.
1413  *
1414  *		This function is used in the unmount path; since it may block
1415  *		I/O indefinitely, it should not be used in the forced unmount
1416  *		path, since a device unavailability could also block that
1417  *		indefinitely.
1418  *
1419  *		Because there is no device ejection interlock on USB, FireWire,
1420  *		or similar devices, it's possible that an ejection that begins
1421  *		subsequent to the vnode_iterate() completing, either on one of
1422  *		those devices, or a network mount for which the server quits
1423  *		responding, etc., may cause the caller to block indefinitely.
1424  */
1425 __private_extern__ int
ubc_umount(struct mount * mp)1426 ubc_umount(struct mount *mp)
1427 {
1428 	vnode_iterate(mp, 0, ubc_umcallback, 0);
1429 	return 0;
1430 }
1431 
1432 
1433 /*
1434  * ubc_umcallback
1435  *
1436  * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1437  * and vnode_iterate() for details of implementation.
1438  */
1439 static int
ubc_umcallback(vnode_t vp,__unused void * args)1440 ubc_umcallback(vnode_t vp, __unused void * args)
1441 {
1442 	if (UBCINFOEXISTS(vp)) {
1443 		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1444 	}
1445 	return VNODE_RETURNED;
1446 }
1447 
1448 
1449 /*
1450  * ubc_getcred
1451  *
1452  * Get the credentials currently active for the ubc_info associated with the
1453  * vnode.
1454  *
1455  * Parameters:	vp			The vnode whose ubc_info credentials
1456  *					are to be retrieved
1457  *
1458  * Returns:	!NOCRED			The credentials
1459  *		NOCRED			If there is no ubc_info for the vnode,
1460  *					or if there is one, but it has not had
1461  *					any credentials associated with it via
1462  *					a call to ubc_setcred()
1463  */
1464 kauth_cred_t
ubc_getcred(struct vnode * vp)1465 ubc_getcred(struct vnode *vp)
1466 {
1467 	if (UBCINFOEXISTS(vp)) {
1468 		return vp->v_ubcinfo->ui_ucred;
1469 	}
1470 
1471 	return NOCRED;
1472 }
1473 
1474 
1475 /*
1476  * ubc_setthreadcred
1477  *
1478  * If they are not already set, set the credentials of the ubc_info structure
1479  * associated with the vnode to those of the supplied thread; otherwise leave
1480  * them alone.
1481  *
1482  * Parameters:	vp			The vnode whose ubc_info creds are to
1483  *					be set
1484  *		p			The process whose credentials are to
1485  *					be used, if not running on an assumed
1486  *					credential
1487  *		thread			The thread whose credentials are to
1488  *					be used
1489  *
1490  * Returns:	1			This vnode has no associated ubc_info
1491  *		0			Success
1492  *
1493  * Notes:	This function takes a proc parameter to account for bootstrap
1494  *		issues where a task or thread may call this routine, either
1495  *		before credentials have been initialized by bsd_init(), or if
 *		there is no BSD info associated with a mach thread yet.  This
1497  *		is known to happen in both the initial swap and memory mapping
1498  *		calls.
1499  *
1500  *		This function is generally used only in the following cases:
1501  *
1502  *		o	a memory mapped file via the mmap() system call
1503  *		o	a swap store backing file
1504  *		o	subsequent to a successful write via vn_write()
1505  *
1506  *		The information is then used by the NFS client in order to
1507  *		cons up a wire message in either the page-in or page-out path.
1508  *
1509  *		There are two potential problems with the use of this API:
1510  *
1511  *		o	Because the write path only set it on a successful
1512  *			write, there is a race window between setting the
1513  *			credential and its use to evict the pages to the
1514  *			remote file server
1515  *
1516  *		o	Because a page-in may occur prior to a write, the
1517  *			credential may not be set at this time, if the page-in
1518  *			is not the result of a mapping established via mmap().
1519  *
1520  *		In both these cases, this will be triggered from the paging
1521  *		path, which will instead use the credential of the current
1522  *		process, which in this case is either the dynamic_pager or
1523  *		the kernel task, both of which utilize "root" credentials.
1524  *
1525  *		This may potentially permit operations to occur which should
1526  *		be denied, or it may cause to be denied operations which
1527  *		should be permitted, depending on the configuration of the NFS
1528  *		server.
1529  */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
	struct ubc_info *uip;
	thread_ro_t tro = get_thread_ro(thread);

	if (!UBCINFOEXISTS(vp)) {
		return 1;
	}

	/* only the current thread's read-only data may be consulted here */
	assert(thread == current_thread());

	vnode_lock(vp);

	uip = vp->v_ubcinfo;

	/* first valid credential wins; never replace an existing one */
	if (!IS_VALID_CRED(uip->ui_ucred)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (tro->tro_flags & TRO_SETUID) {
			uip->ui_ucred = tro->tro_cred;
			kauth_cred_ref(uip->ui_ucred);
		} else {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		}
	}
	vnode_unlock(vp);

	return 0;
}
1559 
1560 
1561 /*
1562  * ubc_setcred
1563  *
1564  * If they are not already set, set the credentials of the ubc_info structure
1565  * associated with the vnode to those of the process; otherwise leave them
1566  * alone.
1567  *
1568  * Parameters:	vp			The vnode whose ubc_info creds are to
1569  *					be set
1570  *		p			The process whose credentials are to
1571  *					be used
1572  *
1573  * Returns:	0			This vnode has no associated ubc_info
1574  *		1			Success
1575  *
1576  * Notes:	The return values for this function are inverted from nearly
1577  *		all other uses in the kernel.
1578  *
1579  *		See also ubc_setthreadcred(), above.
1580  *
1581  *		This function is considered deprecated, and generally should
1582  *		not be used, as it is incompatible with per-thread credentials;
1583  *		it exists for legacy KPI reasons.
1584  *
1585  * DEPRECATION:	ubc_setcred() is being deprecated. Please use
1586  *		ubc_setthreadcred() instead.
1587  */
1588 int
ubc_setcred(struct vnode * vp,proc_t p)1589 ubc_setcred(struct vnode *vp, proc_t p)
1590 {
1591 	struct ubc_info *uip;
1592 	kauth_cred_t credp;
1593 
1594 	/* If there is no ubc_info, deny the operation */
1595 	if (!UBCINFOEXISTS(vp)) {
1596 		return 0;
1597 	}
1598 
1599 	/*
1600 	 * Check to see if there is already a credential reference in the
1601 	 * ubc_info; if there is not, take one on the supplied credential.
1602 	 */
1603 	vnode_lock(vp);
1604 	uip = vp->v_ubcinfo;
1605 	credp = uip->ui_ucred;
1606 	if (!IS_VALID_CRED(credp)) {
1607 		uip->ui_ucred = kauth_cred_proc_ref(p);
1608 	}
1609 	vnode_unlock(vp);
1610 
1611 	return 1;
1612 }
1613 
1614 /*
1615  * ubc_getpager
1616  *
1617  * Get the pager associated with the ubc_info associated with the vnode.
1618  *
1619  * Parameters:	vp			The vnode to obtain the pager from
1620  *
1621  * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
1622  *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
1623  *
1624  * Notes:	For each vnode that has a ubc_info associated with it, that
1625  *		ubc_info SHALL have a pager associated with it, so in the
1626  *		normal case, it's impossible to return VNODE_PAGER_NULL for
1627  *		a vnode with an associated ubc_info.
1628  */
1629 __private_extern__ memory_object_t
ubc_getpager(struct vnode * vp)1630 ubc_getpager(struct vnode *vp)
1631 {
1632 	if (UBCINFOEXISTS(vp)) {
1633 		return vp->v_ubcinfo->ui_pager;
1634 	}
1635 
1636 	return 0;
1637 }
1638 
1639 
1640 /*
1641  * ubc_getobject
1642  *
1643  * Get the memory object control associated with the ubc_info associated with
1644  * the vnode
1645  *
1646  * Parameters:	vp			The vnode to obtain the memory object
1647  *					from
1648  *		flags			DEPRECATED
1649  *
1650  * Returns:	!MEMORY_OBJECT_CONTROL_NULL
1651  *		MEMORY_OBJECT_CONTROL_NULL
1652  *
1653  * Notes:	Historically, if the flags were not "do not reactivate", this
1654  *		function would look up the memory object using the pager if
1655  *		it did not exist (this could be the case if the vnode had
1656  *		been previously reactivated).  The flags would also permit a
1657  *		hold to be requested, which would have created an object
1658  *		reference, if one had not already existed.  This usage is
1659  *		deprecated, as it would permit a race between finding and
1660  *		taking the reference vs. a single reference being dropped in
1661  *		another thread.
1662  */
1663 memory_object_control_t
ubc_getobject(struct vnode * vp,__unused int flags)1664 ubc_getobject(struct vnode *vp, __unused int flags)
1665 {
1666 	if (UBCINFOEXISTS(vp)) {
1667 		return vp->v_ubcinfo->ui_control;
1668 	}
1669 
1670 	return MEMORY_OBJECT_CONTROL_NULL;
1671 }
1672 
1673 /*
1674  * ubc_blktooff
1675  *
1676  * Convert a given block number to a memory backing object (file) offset for a
1677  * given vnode
1678  *
1679  * Parameters:	vp			The vnode in which the block is located
1680  *		blkno			The block number to convert
1681  *
1682  * Returns:	!-1			The offset into the backing object
1683  *		-1			There is no ubc_info associated with
1684  *					the vnode
1685  *		-1			An error occurred in the underlying VFS
1686  *					while translating the block to an
1687  *					offset; the most likely cause is that
1688  *					the caller specified a block past the
1689  *					end of the file, but this could also be
1690  *					any other error from VNOP_BLKTOOFF().
1691  *
1692  * Note:	Representing the error in band loses some information, but does
1693  *		not occlude a valid offset, since an off_t of -1 is normally
1694  *		used to represent EOF.  If we had a more reliable constant in
1695  *		our header files for it (i.e. explicitly cast to an off_t), we
1696  *		would use it here instead.
1697  */
1698 off_t
ubc_blktooff(vnode_t vp,daddr64_t blkno)1699 ubc_blktooff(vnode_t vp, daddr64_t blkno)
1700 {
1701 	off_t file_offset = -1;
1702 	int error;
1703 
1704 	if (UBCINFOEXISTS(vp)) {
1705 		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1706 		if (error) {
1707 			file_offset = -1;
1708 		}
1709 	}
1710 
1711 	return file_offset;
1712 }
1713 
1714 
1715 /*
1716  * ubc_offtoblk
1717  *
1718  * Convert a given offset in a memory backing object into a block number for a
1719  * given vnode
1720  *
1721  * Parameters:	vp			The vnode in which the offset is
1722  *					located
1723  *		offset			The offset into the backing object
1724  *
1725  * Returns:	!-1			The returned block number
1726  *		-1			There is no ubc_info associated with
1727  *					the vnode
1728  *		-1			An error occurred in the underlying VFS
1729  *					while translating the block to an
1730  *					offset; the most likely cause is that
1731  *					the caller specified a block past the
1732  *					end of the file, but this could also be
1733  *					any other error from VNOP_OFFTOBLK().
1734  *
1735  * Note:	Representing the error in band loses some information, but does
1736  *		not occlude a valid block number, since block numbers exceed
1737  *		the valid range for offsets, due to their relative sizes.  If
1738  *		we had a more reliable constant than -1 in our header files
1739  *		for it (i.e. explicitly cast to an daddr64_t), we would use it
1740  *		here instead.
1741  */
1742 daddr64_t
ubc_offtoblk(vnode_t vp,off_t offset)1743 ubc_offtoblk(vnode_t vp, off_t offset)
1744 {
1745 	daddr64_t blkno = -1;
1746 	int error = 0;
1747 
1748 	if (UBCINFOEXISTS(vp)) {
1749 		error = VNOP_OFFTOBLK(vp, offset, &blkno);
1750 		if (error) {
1751 			blkno = -1;
1752 		}
1753 	}
1754 
1755 	return blkno;
1756 }
1757 
1758 
1759 /*
1760  * ubc_pages_resident
1761  *
1762  * Determine whether or not a given vnode has pages resident via the memory
1763  * object control associated with the ubc_info associated with the vnode
1764  *
1765  * Parameters:	vp			The vnode we want to know about
1766  *
1767  * Returns:	1			Yes
1768  *		0			No
1769  */
1770 int
ubc_pages_resident(vnode_t vp)1771 ubc_pages_resident(vnode_t vp)
1772 {
1773 	kern_return_t           kret;
1774 	boolean_t                       has_pages_resident;
1775 
1776 	if (!UBCINFOEXISTS(vp)) {
1777 		return 0;
1778 	}
1779 
1780 	/*
1781 	 * The following call may fail if an invalid ui_control is specified,
1782 	 * or if there is no VM object associated with the control object.  In
1783 	 * either case, reacting to it as if there were no pages resident will
1784 	 * result in correct behavior.
1785 	 */
1786 	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1787 
1788 	if (kret != KERN_SUCCESS) {
1789 		return 0;
1790 	}
1791 
1792 	if (has_pages_resident == TRUE) {
1793 		return 1;
1794 	}
1795 
1796 	return 0;
1797 }
1798 
1799 /*
1800  * ubc_msync
1801  *
1802  * Clean and/or invalidate a range in the memory object that backs this vnode
1803  *
1804  * Parameters:	vp			The vnode whose associated ubc_info's
1805  *					associated memory object is to have a
1806  *					range invalidated within it
1807  *		beg_off			The start of the range, as an offset
1808  *		end_off			The end of the range, as an offset
1809  *		resid_off		The address of an off_t supplied by the
1810  *					caller; may be set to NULL to ignore
1811  *		flags			See ubc_msync_internal()
1812  *
1813  * Returns:	0			Success
1814  *		!0			Failure; an errno is returned
1815  *
1816  * Implicit Returns:
1817  *		*resid_off, modified	If non-NULL, the  contents are ALWAYS
1818  *					modified; they are initialized to the
1819  *					beg_off, and in case of an I/O error,
1820  *					the difference between beg_off and the
1821  *					current value will reflect what was
1822  *					able to be written before the error
1823  *					occurred.  If no error is returned, the
1824  *					value of the resid_off is undefined; do
1825  *					NOT use it in place of end_off if you
1826  *					intend to increment from the end of the
1827  *					last call and call iteratively.
1828  *
1829  * Notes:	see ubc_msync_internal() for more detailed information.
1830  *
1831  */
1832 errno_t
ubc_msync(vnode_t vp,off_t beg_off,off_t end_off,off_t * resid_off,int flags)1833 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1834 {
1835 	int retval;
1836 	int io_errno = 0;
1837 
1838 	if (resid_off) {
1839 		*resid_off = beg_off;
1840 	}
1841 
1842 	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1843 
1844 	if (retval == 0 && io_errno == 0) {
1845 		return EINVAL;
1846 	}
1847 	return io_errno;
1848 }
1849 
1850 
1851 /*
1852  * ubc_msync_internal
1853  *
1854  * Clean and/or invalidate a range in the memory object that backs this vnode
1855  *
1856  * Parameters:	vp			The vnode whose associated ubc_info's
1857  *					associated memory object is to have a
1858  *					range invalidated within it
1859  *		beg_off			The start of the range, as an offset
1860  *		end_off			The end of the range, as an offset
1861  *		resid_off		The address of an off_t supplied by the
1862  *					caller; may be set to NULL to ignore
1863  *		flags			MUST contain at least one of the flags
1864  *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
1865  *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1866  *					UBC_SYNC may also be specified to cause
1867  *					this function to block until the
1868  *					operation is complete.  The behavior
1869  *					of UBC_SYNC is otherwise undefined.
1870  *		io_errno		The address of an int to contain the
1871  *					errno from a failed I/O operation, if
1872  *					one occurs; may be set to NULL to
1873  *					ignore
1874  *
1875  * Returns:	1			Success
1876  *		0			Failure
1877  *
1878  * Implicit Returns:
1879  *		*resid_off, modified	The contents of this offset MAY be
1880  *					modified; in case of an I/O error, the
1881  *					difference between beg_off and the
1882  *					current value will reflect what was
1883  *					able to be written before the error
1884  *					occurred.
1885  *		*io_errno, modified	The contents of this offset are set to
1886  *					an errno, if an error occurs; if the
1887  *					caller supplies an io_errno parameter,
1888  *					they should be careful to initialize it
1889  *					to 0 before calling this function to
1890  *					enable them to distinguish an error
1891  *					with a valid *resid_off from an invalid
1892  *					one, and to avoid potentially falsely
1893  *					reporting an error, depending on use.
1894  *
1895  * Notes:	If there is no ubc_info associated with the vnode supplied,
1896  *		this function immediately returns success.
1897  *
1898  *		If the value of end_off is less than or equal to beg_off, this
1899  *		function immediately returns success; that is, end_off is NOT
1900  *		inclusive.
1901  *
1902  *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1903  *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1904  *		attempt to block on in-progress I/O by calling this function
1905  *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1906  *		in order to block pending on the I/O already in progress.
1907  *
1908  *		The start offset is truncated to the page boundary and the
1909  *		size is adjusted to include the last page in the range; that
1910  *		is, end_off on exactly a page boundary will not change if it
1911  *		is rounded, and the range of bytes written will be from the
1912  *		truncate beg_off to the rounded (end_off - 1).
1913  */
1914 static int
ubc_msync_internal(vnode_t vp,off_t beg_off,off_t end_off,off_t * resid_off,int flags,int * io_errno)1915 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1916 {
1917 	memory_object_size_t    tsize;
1918 	kern_return_t           kret;
1919 	int request_flags = 0;
1920 	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;
1921 
1922 	if (!UBCINFOEXISTS(vp)) {
1923 		return 0;
1924 	}
1925 	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1926 		return 0;
1927 	}
1928 	if (end_off <= beg_off) {
1929 		return 1;
1930 	}
1931 
1932 	if (flags & UBC_INVALIDATE) {
1933 		/*
1934 		 * discard the resident pages
1935 		 */
1936 		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1937 	}
1938 
1939 	if (flags & UBC_SYNC) {
1940 		/*
1941 		 * wait for all the I/O to complete before returning
1942 		 */
1943 		request_flags |= MEMORY_OBJECT_IO_SYNC;
1944 	}
1945 
1946 	if (flags & UBC_PUSHDIRTY) {
1947 		/*
1948 		 * we only return the dirty pages in the range
1949 		 */
1950 		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1951 	}
1952 
1953 	if (flags & UBC_PUSHALL) {
1954 		/*
1955 		 * then return all the interesting pages in the range (both
1956 		 * dirty and precious) to the pager
1957 		 */
1958 		flush_flags = MEMORY_OBJECT_RETURN_ALL;
1959 	}
1960 
1961 	beg_off = trunc_page_64(beg_off);
1962 	end_off = round_page_64(end_off);
1963 	tsize   = (memory_object_size_t)end_off - beg_off;
1964 
1965 	/* flush and/or invalidate pages in the range requested */
1966 	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
1967 	    beg_off, tsize,
1968 	    (memory_object_offset_t *)resid_off,
1969 	    io_errno, flush_flags, request_flags,
1970 	    VM_PROT_NO_CHANGE);
1971 
1972 	return (kret == KERN_SUCCESS) ? 1 : 0;
1973 }
1974 
1975 
1976 /*
1977  * ubc_map
1978  *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1980  * to it for the ubc system, if there isn't one already, so it will not be
1981  * recycled while it's in use, and set flags on the ubc_info to indicate that
1982  * we have done this
1983  *
1984  * Parameters:	vp			The vnode to map
1985  *		flags			The mapping flags for the vnode; this
1986  *					will be a combination of one or more of
1987  *					PROT_READ, PROT_WRITE, and PROT_EXEC
1988  *
1989  * Returns:	0			Success
1990  *		EPERM			Permission was denied
1991  *
1992  * Notes:	An I/O reference on the vnode must already be held on entry
1993  *
1994  *		If there is no ubc_info associated with the vnode, this function
1995  *		will return success.
1996  *
1997  *		If a permission error occurs, this function will return
1998  *		failure; all other failures will cause this function to return
1999  *		success.
2000  *
2001  *		IMPORTANT: This is an internal use function, and its symbols
2002  *		are not exported, hence its error checking is not very robust.
2003  *		It is primarily used by:
2004  *
2005  *		o	mmap(), when mapping a file
2006  *		o	When mapping a shared file (a shared library in the
2007  *			shared segment region)
2008  *		o	When loading a program image during the exec process
2009  *
2010  *		...all of these uses ignore the return code, and any fault that
2011  *		results later because of a failure is handled in the fix-up path
2012  *		of the fault handler.  The interface exists primarily as a
2013  *		performance hint.
2014  *
2015  *		Given that third party implementation of the type of interfaces
2016  *		that would use this function, such as alternative executable
2017  *		formats, etc., are unsupported, this function is not exported
2018  *		for general use.
2019  *
2020  *		The extra reference is held until the VM system unmaps the
2021  *		vnode from its own context to maintain a vnode reference in
2022  *		cases like open()/mmap()/close(), which leave the backing
2023  *		object referenced by a mapped memory region in a process
2024  *		address space.
2025  */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;       /* first mapping: take a forced vnode ref */
	int need_wakeup = 0;    /* another thread is waiting on UI_MAPBUSY */

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/*
		 * Serialize with concurrent ubc_map()/ubc_unmap() callers:
		 * wait for any in-flight mapping transition to drop
		 * UI_MAPBUSY, then claim it ourselves before dropping the
		 * vnode lock for the (potentially blocking) VNOP_MMAP call.
		 */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
			    PRIBIO, "ubc_map", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);
		vnode_unlock(vp);

		/* give the filesystem a chance to observe/veto the mapping */
		error = VNOP_MMAP(vp, flags, vfs_context_current());

		/*
		 * rdar://problem/22587101 required that we stop propagating
		 * EPERM up the stack. Otherwise, we would have to funnel up
		 * the error at all the call sites for memory_object_map().
		 * The risk is in having to undo the map/object/entry state at
		 * all these call sites. It would also affect more than just mmap()
		 * e.g. vm_remap().
		 *
		 *	if (error != EPERM)
		 *              error = 0;
		 */

		/* deliberately discard any VNOP_MMAP error (see above) */
		error = 0;

		vnode_lock_spin(vp);

		/* always true since error was forced to 0 above */
		if (error == 0) {
			if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
				need_ref = 1;
			}
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
			if (flags & PROT_WRITE) {
				SET(uip->ui_flags, UI_MAPPEDWRITE);
			}
		}
		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup(&uip->ui_flags);
		}

		if (need_ref) {
			/*
			 * Make sure we get a ref as we can't unwind from here
			 */
			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
				panic("%s : VNODE_REF_FORCE failed", __FUNCTION__);
			}
			/*
			 * Vnodes that are on "unreliable" media (like disk
			 * images, network filesystems, 3rd-party filesystems,
			 * and possibly external devices) could see their
			 * contents be changed via the backing store without
			 * triggering copy-on-write, so we can't fully rely
			 * on copy-on-write and might have to resort to
			 * copy-on-read to protect "privileged" processes and
			 * prevent privilege escalation.
			 *
			 * The root filesystem is considered "reliable" because
			 * there's not much point in trying to protect
			 * ourselves from such a vulnerability and the extra
			 * cost of copy-on-read (CPU time and memory pressure)
			 * could result in some serious regressions.
			 */
			if (vp->v_mount != NULL &&
			    ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
			    vnode_on_reliable_media(vp))) {
				/*
				 * This vnode is deemed "reliable" so mark
				 * its VM object as "trusted".
				 */
				memory_object_mark_trusted(uip->ui_control);
			} else {
//				printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
			}
		}
	}
	return error;
}
2123 
2124 
2125 /*
2126  * ubc_destroy_named
2127  *
2128  * Destroy the named memory object associated with the ubc_info control object
2129  * associated with the designated vnode, if there is a ubc_info associated
2130  * with the vnode, and a control object is associated with it
2131  *
2132  * Parameters:	vp			The designated vnode
2133  *
2134  * Returns:	(void)
2135  *
2136  * Notes:	This function is called on vnode termination for all vnodes,
2137  *		and must therefore not assume that there is a ubc_info that is
2138  *		associated with the vnode, nor that there is a control object
2139  *		associated with the ubc_info.
2140  *
2141  *		If all the conditions necessary are present, this function
 *		calls memory_object_destroy(), which will in turn end up
2143  *		calling ubc_unmap() to release any vnode references that were
2144  *		established via ubc_map().
2145  *
2146  *		IMPORTANT: This is an internal use function that is used
2147  *		exclusively by the internal use function vclean().
2148  */
2149 __private_extern__ void
ubc_destroy_named(vnode_t vp)2150 ubc_destroy_named(vnode_t vp)
2151 {
2152 	memory_object_control_t control;
2153 	struct ubc_info *uip;
2154 	kern_return_t kret;
2155 
2156 	if (UBCINFOEXISTS(vp)) {
2157 		uip = vp->v_ubcinfo;
2158 
2159 		/* Terminate the memory object  */
2160 		control = ubc_getobject(vp, UBC_HOLDOBJECT);
2161 		if (control != MEMORY_OBJECT_CONTROL_NULL) {
2162 			kret = memory_object_destroy(control, 0);
2163 			if (kret != KERN_SUCCESS) {
2164 				panic("ubc_destroy_named: memory_object_destroy failed");
2165 			}
2166 		}
2167 	}
2168 }
2169 
2170 
2171 /*
2172  * ubc_isinuse
2173  *
2174  * Determine whether or not a vnode is currently in use by ubc at a level in
2175  * excess of the requested busycount
2176  *
2177  * Parameters:	vp			The vnode to check
2178  *		busycount		The threshold busy count, used to bias
2179  *					the count usually already held by the
2180  *					caller to avoid races
2181  *
2182  * Returns:	1			The vnode is in use over the threshold
2183  *		0			The vnode is not in use over the
2184  *					threshold
2185  *
2186  * Notes:	Because the vnode is only held locked while actually asking
2187  *		the use count, this function only represents a snapshot of the
2188  *		current state of the vnode.  If more accurate information is
2189  *		required, an additional busycount should be held by the caller
2190  *		and a non-zero busycount used.
2191  *
2192  *		If there is no ubc_info associated with the vnode, this
2193  *		function will report that the vnode is not in use by ubc.
2194  */
/*
 * Snapshot check of whether 'vp' is in use by ubc above 'busycount';
 * a vnode without a ubc_info is reported as not in use.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if (UBCINFOEXISTS(vp)) {
		return ubc_isinuse_locked(vp, busycount, 0);
	}
	return 0;
}
2203 
2204 
2205 /*
2206  * ubc_isinuse_locked
2207  *
2208  * Determine whether or not a vnode is currently in use by ubc at a level in
2209  * excess of the requested busycount
2210  *
2211  * Parameters:	vp			The vnode to check
2212  *		busycount		The threshold busy count, used to bias
2213  *					the count usually already held by the
2214  *					caller to avoid races
2215  *		locked			True if the vnode is already locked by
2216  *					the caller
2217  *
2218  * Returns:	1			The vnode is in use over the threshold
2219  *		0			The vnode is not in use over the
2220  *					threshold
2221  *
2222  * Notes:	If the vnode is not locked on entry, it is locked while
2223  *		actually asking the use count.  If this is the case, this
2224  *		function only represents a snapshot of the current state of
2225  *		the vnode.  If more accurate information is required, the
2226  *		vnode lock should be held by the caller, otherwise an
2227  *		additional busycount should be held by the caller and a
2228  *		non-zero busycount used.
2229  *
2230  *		If there is no ubc_info associated with the vnode, this
2231  *		function will report that the vnode is not in use by ubc.
2232  */
2233 int
ubc_isinuse_locked(struct vnode * vp,int busycount,int locked)2234 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2235 {
2236 	int retval = 0;
2237 
2238 
2239 	if (!locked) {
2240 		vnode_lock_spin(vp);
2241 	}
2242 
2243 	if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2244 		retval = 1;
2245 	}
2246 
2247 	if (!locked) {
2248 		vnode_unlock(vp);
2249 	}
2250 	return retval;
2251 }
2252 
2253 
2254 /*
2255  * ubc_unmap
2256  *
2257  * Reverse the effects of a ubc_map() call for a given vnode
2258  *
2259  * Parameters:	vp			vnode to unmap from ubc
2260  *
2261  * Returns:	(void)
2262  *
2263  * Notes:	This is an internal use function used by vnode_pager_unmap().
2264  *		It will attempt to obtain a reference on the supplied vnode,
2265  *		and if it can do so, and there is an associated ubc_info, and
2266  *		the flags indicate that it was mapped via ubc_map(), then the
2267  *		flag is cleared, the mapping removed, and the reference taken
2268  *		by ubc_map() is released.
2269  *
2270  *		IMPORTANT: This MUST only be called by the VM
2271  *		to prevent race conditions.
2272  */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	int     need_rele = 0;      /* UI_ISMAPPED was set: drop the ubc_map() ref */
	int     need_wakeup = 0;    /* another thread is waiting on UI_MAPBUSY */

	/* bail if we can't get an iocount; the vnode is going away */
	if (vnode_getwithref(vp)) {
		return;
	}

	if (UBCINFOEXISTS(vp)) {
		bool want_fsevent = false;

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/*
		 * Serialize with concurrent ubc_map()/ubc_unmap() callers
		 * via UI_MAPBUSY (same protocol as ubc_map()).
		 */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
			    PRIBIO, "ubc_unmap", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
				want_fsevent = true;
			}

			need_rele = 1;

			/*
			 * We want to clear the mapped flags after we've called
			 * VNOP_MNOMAP to avoid certain races and allow
			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
			 */
		}
		vnode_unlock(vp);

		if (need_rele) {
			vfs_context_t ctx = vfs_context_current();

			/* filesystem notification; failure is ignored */
			(void)VNOP_MNOMAP(vp, ctx);

#if CONFIG_FSE
			/*
			 * Why do we want an fsevent here?  Normally the
			 * content modified fsevent is posted when a file is
			 * closed and only if it's written to via conventional
			 * means.  It's perfectly legal to close a file and
			 * keep your mappings and we don't currently track
			 * whether it was written to via a mapping.
			 * Therefore, we need to post an fsevent here if the
			 * file was mapped writable.  This may result in false
			 * events, i.e. we post a notification when nothing
			 * has really changed.
			 */
			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
				add_fsevent(FSE_CONTENT_MODIFIED_NO_HLINK, ctx,
				    FSE_ARG_VNODE, vp,
				    FSE_ARG_DONE);
			}
#endif

			/* release the reference taken by ubc_map() */
			vnode_rele(vp);
		}

		vnode_lock_spin(vp);

		/* now safe to clear the mapped flags (after VNOP_MNOMAP) */
		if (need_rele) {
			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
		}

		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup(&uip->ui_flags);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}
2363 
2364 
2365 /*
2366  * ubc_page_op
2367  *
2368  * Manipulate individual page state for a vnode with an associated ubc_info
2369  * with an associated memory object control.
2370  *
2371  * Parameters:	vp			The vnode backing the page
2372  *		f_offset		A file offset interior to the page
2373  *		ops			The operations to perform, as a bitmap
2374  *					(see below for more information)
2375  *		phys_entryp		The address of a ppnum_t; may be NULL
2376  *					to ignore
2377  *		flagsp			A pointer to an int to contain flags;
2378  *					may be NULL to ignore
2379  *
2380  * Returns:	KERN_SUCCESS		Success
2381  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2382  *					object associated
2383  *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
2384  *					not physically contiguous
2385  *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
2386  *					physically contiguous
2387  *		KERN_FAILURE		If the page cannot be looked up
2388  *
2389  * Implicit Returns:
2390  *		*phys_entryp (modified)	If phys_entryp is non-NULL and
2391  *					UPL_POP_PHYSICAL
2392  *		*flagsp (modified)	If flagsp is non-NULL and there was
2393  *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
2394  *
2395  * Notes:	For object boundaries, it is considerably more efficient to
2396  *		ensure that f_offset is in fact on a page boundary, as this
2397  *		will avoid internal use of the hash table to identify the
2398  *		page, and would therefore skip a number of early optimizations.
2399  *		Since this is a page operation anyway, the caller should try
2400  *		to pass only a page aligned offset because of this.
2401  *
2402  *		*flagsp may be modified even if this function fails.  If it is
2403  *		modified, it will contain the condition of the page before the
2404  *		requested operation was attempted; these will only include the
2405  *		bitmap flags, and not the PL_POP_PHYSICAL, UPL_POP_DUMP,
2406  *		UPL_POP_SET, or UPL_POP_CLR bits.
2407  *
2408  *		The flags field may contain a specific operation, such as
2409  *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
2410  *
2411  *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
2412  *						*phys_entryp and successful, set
2413  *						*phys_entryp
2414  *		o	UPL_POP_DUMP		Dump the specified page
2415  *
2416  *		Otherwise, it is treated as a bitmap of one or more page
2417  *		operations to perform on the final memory object; allowable
2418  *		bit values are:
2419  *
2420  *		o	UPL_POP_DIRTY		The page is dirty
2421  *		o	UPL_POP_PAGEOUT		The page is paged out
2422  *		o	UPL_POP_PRECIOUS	The page is precious
2423  *		o	UPL_POP_ABSENT		The page is absent
2424  *		o	UPL_POP_BUSY		The page is busy
2425  *
2426  *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
2428  *		modified, exactly ONE of the following bits should be set:
2429  *
2430  *		o	UPL_POP_SET		Set the current bitmap bits
2431  *		o	UPL_POP_CLR		Clear the current bitmap bits
2432  *
 *		Thus to effect a combination of setting and clearing, it may be
2434  *		necessary to call this function twice.  If this is done, the
2435  *		set should be used before the clear, since clearing may trigger
2436  *		a wakeup on the destination page, and if the page is backed by
2437  *		an encrypted swap file, setting will trigger the decryption
2438  *		needed before the wakeup occurs.
2439  */
2440 kern_return_t
ubc_page_op(struct vnode * vp,off_t f_offset,int ops,ppnum_t * phys_entryp,int * flagsp)2441 ubc_page_op(
2442 	struct vnode    *vp,
2443 	off_t           f_offset,
2444 	int             ops,
2445 	ppnum_t *phys_entryp,
2446 	int             *flagsp)
2447 {
2448 	memory_object_control_t         control;
2449 
2450 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2451 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2452 		return KERN_INVALID_ARGUMENT;
2453 	}
2454 
2455 	return memory_object_page_op(control,
2456 	           (memory_object_offset_t)f_offset,
2457 	           ops,
2458 	           phys_entryp,
2459 	           flagsp);
2460 }
2461 
2462 
2463 /*
2464  * ubc_range_op
2465  *
2466  * Manipulate page state for a range of memory for a vnode with an associated
2467  * ubc_info with an associated memory object control, when page level state is
2468  * not required to be returned from the call (i.e. there are no phys_entryp or
2469  * flagsp parameters to this call, and it takes a range which may contain
2470  * multiple pages, rather than an offset interior to a single page).
2471  *
2472  * Parameters:	vp			The vnode backing the page
2473  *		f_offset_beg		A file offset interior to the start page
2474  *		f_offset_end		A file offset interior to the end page
2475  *		ops			The operations to perform, as a bitmap
2476  *					(see below for more information)
2477  *		range			The address of an int; may be NULL to
2478  *					ignore
2479  *
2480  * Returns:	KERN_SUCCESS		Success
2481  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2482  *					object associated
2483  *		KERN_INVALID_OBJECT	If the object is physically contiguous
2484  *
2485  * Implicit Returns:
2486  *		*range (modified)	If range is non-NULL, its contents will
2487  *					be modified to contain the number of
2488  *					bytes successfully operated upon.
2489  *
2490  * Notes:	IMPORTANT: This function cannot be used on a range that
2491  *		consists of physically contiguous pages.
2492  *
2493  *		For object boundaries, it is considerably more efficient to
2494  *		ensure that f_offset_beg and f_offset_end are in fact on page
2495  *		boundaries, as this will avoid internal use of the hash table
2496  *		to identify the page, and would therefore skip a number of
2497  *		early optimizations.  Since this is an operation on a set of
2498  *		pages anyway, the caller should try to pass only a page aligned
2499  *		offsets because of this.
2500  *
2501  *		*range will be modified only if this function succeeds.
2502  *
2503  *		The flags field MUST contain a specific operation; allowable
2504  *		values are:
2505  *
2506  *		o	UPL_ROP_ABSENT	Returns the extent of the range
2507  *					presented which is absent, starting
2508  *					with the start address presented
2509  *
2510  *		o	UPL_ROP_PRESENT	Returns the extent of the range
2511  *					presented which is present (resident),
2512  *					starting with the start address
2513  *					presented
2514  *		o	UPL_ROP_DUMP	Dump the pages which are found in the
2515  *					target object for the target range.
2516  *
2517  *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2518  *		multiple regions in the range, only the first matching region
2519  *		is returned.
2520  */
2521 kern_return_t
ubc_range_op(struct vnode * vp,off_t f_offset_beg,off_t f_offset_end,int ops,int * range)2522 ubc_range_op(
2523 	struct vnode    *vp,
2524 	off_t           f_offset_beg,
2525 	off_t           f_offset_end,
2526 	int             ops,
2527 	int             *range)
2528 {
2529 	memory_object_control_t         control;
2530 
2531 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2532 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2533 		return KERN_INVALID_ARGUMENT;
2534 	}
2535 
2536 	return memory_object_range_op(control,
2537 	           (memory_object_offset_t)f_offset_beg,
2538 	           (memory_object_offset_t)f_offset_end,
2539 	           ops,
2540 	           range);
2541 }
2542 
2543 
2544 /*
2545  * ubc_create_upl
2546  *
2547  * Given a vnode, cause the population of a portion of the vm_object; based on
2548  * the nature of the request, the pages returned may contain valid data, or
2549  * they may be uninitialized.
2550  *
2551  * Parameters:	vp			The vnode from which to create the upl
2552  *		f_offset		The start offset into the backing store
2553  *					represented by the vnode
2554  *		bufsize			The size of the upl to create
2555  *		uplp			Pointer to the upl_t to receive the
2556  *					created upl; MUST NOT be NULL
2557  *		plp			Pointer to receive the internal page
2558  *					list for the created upl; MAY be NULL
2559  *					to ignore
2560  *
2561  * Returns:	KERN_SUCCESS		The requested upl has been created
2562  *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
2563  *					multiple of the page size
2564  *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
2565  *					the vnode, or there is no memory object
2566  *					control associated with the ubc_info
2567  *	memory_object_upl_request:KERN_INVALID_VALUE
2568  *					The supplied upl_flags argument is
2569  *					invalid
2570  * Implicit Returns:
2571  *		*uplp (modified)
2572  *		*plp (modified)		If non-NULL, the value of *plp will be
2573  *					modified to point to the internal page
2574  *					list; this modification may occur even
2575  *					if this function is unsuccessful, in
2576  *					which case the contents may be invalid
2577  *
2578  * Note:	If successful, the returned *uplp MUST subsequently be freed
2579  *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2580  *		ubc_upl_abort(), or ubc_upl_abort_range().
2581  */
2582 kern_return_t
ubc_create_upl_external(struct vnode * vp,off_t f_offset,int bufsize,upl_t * uplp,upl_page_info_t ** plp,int uplflags)2583 ubc_create_upl_external(
2584 	struct vnode    *vp,
2585 	off_t           f_offset,
2586 	int             bufsize,
2587 	upl_t           *uplp,
2588 	upl_page_info_t **plp,
2589 	int             uplflags)
2590 {
2591 	return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2592 }
2593 
kern_return_t
ubc_create_upl_kernel(
	struct vnode    *vp,
	off_t           f_offset,
	int             bufsize,
	upl_t           *uplp,
	upl_page_info_t **plp,
	int             uplflags,
	vm_tag_t tag)
{
	memory_object_control_t         control;
	kern_return_t                   kr;

	/* initialize outputs so callers see NULL on every failure path */
	if (plp != NULL) {
		*plp = NULL;
	}
	*uplp = NULL;

	/* bufsize must be a multiple of 0x1000 (4KiB page granularity) */
	if (bufsize & 0xfff) {
		return KERN_INVALID_ARGUMENT;
	}

	if (bufsize > MAX_UPL_SIZE_BYTES) {
		return KERN_INVALID_ARGUMENT;
	}

	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
		if (uplflags & UPL_UBC_MSYNC) {
			/* keep only UPL_RET_ONLY_DIRTY from the caller's flags */
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;
		} else if (uplflags & UPL_UBC_PAGEOUT) {
			/* keep only UPL_RET_ONLY_DIRTY from the caller's flags */
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY) {
				uplflags |= UPL_NOBLOCK;
			}

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
			    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			/* UPL_UBC_PAGEIN: populate absent pages only */
			uplflags |= UPL_RET_ONLY_ABSENT |
			    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE) {
				uplflags |= UPL_NOBLOCK;
			}
		}
	} else {
		/* legacy path: no UBC-specific intent flag supplied */
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
		} else {
			uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		}
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
	if (kr == KERN_SUCCESS && plp != NULL) {
		/* hand back the internal page list of the newly created UPL */
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	}
	return kr;
}
2673 
2674 
2675 /*
2676  * ubc_upl_maxbufsize
2677  *
2678  * Return the maximum bufsize ubc_create_upl( ) will take.
2679  *
2680  * Parameters:	none
2681  *
2682  * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
2683  */
2684 upl_size_t
ubc_upl_maxbufsize(void)2685 ubc_upl_maxbufsize(
2686 	void)
2687 {
2688 	return MAX_UPL_SIZE_BYTES;
2689 }
2690 
2691 /*
2692  * ubc_upl_map
2693  *
 * Map the page list associated with the supplied upl into the kernel virtual
2695  * address space at the virtual address indicated by the dst_addr argument;
2696  * the entire upl is mapped
2697  *
2698  * Parameters:	upl			The upl to map
2699  *		dst_addr		The address at which to map the upl
2700  *
2701  * Returns:	KERN_SUCCESS		The upl has been mapped
2702  *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
2703  *		KERN_FAILURE		The upl is already mapped
2704  *	vm_map_enter:KERN_INVALID_ARGUMENT
2705  *					A failure code from vm_map_enter() due
2706  *					to an invalid argument
2707  */
2708 kern_return_t
ubc_upl_map(upl_t upl,vm_offset_t * dst_addr)2709 ubc_upl_map(
2710 	upl_t           upl,
2711 	vm_offset_t     *dst_addr)
2712 {
2713 	return vm_upl_map(kernel_map, upl, dst_addr);
2714 }
2715 
2716 /*
2717  * ubc_upl_map_range:- similar to ubc_upl_map but the focus is on a range
2718  * of the UPL. Takes an offset, size, and protection so that only a  part
2719  * of the UPL can be mapped with the right protections.
2720  */
2721 kern_return_t
ubc_upl_map_range(upl_t upl,vm_offset_t offset_to_map,vm_size_t size_to_map,vm_prot_t prot_to_map,vm_offset_t * dst_addr)2722 ubc_upl_map_range(
2723 	upl_t           upl,
2724 	vm_offset_t     offset_to_map,
2725 	vm_size_t       size_to_map,
2726 	vm_prot_t       prot_to_map,
2727 	vm_offset_t     *dst_addr)
2728 {
2729 	return vm_upl_map_range(kernel_map, upl, offset_to_map, size_to_map, prot_to_map, dst_addr);
2730 }
2731 
2732 
2733 /*
2734  * ubc_upl_unmap
2735  *
 * Unmap the page list associated with the supplied upl from the kernel virtual
2737  * address space; the entire upl is unmapped.
2738  *
2739  * Parameters:	upl			The upl to unmap
2740  *
2741  * Returns:	KERN_SUCCESS		The upl has been unmapped
2742  *		KERN_FAILURE		The upl is not currently mapped
2743  *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
2744  */
2745 kern_return_t
ubc_upl_unmap(upl_t upl)2746 ubc_upl_unmap(
2747 	upl_t   upl)
2748 {
2749 	return vm_upl_unmap(kernel_map, upl);
2750 }
2751 
2752 /*
2753  * ubc_upl_unmap_range:- similar to ubc_upl_unmap but the focus is
2754  * on part of the UPL that is mapped. The offset and size parameter
2755  * specifies what part of the UPL needs to be unmapped.
2756  *
 * Note: Currently offset &amp; size are unused as we always initiate the unmap from the
2758  * very beginning of the UPL's mapping and track the mapped size in the UPL. But we
2759  * might want to allow unmapping a UPL in the middle, for example, and we can use the
2760  * offset + size parameters for that purpose.
2761  */
2762 kern_return_t
ubc_upl_unmap_range(upl_t upl,vm_offset_t offset_to_unmap,vm_size_t size_to_unmap)2763 ubc_upl_unmap_range(
2764 	upl_t   upl,
2765 	vm_offset_t     offset_to_unmap,
2766 	vm_size_t       size_to_unmap)
2767 {
2768 	return vm_upl_unmap_range(kernel_map, upl, offset_to_unmap, size_to_unmap);
2769 }
2770 
2771 
2772 /*
2773  * ubc_upl_commit
2774  *
2775  * Commit the contents of the upl to the backing store
2776  *
2777  * Parameters:	upl			The upl to commit
2778  *
2779  * Returns:	KERN_SUCCESS		The upl has been committed
2780  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2781  *		KERN_FAILURE		The supplied upl does not represent
2782  *					device memory, and the offset plus the
2783  *					size would exceed the actual size of
2784  *					the upl
2785  *
2786  * Notes:	In practice, the only return value for this function should be
2787  *		KERN_SUCCESS, unless there has been data structure corruption;
2788  *		since the upl is deallocated regardless of success or failure,
2789  *		there's really nothing to do about this other than panic.
2790  *
2791  *		IMPORTANT: Use of this function should not be mixed with use of
2792  *		ubc_upl_commit_range(), due to the unconditional deallocation
2793  *		by this function.
2794  */
2795 kern_return_t
ubc_upl_commit(upl_t upl)2796 ubc_upl_commit(
2797 	upl_t                   upl)
2798 {
2799 	upl_page_info_t *pl;
2800 	kern_return_t   kr;
2801 
2802 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2803 	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2804 	upl_deallocate(upl);
2805 	return kr;
2806 }
2807 
2808 
2809 /*
 * ubc_upl_commit_range
2811  *
2812  * Commit the contents of the specified range of the upl to the backing store
2813  *
2814  * Parameters:	upl			The upl to commit
2815  *		offset			The offset into the upl
2816  *		size			The size of the region to be committed,
2817  *					starting at the specified offset
2818  *		flags			commit type (see below)
2819  *
2820  * Returns:	KERN_SUCCESS		The range has been committed
2821  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2822  *		KERN_FAILURE		The supplied upl does not represent
2823  *					device memory, and the offset plus the
2824  *					size would exceed the actual size of
2825  *					the upl
2826  *
2827  * Notes:	IMPORTANT: If the commit is successful, and the object is now
2828  *		empty, the upl will be deallocated.  Since the caller cannot
2829  *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2830  *		should generally only be used when the offset is 0 and the size
2831  *		is equal to the upl size.
2832  *
 *		The flags argument is a bitmap of flags on the range of pages in
2834  *		the upl to be committed; allowable flags are:
2835  *
2836  *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
2837  *							both empty and has been
2838  *							successfully committed
2839  *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each pages dirty
2840  *							bit; will prevent a
2841  *							later pageout
2842  *		o	UPL_COMMIT_SET_DIRTY		Set each pages dirty
2843  *							bit; will cause a later
2844  *							pageout
2845  *		o	UPL_COMMIT_INACTIVATE		Clear each pages
2846  *							reference bit; the page
2847  *							will not be accessed
2848  *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
2849  *							become busy when an
2850  *							IOMemoryDescriptor is
2851  *							mapped or redirected,
2852  *							and we have to wait for
2853  *							an IOKit driver
2854  *
2855  *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2856  *		not be specified by the caller.
2857  *
2858  *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2859  *		mutually exclusive, and should not be combined.
2860  */
2861 kern_return_t
ubc_upl_commit_range(upl_t upl,upl_offset_t offset,upl_size_t size,int flags)2862 ubc_upl_commit_range(
2863 	upl_t                   upl,
2864 	upl_offset_t            offset,
2865 	upl_size_t              size,
2866 	int                             flags)
2867 {
2868 	upl_page_info_t *pl;
2869 	boolean_t               empty;
2870 	kern_return_t   kr;
2871 
2872 	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2873 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
2874 	}
2875 
2876 	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2877 		return KERN_INVALID_ARGUMENT;
2878 	}
2879 
2880 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2881 
2882 	kr = upl_commit_range(upl, offset, size, flags,
2883 	    pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2884 
2885 	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2886 		upl_deallocate(upl);
2887 	}
2888 
2889 	return kr;
2890 }
2891 
2892 
2893 /*
2894  * ubc_upl_abort_range
2895  *
2896  * Abort the contents of the specified range of the specified upl
2897  *
2898  * Parameters:	upl			The upl to abort
2899  *		offset			The offset into the upl
2900  *		size			The size of the region to be aborted,
2901  *					starting at the specified offset
2902  *		abort_flags		abort type (see below)
2903  *
2904  * Returns:	KERN_SUCCESS		The range has been aborted
2905  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2906  *		KERN_FAILURE		The supplied upl does not represent
2907  *					device memory, and the offset plus the
2908  *					size would exceed the actual size of
2909  *					the upl
2910  *
2911  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2912  *		empty, the upl will be deallocated.  Since the caller cannot
2913  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2914  *		should generally only be used when the offset is 0 and the size
2915  *		is equal to the upl size.
2916  *
2917  *		The abort_flags argument is a bitmap of flags on the range of
2918  *		pages in the upl to be aborted; allowable flags are:
2919  *
2920  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2921  *						empty and has been successfully
2922  *						aborted
2923  *		o	UPL_ABORT_RESTART	The operation must be restarted
2924  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2925  *		o	UPL_ABORT_ERROR		An I/O error occurred
2926  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2927  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2928  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2929  *
2930  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2931  *		not be specified by the caller.  It is intended to fulfill the
2932  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2933  *		ubc_upl_commit_range(), but is never referenced internally.
2934  *
2935  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2936  *		referenced; do not use it.
2937  */
2938 kern_return_t
ubc_upl_abort_range(upl_t upl,upl_offset_t offset,upl_size_t size,int abort_flags)2939 ubc_upl_abort_range(
2940 	upl_t                   upl,
2941 	upl_offset_t            offset,
2942 	upl_size_t              size,
2943 	int                             abort_flags)
2944 {
2945 	kern_return_t   kr;
2946 	boolean_t               empty = FALSE;
2947 
2948 	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2949 		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2950 	}
2951 
2952 	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2953 
2954 	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2955 		upl_deallocate(upl);
2956 	}
2957 
2958 	return kr;
2959 }
2960 
2961 
2962 /*
2963  * ubc_upl_abort
2964  *
2965  * Abort the contents of the specified upl
2966  *
2967  * Parameters:	upl			The upl to abort
2968  *		abort_type		abort type (see below)
2969  *
2970  * Returns:	KERN_SUCCESS		The range has been aborted
2971  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2972  *		KERN_FAILURE		The supplied upl does not represent
2973  *					device memory, and the offset plus the
2974  *					size would exceed the actual size of
2975  *					the upl
2976  *
2977  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2978  *		empty, the upl will be deallocated.  Since the caller cannot
2979  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2980  *		should generally only be used when the offset is 0 and the size
2981  *		is equal to the upl size.
2982  *
2983  *		The abort_type is a bitmap of flags on the range of
2984  *		pages in the upl to be aborted; allowable flags are:
2985  *
2986  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2987  *						empty and has been successfully
2988  *						aborted
2989  *		o	UPL_ABORT_RESTART	The operation must be restarted
2990  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2991  *		o	UPL_ABORT_ERROR		An I/O error occurred
2992  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2993  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2994  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2995  *
2996  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2997  *		not be specified by the caller.  It is intended to fulfill the
2998  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2999  *		ubc_upl_commit_range(), but is never referenced internally.
3000  *
3001  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
3002  *		referenced; do not use it.
3003  */
3004 kern_return_t
ubc_upl_abort(upl_t upl,int abort_type)3005 ubc_upl_abort(
3006 	upl_t                   upl,
3007 	int                             abort_type)
3008 {
3009 	kern_return_t   kr;
3010 
3011 	kr = upl_abort(upl, abort_type);
3012 	upl_deallocate(upl);
3013 	return kr;
3014 }
3015 
3016 
3017 /*
3018  * ubc_upl_pageinfo
3019  *
3020  *  Retrieve the internal page list for the specified upl
3021  *
3022  * Parameters:	upl			The upl to obtain the page list from
3023  *
3024  * Returns:	!NULL			The (upl_page_info_t *) for the page
3025  *					list internal to the upl
3026  *		NULL			Error/no page list associated
3027  *
3028  * Notes:	IMPORTANT: The function is only valid on internal objects
3029  *		where the list request was made with the UPL_INTERNAL flag.
3030  *
3031  *		This function is a utility helper function, since some callers
3032  *		may not have direct access to the header defining the macro,
3033  *		due to abstraction layering constraints.
3034  */
3035 upl_page_info_t *
ubc_upl_pageinfo(upl_t upl)3036 ubc_upl_pageinfo(
3037 	upl_t                   upl)
3038 {
3039 	return UPL_GET_INTERNAL_PAGE_LIST(upl);
3040 }
3041 
3042 
3043 int
UBCINFOEXISTS(const struct vnode * vp)3044 UBCINFOEXISTS(const struct vnode * vp)
3045 {
3046 	return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
3047 }
3048 
3049 
/*
 * ubc_upl_range_needed
 *
 * Pass-through to upl_range_needed() for the given page range of the upl.
 *
 * Parameters:	upl		The upl containing the pages
 *		index		Index of the first page in the range
 *		count		Number of pages in the range
 */
void
ubc_upl_range_needed(
	upl_t           upl,
	int             index,
	int             count)
{
	upl_range_needed(upl, index, count);
}
3058 
3059 boolean_t
ubc_is_mapped(const struct vnode * vp,boolean_t * writable)3060 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
3061 {
3062 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
3063 		return FALSE;
3064 	}
3065 	if (writable) {
3066 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
3067 	}
3068 	return TRUE;
3069 }
3070 
3071 boolean_t
ubc_is_mapped_writable(const struct vnode * vp)3072 ubc_is_mapped_writable(const struct vnode *vp)
3073 {
3074 	boolean_t writable;
3075 	return ubc_is_mapped(vp, &writable) && writable;
3076 }
3077 
3078 
3079 /*
3080  * CODE SIGNING
3081  */
/* Running accounting of code-signature blob memory, updated atomically. */
static atomic_size_t cs_blob_size = 0;            /* current total bytes across all blobs */
static atomic_uint_fast32_t cs_blob_count = 0;    /* current number of blobs */
static atomic_size_t cs_blob_size_peak = 0;       /* high-water mark of cs_blob_size */
static atomic_size_t cs_blob_size_max = 0;        /* size of the largest single blob seen */
static atomic_uint_fast32_t cs_blob_count_peak = 0; /* high-water mark of cs_blob_count */

/* Read-only sysctl exposure of the counters above (vm.cs_blob_*). */
SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
3093 
3094 /*
3095  * Function: csblob_parse_teamid
3096  *
3097  * Description: This function returns a pointer to the team id
3098  *               stored within the codedirectory of the csblob.
3099  *               If the codedirectory predates team-ids, it returns
3100  *               NULL.
3101  *               This does not copy the name but returns a pointer to
3102  *               it within the CD. Subsequently, the CD must be
3103  *               available when this is used.
3104  */
3105 
3106 static const char *
csblob_parse_teamid(struct cs_blob * csblob)3107 csblob_parse_teamid(struct cs_blob *csblob)
3108 {
3109 	const CS_CodeDirectory *cd;
3110 
3111 	cd = csblob->csb_cd;
3112 
3113 	if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3114 		return NULL;
3115 	}
3116 
3117 	if (cd->teamOffset == 0) {
3118 		return NULL;
3119 	}
3120 
3121 	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
3122 	if (cs_debug > 1) {
3123 		printf("found team-id %s in cdblob\n", name);
3124 	}
3125 
3126 	return name;
3127 }
3128 
3129 
3130 kern_return_t
ubc_cs_blob_allocate(vm_offset_t * blob_addr_p,vm_size_t * blob_size_p)3131 ubc_cs_blob_allocate(
3132 	vm_offset_t     *blob_addr_p,
3133 	vm_size_t       *blob_size_p)
3134 {
3135 	kern_return_t   kr = KERN_FAILURE;
3136 	vm_size_t       allocation_size = 0;
3137 
3138 	if (!blob_addr_p || !blob_size_p) {
3139 		return KERN_INVALID_ARGUMENT;
3140 	}
3141 	allocation_size = *blob_size_p;
3142 
3143 	{
3144 		/*
3145 		 * This can be cross-freed between AMFI and XNU, so we need to make
3146 		 * sure that the adoption is coordinated between the two.
3147 		 *
3148 		 * rdar://87408704
3149 		 */
3150 		__typed_allocators_ignore_push
3151 		*blob_addr_p = (vm_offset_t) kheap_alloc_tag(KHEAP_DEFAULT,
3152 		    allocation_size, Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_SECURITY);
3153 		__typed_allocators_ignore_pop
3154 
3155 		if (*blob_addr_p == 0) {
3156 			kr = KERN_NO_SPACE;
3157 		} else {
3158 			kr = KERN_SUCCESS;
3159 		}
3160 	}
3161 
3162 	if (kr == KERN_SUCCESS) {
3163 		*blob_size_p = allocation_size;
3164 	}
3165 
3166 	return kr;
3167 }
3168 
3169 void
ubc_cs_blob_deallocate(vm_offset_t blob_addr,vm_size_t blob_size)3170 ubc_cs_blob_deallocate(
3171 	vm_offset_t     blob_addr,
3172 	vm_size_t       blob_size)
3173 {
3174 	{
3175 		/*
3176 		 * This can be cross-freed between AMFI and XNU, so we need to make
3177 		 * sure that the adoption is coordinated between the two.
3178 		 *
3179 		 * rdar://87408704
3180 		 */
3181 		__typed_allocators_ignore(kheap_free(KHEAP_DEFAULT, blob_addr, blob_size));
3182 	}
3183 }
3184 
3185 /*
3186  * Some codesigned files use a lowest common denominator page size of
3187  * 4KiB, but can be used on systems that have a runtime page size of
3188  * 16KiB. Since faults will only occur on 16KiB ranges in
3189  * cs_validate_range(), we can convert the original Code Directory to
3190  * a multi-level scheme where groups of 4 hashes are combined to form
3191  * a new hash, which represents 16KiB in the on-disk file.  This can
3192  * reduce the wired memory requirement for the Code Directory by
3193  * 75%. Care must be taken for binaries that use the "fourk" VM pager
3194  * for unaligned access, which may still attempt to validate on
3195  * non-16KiB multiples for compatibility with 3rd party binaries.
3196  */
3197 static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob * blob __unused)3198 ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
3199 {
3200 	const CS_CodeDirectory *cd;
3201 
3202 
3203 	/*
3204 	 * Only applies to binaries that ship as part of the OS,
3205 	 * primarily the shared cache.
3206 	 */
3207 	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3208 		return FALSE;
3209 	}
3210 
3211 	/*
3212 	 * If the runtime page size matches the code signing page
3213 	 * size, there is no work to do.
3214 	 */
3215 	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3216 		return FALSE;
3217 	}
3218 
3219 	cd = blob->csb_cd;
3220 
3221 	/*
3222 	 * There must be a valid integral multiple of hashes
3223 	 */
3224 	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3225 		return FALSE;
3226 	}
3227 
3228 	/*
3229 	 * Scatter lists must also have ranges that have an integral number of hashes
3230 	 */
3231 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3232 		const SC_Scatter *scatter = (const SC_Scatter*)
3233 		    ((const char*)cd + ntohl(cd->scatterOffset));
3234 		/* iterate all scatter structs to make sure they are all aligned */
3235 		do {
3236 			uint32_t sbase = ntohl(scatter->base);
3237 			uint32_t scount = ntohl(scatter->count);
3238 
3239 			/* last scatter? */
3240 			if (scount == 0) {
3241 				break;
3242 			}
3243 
3244 			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3245 				return FALSE;
3246 			}
3247 
3248 			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3249 				return FALSE;
3250 			}
3251 
3252 			scatter++;
3253 		} while (1);
3254 	}
3255 
3256 	/* Covered range must be a multiple of the new page size */
3257 	if (ntohl(cd->codeLimit) & PAGE_MASK) {
3258 		return FALSE;
3259 	}
3260 
3261 	/* All checks pass */
3262 	return TRUE;
3263 }
3264 
3265 /*
3266  * Given a validated cs_blob, we reformat the structure to only include
3267  * the blobs which are required by the kernel for our current platform.
3268  * This saves significant memory with agile signatures.
3269  *
3270  * To support rewriting the code directory, potentially through
3271  * multilevel hashes, we provide a mechanism to allocate a code directory
3272  * of a specified size and zero it out --> caller can fill it in.
3273  *
3274  * We don't need to perform a lot of overflow checks as the assumption
3275  * here is that the cs_blob has already been validated.
3276  */
/*
 * Parameters:	blob			Validated source cs_blob (must be non-NULL)
 *		ret_mem_kaddr		Optional out: address of the new superblob
 *		ret_mem_size		Optional out: allocation size of the new
 *					superblob (may exceed its logical length)
 *		code_directory_size	0 to copy the best code directory as-is;
 *					non-zero to reserve a zeroed CD of that
 *					size for the caller to fill in
 *		code_directory		Optional out: pointer to the (copied or
 *					zeroed) code directory inside the new blob
 *
 * Returns:	0			Success
 *		EINVAL			NULL blob, malformed blob, or failed
 *					post-copy validation
 *		ENOMEM			Allocation failure
 *
 * On success the caller owns the returned memory and must release it with
 * ubc_cs_blob_deallocate().
 */
static int
ubc_cs_reconstitute_code_signature(
	const struct cs_blob * const blob,
	vm_address_t * const ret_mem_kaddr,
	vm_size_t * const ret_mem_size,
	vm_size_t code_directory_size,
	CS_CodeDirectory ** const code_directory
	)
{
	vm_address_t new_blob_addr = 0;
	vm_size_t new_blob_size = 0;
	vm_size_t new_code_directory_size = 0;
	const CS_GenericBlob *best_code_directory = NULL;
	const CS_GenericBlob *first_code_directory = NULL;
	const CS_GenericBlob *der_entitlements_blob = NULL;
	const CS_GenericBlob *entitlements_blob = NULL;
	const CS_GenericBlob *cms_blob = NULL;
	const CS_GenericBlob *launch_constraint_self = NULL;
	const CS_GenericBlob *launch_constraint_parent = NULL;
	const CS_GenericBlob *launch_constraint_responsible = NULL;
	CS_SuperBlob *superblob = NULL;
	uint32_t num_blobs = 0;
	uint32_t blob_index = 0;
	uint32_t blob_offset = 0;
	kern_return_t ret;
	int err;

	if (!blob) {
		if (cs_debug > 1) {
			printf("CODE SIGNING: CS Blob passed in is NULL\n");
		}
		return EINVAL;
	}

	best_code_directory = (const CS_GenericBlob*)blob->csb_cd;
	if (!best_code_directory) {
		/* This case can never happen, and it is a sign of bad things */
		panic("CODE SIGNING: Validated CS Blob has no code directory");
	}

	/* Size 0 means "copy the best CD verbatim"; use its on-disk length. */
	new_code_directory_size = code_directory_size;
	if (new_code_directory_size == 0) {
		new_code_directory_size = ntohl(best_code_directory->length);
	}

	/*
	 * A code signature can contain multiple code directories, each of which contains hashes
	 * of pages based on a hashing algorithm. The kernel selects which hashing algorithm is
	 * the strongest, and consequently, marks one of these code directories as the best
	 * matched one. More often than not, the best matched one is _not_ the first one.
	 *
	 * However, the CMS blob which cryptographically verifies the code signature is only
	 * signed against the first code directory. Therefore, if the CMS blob is present, we also
	 * need the first code directory to be able to verify it. Given this, we organize the
	 * new cs_blob as following order:
	 *
	 * 1. best code directory
	 * 2. DER encoded entitlements blob (if present)
	 * 3. launch constraint self (if present)
	 * 4. launch constraint parent (if present)
	 * 5. launch constraint responsible (if present)
	 * 6. entitlements blob (if present)
	 * 7. cms blob (if present)
	 * 8. first code directory (if not already the best match, and if cms blob is present)
	 *
	 * This order is chosen deliberately, as later on, we expect to get rid of the CMS blob
	 * and the first code directory once their verification is complete.
	 */

	/* Storage for the super blob header */
	new_blob_size += sizeof(CS_SuperBlob);

	/* Guaranteed storage for the best code directory */
	new_blob_size += sizeof(CS_BlobIndex);
	new_blob_size += new_code_directory_size;
	num_blobs += 1;

	/* Conditional storage for the DER entitlements blob */
	der_entitlements_blob = blob->csb_der_entitlements_blob;
	if (der_entitlements_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(der_entitlements_blob->length);
		num_blobs += 1;
	}

	/* Conditional storage for the launch constraints self blob */
	launch_constraint_self = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_SELF,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_self) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(launch_constraint_self->length);
		num_blobs += 1;
	}

	/* Conditional storage for the launch constraints parent blob */
	launch_constraint_parent = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_parent) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(launch_constraint_parent->length);
		num_blobs += 1;
	}

	/* Conditional storage for the launch constraints responsible blob */
	launch_constraint_responsible = csblob_find_blob_bytes(
		(const uint8_t *)blob->csb_mem_kaddr,
		blob->csb_mem_size,
		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
	if (launch_constraint_responsible) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(launch_constraint_responsible->length);
		num_blobs += 1;
	}

	/* Conditional storage for the entitlements blob */
	entitlements_blob = blob->csb_entitlements_blob;
	if (entitlements_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(entitlements_blob->length);
		num_blobs += 1;
	}

	/* Conditional storage for the CMS blob */
	cms_blob = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
	if (cms_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(cms_blob->length);
		num_blobs += 1;
	}

	/*
	 * Conditional storage for the first code directory.
	 * This is only needed if a CMS blob exists and the best code directory isn't already
	 * the first one. It is an error if we find a CMS blob but do not find a first code directory.
	 */
	if (cms_blob) {
		first_code_directory = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);
		if (first_code_directory == best_code_directory) {
			/* We don't need the first code directory anymore, since the best one is already it */
			first_code_directory = NULL;
		} else if (first_code_directory) {
			new_blob_size += sizeof(CS_BlobIndex);
			new_blob_size += ntohl(first_code_directory->length);
			num_blobs += 1;
		} else {
			printf("CODE SIGNING: Invalid CS Blob: found CMS blob but not a first code directory\n");
			return EINVAL;
		}
	}

	/*
	 * The blob size could be rounded up to page size here, so we keep a copy
	 * of the actual superblob length as well.
	 */
	vm_size_t new_blob_allocation_size = new_blob_size;
	ret = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_allocation_size);
	if (ret != KERN_SUCCESS) {
		printf("CODE SIGNING: Failed to allocate memory for new code signing blob: %d\n", ret);
		return ENOMEM;
	}

	/*
	 * Fill out the superblob header and then all the blobs in the order listed
	 * above.
	 */
	superblob = (CS_SuperBlob*)new_blob_addr;
	superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
	superblob->length = htonl((uint32_t)new_blob_size);
	superblob->count = htonl(num_blobs);

	/* First payload byte follows the header and the full index table. */
	blob_index = 0;
	blob_offset = sizeof(CS_SuperBlob) + (num_blobs * sizeof(CS_BlobIndex));

	/* Best code directory */
	superblob->index[blob_index].offset = htonl(blob_offset);
	if (first_code_directory) {
		/* The first CD keeps the CSSLOT_CODEDIRECTORY slot; the best one becomes an alternate. */
		superblob->index[blob_index].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES);
	} else {
		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
	}

	if (code_directory_size > 0) {
		/* We zero out the code directory, as we expect the caller to fill it in */
		memset((void*)(new_blob_addr + blob_offset), 0, new_code_directory_size);
	} else {
		memcpy((void*)(new_blob_addr + blob_offset), best_code_directory, new_code_directory_size);
	}

	if (code_directory) {
		*code_directory = (CS_CodeDirectory*)(new_blob_addr + blob_offset);
	}
	blob_offset += new_code_directory_size;

	/* DER entitlements blob */
	if (der_entitlements_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_DER_ENTITLEMENTS);

		memcpy((void*)(new_blob_addr + blob_offset), der_entitlements_blob, ntohl(der_entitlements_blob->length));
		blob_offset += ntohl(der_entitlements_blob->length);
	}

	/* Launch constraints self blob */
	if (launch_constraint_self) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_SELF);

		memcpy(
			(void*)(new_blob_addr + blob_offset),
			launch_constraint_self,
			ntohl(launch_constraint_self->length));

		blob_offset += ntohl(launch_constraint_self->length);
	}

	/* Launch constraints parent blob */
	if (launch_constraint_parent) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_PARENT);

		memcpy(
			(void*)(new_blob_addr + blob_offset),
			launch_constraint_parent,
			ntohl(launch_constraint_parent->length));

		blob_offset += ntohl(launch_constraint_parent->length);
	}

	/* Launch constraints responsible blob */
	if (launch_constraint_responsible) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE);

		memcpy(
			(void*)(new_blob_addr + blob_offset),
			launch_constraint_responsible,
			ntohl(launch_constraint_responsible->length));

		blob_offset += ntohl(launch_constraint_responsible->length);
	}

	/* Entitlements blob */
	if (entitlements_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_ENTITLEMENTS);

		memcpy((void*)(new_blob_addr + blob_offset), entitlements_blob, ntohl(entitlements_blob->length));
		blob_offset += ntohl(entitlements_blob->length);
	}

	/* CMS blob */
	if (cms_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_SIGNATURESLOT);
		memcpy((void*)(new_blob_addr + blob_offset), cms_blob, ntohl(cms_blob->length));
		blob_offset += ntohl(cms_blob->length);
	}

	/* First code directory */
	if (first_code_directory) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
		memcpy((void*)(new_blob_addr + blob_offset), first_code_directory, ntohl(first_code_directory->length));
		blob_offset += ntohl(first_code_directory->length);
	}

	/*
	 * We only validate the blob in case we copied in the best code directory.
	 * In case the code directory size we were passed in wasn't 0, we memset the best
	 * code directory to 0 and expect the caller to fill it in. In the same spirit, we
	 * expect the caller to validate the code signature after they fill in the code
	 * directory.
	 */
	if (code_directory_size == 0) {
		const CS_CodeDirectory *validated_code_directory = NULL;
		const CS_GenericBlob *validated_entitlements_blob = NULL;
		const CS_GenericBlob *validated_der_entitlements_blob = NULL;
		ret = cs_validate_csblob((const uint8_t *)superblob, new_blob_size, &validated_code_directory, &validated_entitlements_blob, &validated_der_entitlements_blob);
		if (ret) {
			printf("CODE SIGNING: Validation of reconstituted blob failed: %d\n", ret);
			err = EINVAL;
			goto fail;
		}
	}

	if (ret_mem_kaddr) {
		*ret_mem_kaddr = new_blob_addr;
	}
	if (ret_mem_size) {
		*ret_mem_size = new_blob_allocation_size;
	}

	return 0;

fail:
	/* Release the partially-built superblob; caller sees only the error. */
	ubc_cs_blob_deallocate(new_blob_addr, new_blob_allocation_size);
	return err;
}
3589 
3590 #if CONFIG_ENFORCE_SIGNED_CODE
3591 /*
3592  * We use this function to clear out unnecessary bits from the code signature
3593  * blob which are no longer needed. We free these bits and give them back to
3594  * the kernel. This is needed since reconstitution includes extra data which is
3595  * needed only for verification but has no point in keeping afterwards.
3596  *
3597  * This results in significant memory reduction, especially for 3rd party apps
3598  * since we also get rid of the CMS blob.
3599  */
3600 static int
ubc_cs_clear_unneeded_code_signature(struct cs_blob * blob)3601 ubc_cs_clear_unneeded_code_signature(
3602 	struct cs_blob *blob
3603 	)
3604 {
3605 	const CS_GenericBlob *launch_constraint_self = NULL;
3606 	const CS_GenericBlob *launch_constraint_parent = NULL;
3607 	const CS_GenericBlob *launch_constraint_responsible = NULL;
3608 	CS_SuperBlob *superblob = NULL;
3609 	uint32_t num_blobs = 0;
3610 	vm_size_t last_needed_blob_offset = 0;
3611 	kern_return_t ret = KERN_FAILURE;
3612 	bool kmem_allocated = false;
3613 
3614 	/*
3615 	 * Ordering of blobs we need to keep:
3616 	 * 1. Code directory
3617 	 * 2. DER encoded entitlements (if present)
3618 	 * 3. Launch constraints self (if present)
3619 	 * 3. Launch constraints parent (if present)
3620 	 * 3. Launch constraints responsible (if present)
3621 	 *
3622 	 * We need to clear out the remaining page after these blobs end, and fix up
3623 	 * the superblob for the changes. Things gets a little more complicated for
3624 	 * blobs which may not have been kmem_allocated. For those, we simply just
3625 	 * allocate the new required space and copy into it.
3626 	 */
3627 
3628 	if (!blob) {
3629 		if (cs_debug > 1) {
3630 			printf("CODE SIGNING: CS Blob passed in is NULL\n");
3631 		}
3632 		return EINVAL;
3633 	}
3634 
3635 	if (!blob->csb_reconstituted) {
3636 		/*
3637 		 * Nothing for us to do, since we can't make any claims about how this
3638 		 * blob may have been ordered.
3639 		 */
3640 		return 0;
3641 	}
3642 
3643 
3644 	if (!blob->csb_cd) {
3645 		/* This case can never happen, and it is a sign of bad things */
3646 		panic("CODE SIGNING: Validated CS Blob has no code directory");
3647 	}
3648 	superblob = (CS_SuperBlob*)blob->csb_mem_kaddr;
3649 
3650 	num_blobs = 1;
3651 	last_needed_blob_offset = ntohl(superblob->index[0].offset) + ntohl(blob->csb_cd->length);
3652 
3653 	/* Check for DER entitlements */
3654 	if (blob->csb_der_entitlements_blob) {
3655 		num_blobs += 1;
3656 		last_needed_blob_offset += ntohl(blob->csb_der_entitlements_blob->length);
3657 	}
3658 
3659 	/* Check for launch constraints self */
3660 	launch_constraint_self = csblob_find_blob_bytes(
3661 		(const uint8_t *)blob->csb_mem_kaddr,
3662 		blob->csb_mem_size,
3663 		CSSLOT_LAUNCH_CONSTRAINT_SELF,
3664 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3665 	if (launch_constraint_self) {
3666 		num_blobs += 1;
3667 		last_needed_blob_offset += ntohl(launch_constraint_self->length);
3668 	}
3669 
3670 	/* Check for launch constraints parent */
3671 	launch_constraint_parent = csblob_find_blob_bytes(
3672 		(const uint8_t *)blob->csb_mem_kaddr,
3673 		blob->csb_mem_size,
3674 		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3675 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3676 	if (launch_constraint_parent) {
3677 		num_blobs += 1;
3678 		last_needed_blob_offset += ntohl(launch_constraint_parent->length);
3679 	}
3680 
3681 	/* Check for launch constraints responsible */
3682 	launch_constraint_responsible = csblob_find_blob_bytes(
3683 		(const uint8_t *)blob->csb_mem_kaddr,
3684 		blob->csb_mem_size,
3685 		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3686 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3687 	if (launch_constraint_responsible) {
3688 		num_blobs += 1;
3689 		last_needed_blob_offset += ntohl(launch_constraint_responsible->length);
3690 	}
3691 
3692 	superblob->count = htonl(num_blobs);
3693 	superblob->length = htonl((uint32_t)last_needed_blob_offset);
3694 
3695 	/*
3696 	 * There is a chance that the code directory is marked within the superblob as an
3697 	 * alternate code directory. This happens when the first code directory isn't the
3698 	 * best one chosen by the kernel, so to be able to access both the first and the best,
3699 	 * we save the best one as an alternate one. Since we're getting rid of the first one
3700 	 * here, we mark the best one as the first one.
3701 	 */
3702 	superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3703 
3704 	/*
3705 	 * If we are kmem_allocated, then we can free all the remaining pages which we no longer
3706 	 * need. However, this cannot be done if we didn't allocate page-wise, but byte-wise through
3707 	 * something like kalloc. In the latter case, we just allocate the required space again, and
3708 	 * copy over only the required portion of the superblob.
3709 	 */
3710 	if (kmem_allocated) {
3711 		vm_size_t last_needed_page_offset = round_page(last_needed_blob_offset);
3712 		assert(last_needed_page_offset <= blob->csb_mem_size);
3713 
3714 		vm_address_t unneeded_blob_addr = (vm_address_t)blob->csb_mem_kaddr + last_needed_page_offset;
3715 		vm_size_t unneeded_blob_size = blob->csb_mem_size - last_needed_page_offset;
3716 
3717 		/* These both need to be page aligned */
3718 		assert((unneeded_blob_addr & PAGE_MASK) == 0);
3719 		assert((unneeded_blob_size & PAGE_MASK) == 0);
3720 
3721 		/* Free the unneeded memory */
3722 		if (unneeded_blob_addr && unneeded_blob_size) {
3723 			kmem_free(kernel_map, unneeded_blob_addr, unneeded_blob_size);
3724 		}
3725 
3726 		/* Zero out the remaining bytes in the same page */
3727 		vm_size_t unneeded_bytes_in_page = last_needed_page_offset - last_needed_blob_offset;
3728 		memset((uint8_t*)superblob + last_needed_blob_offset, 0, unneeded_bytes_in_page);
3729 		blob->csb_mem_size = last_needed_page_offset;
3730 	} else {
3731 		vm_address_t new_superblob = 0;
3732 		vm_size_t new_superblob_size = last_needed_blob_offset;
3733 
3734 		ret = ubc_cs_blob_allocate(&new_superblob, &new_superblob_size);
3735 		if (ret != KERN_SUCCESS) {
3736 			printf("CODE SIGNING: Unable to allocate space when trying to clear unneeded code signature blobs: %d\n", ret);
3737 			return ENOMEM;
3738 		}
3739 
3740 		/*
3741 		 * As we weren't kmem_allocated before, we will not be kmem_allocated again. This should
3742 		 * mean the size we passed in is exactly the size we should get back for the allocation.
3743 		 */
3744 		assert(new_superblob_size == last_needed_blob_offset);
3745 
3746 		/* Copy in the updated superblob into the new memory */
3747 		memcpy((void*)new_superblob, superblob, new_superblob_size);
3748 
3749 		/* Free the old code signature and old memory */
3750 		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3751 
3752 		/* Setup the code signature blob again */
3753 		blob->csb_mem_kaddr = (void *)new_superblob;
3754 		blob->csb_mem_size = new_superblob_size;
3755 		blob->csb_cd = (const CS_CodeDirectory*)csblob_find_blob_bytes(
3756 			(uint8_t*)new_superblob,
3757 			new_superblob_size,
3758 			CSSLOT_CODEDIRECTORY,
3759 			CSMAGIC_CODEDIRECTORY);
3760 
3761 		blob->csb_der_entitlements_blob = csblob_find_blob_bytes(
3762 			(uint8_t*)new_superblob,
3763 			new_superblob_size,
3764 			CSSLOT_DER_ENTITLEMENTS,
3765 			CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3766 	}
3767 
3768 	blob->csb_entitlements_blob = NULL;
3769 
3770 	const CS_CodeDirectory *validated_code_directory = NULL;
3771 	const CS_GenericBlob *validated_entitlements_blob = NULL;
3772 	const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3773 
3774 	ret = cs_validate_csblob(
3775 		(const uint8_t *)blob->csb_mem_kaddr,
3776 		blob->csb_mem_size,
3777 		&validated_code_directory,
3778 		&validated_entitlements_blob,
3779 		&validated_der_entitlements_blob);
3780 	if (ret) {
3781 		printf("CODE SIGNING: Validation of blob after clearing unneeded code signature blobs failed: %d\n", ret);
3782 		return EINVAL;
3783 	}
3784 
3785 	return 0;
3786 }
3787 #endif /* CONFIG_ENFORCE_SIGNED_CODE */
3788 
3789 static int
ubc_cs_convert_to_multilevel_hash(struct cs_blob * blob)3790 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3791 {
3792 	const CS_CodeDirectory  *old_cd, *cd;
3793 	CS_CodeDirectory        *new_cd;
3794 	const CS_GenericBlob *entitlements;
3795 	const CS_GenericBlob *der_entitlements;
3796 	vm_offset_t     new_blob_addr;
3797 	vm_size_t       new_blob_size;
3798 	vm_size_t       new_cdsize;
3799 	int                             error;
3800 
3801 	uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3802 
3803 	if (cs_debug > 1) {
3804 		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3805 		    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3806 	}
3807 
3808 	old_cd = blob->csb_cd;
3809 
3810 	/* Up to the hashes, we can copy all data */
3811 	new_cdsize  = ntohl(old_cd->hashOffset);
3812 	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3813 
3814 	error = ubc_cs_reconstitute_code_signature(blob, &new_blob_addr, &new_blob_size, new_cdsize, &new_cd);
3815 	if (error != 0) {
3816 		printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3817 		return error;
3818 	}
3819 	entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3820 	der_entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3821 
3822 	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3823 
3824 	/* Update fields in the Code Directory structure */
3825 	new_cd->length = htonl((uint32_t)new_cdsize);
3826 
3827 	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3828 	nCodeSlots >>= hashes_per_new_hash_shift;
3829 	new_cd->nCodeSlots = htonl(nCodeSlots);
3830 
3831 	new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
3832 
3833 	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3834 		SC_Scatter *scatter = (SC_Scatter*)
3835 		    ((char *)new_cd + ntohl(new_cd->scatterOffset));
3836 		/* iterate all scatter structs to scale their counts */
3837 		do {
3838 			uint32_t scount = ntohl(scatter->count);
3839 			uint32_t sbase  = ntohl(scatter->base);
3840 
3841 			/* last scatter? */
3842 			if (scount == 0) {
3843 				break;
3844 			}
3845 
3846 			scount >>= hashes_per_new_hash_shift;
3847 			scatter->count = htonl(scount);
3848 
3849 			sbase >>= hashes_per_new_hash_shift;
3850 			scatter->base = htonl(sbase);
3851 
3852 			scatter++;
3853 		} while (1);
3854 	}
3855 
3856 	/* For each group of hashes, hash them together */
3857 	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3858 	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3859 
3860 	uint32_t hash_index;
3861 	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3862 		union cs_hash_union     mdctx;
3863 
3864 		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3865 		const unsigned char *src = src_base + hash_index * source_hash_len;
3866 		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3867 
3868 		blob->csb_hashtype->cs_init(&mdctx);
3869 		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3870 		blob->csb_hashtype->cs_final(dst, &mdctx);
3871 	}
3872 
3873 	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements, &der_entitlements);
3874 	if (error != 0) {
3875 		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3876 		    error);
3877 
3878 		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3879 		return error;
3880 	}
3881 
3882 	/* New Code Directory is ready for use, swap it out in the blob structure */
3883 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3884 
3885 	blob->csb_mem_size = new_blob_size;
3886 	blob->csb_mem_kaddr = (void *)new_blob_addr;
3887 	blob->csb_cd = cd;
3888 	blob->csb_entitlements_blob = NULL;
3889 
3890 	blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
3891 	blob->csb_reconstituted = true;
3892 
3893 	/* The blob has some cached attributes of the Code Directory, so update those */
3894 
3895 	blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
3896 
3897 	blob->csb_hash_pageshift = PAGE_SHIFT;
3898 	blob->csb_end_offset = ntohl(cd->codeLimit);
3899 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3900 		const SC_Scatter *scatter = (const SC_Scatter*)
3901 		    ((const char*)cd + ntohl(cd->scatterOffset));
3902 		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3903 	} else {
3904 		blob->csb_start_offset = 0;
3905 	}
3906 
3907 	return 0;
3908 }
3909 
3910 static void
cs_blob_cleanup(struct cs_blob * blob)3911 cs_blob_cleanup(struct cs_blob *blob)
3912 {
3913 	if (blob->csb_entitlements != NULL) {
3914 		amfi->OSEntitlements_invalidate(blob->csb_entitlements);
3915 		osobject_release(blob->csb_entitlements);
3916 		blob->csb_entitlements = NULL;
3917 	}
3918 
3919 	if (blob->csb_mem_kaddr) {
3920 		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3921 	}
3922 	blob->csb_mem_kaddr = NULL;
3923 	blob->csb_mem_size = 0;
3924 }
3925 
3926 static void
cs_blob_ro_free(struct cs_blob * blob)3927 cs_blob_ro_free(struct cs_blob *blob)
3928 {
3929 	struct cs_blob tmp;
3930 
3931 	if (blob != NULL) {
3932 		/*
3933 		 * cs_blob_cleanup clears fields, so we need to pass it a
3934 		 * mutable copy.
3935 		 */
3936 		tmp = *blob;
3937 		cs_blob_cleanup(&tmp);
3938 
3939 		zfree_ro(ZONE_ID_CS_BLOB, blob);
3940 	}
3941 }
3942 
/*
 * Free a cs_blob previously created by cs_blob_create_validated,
 * releasing both its owned resources and the read-only zone element.
 * Safe to call with NULL.
 */
void
cs_blob_free(struct cs_blob *blob)
{
	cs_blob_ro_free(blob);
}
3952 
/*
 * Initialize the caller-provided cs_blob from the raw signature at *addr
 * and validate its contents.  Ownership of *addr transfers to the blob
 * (*addr is cleared here); on failure the backing memory is released via
 * cs_blob_cleanup().  On success, the chosen code directory is returned
 * through ret_cd and the blob's cached attributes (cdhash, flags,
 * coverage offsets, hash type) are filled in.
 */
static int
cs_blob_init_validated(
	vm_address_t * const addr,
	vm_size_t size,
	struct cs_blob *blob,
	CS_CodeDirectory const ** const ret_cd)
{
	int error = EINVAL;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	const CS_GenericBlob *der_entitlements = NULL;
	union cs_hash_union mdctx;
	size_t length;

	bzero(blob, sizeof(*blob));

	/* fill in the new blob */
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_kaddr = (void *)*addr;
	blob->csb_flags = 0;
	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
	blob->csb_platform_binary = 0;
	blob->csb_platform_path = 0;
	blob->csb_teamid = NULL;
#if CONFIG_SUPPLEMENTAL_SIGNATURES
	blob->csb_supplement_teamid = NULL;
#endif
	blob->csb_entitlements_blob = NULL;
	blob->csb_der_entitlements_blob = NULL;
	blob->csb_entitlements = NULL;
#if PMAP_CS_INCLUDE_CODE_SIGNING
	blob->csb_pmap_cs_entry = NULL;
#endif
	blob->csb_reconstituted = false;
	blob->csb_validation_category = CS_VALIDATION_CATEGORY_INVALID;

	/* Transfer ownership. Even on error, this function will deallocate */
	*addr = 0;

	/*
	 * Validate the blob's contents
	 */
	length = (size_t) size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    length, &cd, &entitlements, &der_entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		/*
		 * The vnode checker can't make the rest of this function
		 * succeed if csblob validation failed, so bail */
		goto out;
	} else {
		const unsigned char *md_base;
		uint8_t hash[CS_HASH_MAX_SIZE];
		int md_size;
		vm_offset_t hash_pagemask;

		blob->csb_cd = cd;
		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
		blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
		blob->csb_hashtype = cs_find_md(cd->hashType);
		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
			panic("validated CodeDirectory but unsupported type");
		}

		/* cd->pageSize is the log2 of the hash granule size */
		blob->csb_hash_pageshift = cd->pageSize;
		hash_pagemask = (1U << cd->pageSize) - 1;
		blob->csb_hash_firstlevel_pageshift = 0;
		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		/* round the code limit up to a whole hash page */
		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
		if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			const SC_Scatter *scatter = (const SC_Scatter*)
			    ((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
		} else {
			blob->csb_start_offset = 0;
		}
		/* compute the blob's cdhash */
		md_base = (const unsigned char *) cd;
		md_size = ntohl(cd->length);

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
		blob->csb_hashtype->cs_final(hash, &mdctx);

		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
		/* cache the linkage (target) cdhash used by supplemental signatures */
		blob->csb_linkage_hashtype = NULL;
		if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
		    ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
			blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);

			if (blob->csb_linkage_hashtype != NULL) {
				memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
				    CS_CDHASH_LEN);
			}
		}
#endif
	}

	error = 0;

out:
	if (error != 0) {
		cs_blob_cleanup(blob);
		blob = NULL;
		cd = NULL;
	}

	if (ret_cd != NULL) {
		*ret_cd = cd;
	}

	return error;
}
4072 
4073 /*
4074  * Validate the code signature blob, create a struct cs_blob wrapper
4075  * and return it together with a pointer to the chosen code directory
4076  * and entitlements blob.
4077  *
4078  * Note that this takes ownership of the memory as addr, mainly because
4079  * this function can actually replace the passed in blob with another
4080  * one, e.g. when performing multilevel hashing optimization.
4081  */
4082 int
cs_blob_create_validated(vm_address_t * const addr,vm_size_t size,struct cs_blob ** const ret_blob,CS_CodeDirectory const ** const ret_cd)4083 cs_blob_create_validated(
4084 	vm_address_t * const            addr,
4085 	vm_size_t                       size,
4086 	struct cs_blob ** const         ret_blob,
4087 	CS_CodeDirectory const ** const     ret_cd)
4088 {
4089 	struct cs_blob blob = {};
4090 	struct cs_blob *ro_blob;
4091 	int error;
4092 
4093 	if (ret_blob) {
4094 		*ret_blob = NULL;
4095 	}
4096 
4097 	if ((error = cs_blob_init_validated(addr, size, &blob, ret_cd)) != 0) {
4098 		return error;
4099 	}
4100 
4101 	if (ret_blob != NULL) {
4102 		ro_blob = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4103 		zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, &blob);
4104 		*ret_blob = ro_blob;
4105 	}
4106 
4107 	return error;
4108 }
4109 
4110 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4111 static void
cs_blob_supplement_free(struct cs_blob * const blob)4112 cs_blob_supplement_free(struct cs_blob * const blob)
4113 {
4114 	void *teamid;
4115 
4116 	if (blob != NULL) {
4117 		if (blob->csb_supplement_teamid != NULL) {
4118 			teamid = blob->csb_supplement_teamid;
4119 			vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
4120 			kfree_data(teamid, teamid_size);
4121 		}
4122 		cs_blob_ro_free(blob);
4123 	}
4124 }
4125 #endif
4126 
4127 static void
ubc_cs_blob_adjust_statistics(struct cs_blob const * blob)4128 ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
4129 {
4130 	/* Note that the atomic ops are not enough to guarantee
4131 	 * correctness: If a blob with an intermediate size is inserted
4132 	 * concurrently, we can lose a peak value assignment. But these
4133 	 * statistics are only advisory anyway, so we're not going to
4134 	 * employ full locking here. (Consequently, we are also okay with
4135 	 * relaxed ordering of those accesses.)
4136 	 */
4137 
4138 	unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
4139 	if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
4140 		os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
4141 	}
4142 
4143 	size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
4144 
4145 	if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
4146 		os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
4147 	}
4148 	if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
4149 		os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
4150 	}
4151 }
4152 
/*
 * Update the cached CPU type of a read-only cs_blob through the RO-zone
 * field-update interface (the blob element is not directly writable).
 */
static void
cs_blob_set_cpu_type(struct cs_blob *blob, cpu_type_t cputype)
{
	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_type, &cputype);
}
4158 
/* Out-of-line panic path for cs_blob_require()'s vnode back-reference check. */
__abortlike
static void
panic_cs_blob_backref_mismatch(struct cs_blob *blob, struct vnode *vp)
{
	panic("cs_blob vnode backref mismatch: blob=%p, vp=%p, "
	    "blob->csb_vnode=%p", blob, vp, blob->csb_vnode);
}
4166 
/*
 * Assert that `blob` is a genuine element of the read-only cs_blob zone
 * and, when `vp` is non-NULL, that the blob's back-reference points at
 * that vnode.  Panics on any mismatch.
 */
void
cs_blob_require(struct cs_blob *blob, vnode_t vp)
{
	zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), blob);

	if (vp != NULL && __improbable(blob->csb_vnode != vp)) {
		panic_cs_blob_backref_mismatch(blob, vp);
	}
}
4176 
/*
 * Validate a code signature blob covering the given range of vnode vp and
 * attach it to the vnode's ubc_info.  Takes ownership of the memory at
 * *addr.  Returns 0 on success — including the case where an identical
 * blob was already attached — or an errno on failure.  On success,
 * *ret_blob (if non-NULL) points at the read-only cs_blob now reachable
 * from the vnode.
 */
int
ubc_cs_blob_add(
	struct vnode    *vp,
	uint32_t        platform,
	cpu_type_t      cputype,
	cpu_subtype_t   cpusubtype,
	off_t           base_offset,
	vm_address_t    *addr,
	vm_size_t       size,
	struct image_params *imgp,
	__unused int    flags,
	struct cs_blob  **ret_blob)
{
	kern_return_t           kr;
	struct ubc_info         *uip;
	struct cs_blob          tmp_blob;
	struct cs_blob          *blob_ro = NULL;
	struct cs_blob          *oblob;
	int                     error;
	CS_CodeDirectory const *cd;
	off_t                   blob_start_offset, blob_end_offset;
	boolean_t               record_mtime;

	record_mtime = FALSE;
	if (ret_blob) {
		*ret_blob = NULL;
	}

	/* Create the struct cs_blob wrapper that will be attached to the vnode.
	 * Validates the passed in blob in the process. */
	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);

	if (error != 0) {
		printf("malform code signature blob: %d\n", error);
		return error;
	}

	tmp_blob.csb_cpu_type = cputype;
	tmp_blob.csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
	tmp_blob.csb_base_offset = base_offset;

#if CONFIG_ENFORCE_SIGNED_CODE
	/*
	 * Reconstitute code signature
	 */
	{
		vm_address_t new_mem_kaddr = 0;
		vm_size_t new_mem_size = 0;

		CS_CodeDirectory *new_cd = NULL;
		const CS_GenericBlob *new_entitlements = NULL;
		const CS_GenericBlob *new_der_entitlements = NULL;

		error = ubc_cs_reconstitute_code_signature(&tmp_blob, &new_mem_kaddr, &new_mem_size, 0, &new_cd);
		if (error != 0) {
			printf("failed code signature reconstitution: %d\n", error);
			goto out;
		}
		new_entitlements = csblob_find_blob_bytes((uint8_t*)new_mem_kaddr, new_mem_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
		new_der_entitlements = csblob_find_blob_bytes((uint8_t*)new_mem_kaddr, new_mem_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);

		/* The reconstituted copy replaces the original backing memory */
		ubc_cs_blob_deallocate((vm_offset_t)tmp_blob.csb_mem_kaddr, tmp_blob.csb_mem_size);

		tmp_blob.csb_mem_kaddr = (void *)new_mem_kaddr;
		tmp_blob.csb_mem_size = new_mem_size;
		tmp_blob.csb_cd = new_cd;
		tmp_blob.csb_entitlements_blob = new_entitlements;
		tmp_blob.csb_der_entitlements_blob = new_der_entitlements;
		tmp_blob.csb_reconstituted = true;
	}
#endif


	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
	tmp_blob.csb_ro_addr = blob_ro;
	tmp_blob.csb_vnode = vp;

	/* AMFI needs to see the current blob state at the RO address. */
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

#if CONFIG_MACF
	/*
	 * Let policy module check whether the blob's signature is accepted.
	 */

	unsigned int cs_flags = tmp_blob.csb_flags;
	unsigned int signer_type = tmp_blob.csb_signer_type;
	error = mac_vnode_check_signature(vp, &tmp_blob, imgp, &cs_flags, &signer_type, flags, platform);

	/* The policy module may have updated the flags and signer type */
	tmp_blob.csb_flags = cs_flags;
	tmp_blob.csb_signer_type = signer_type;

	if (error) {
		if (cs_debug) {
			printf("check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
		}
		goto out;
	}
	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(tmp_blob.csb_flags & CS_PLATFORM_BINARY)) {
		if (cs_debug) {
			printf("check_signature[pid: %d], is not apple signed\n", proc_getpid(current_proc()));
		}
		error = EPERM;
		goto out;
	}
#endif


#if CONFIG_ENFORCE_SIGNED_CODE
	/*
	 * When this flag is turned on, we reconstitute the code signature to only
	 * include the blobs which are needed. This may include the first code
	 * directory and the CMS blob. However, now that verification of this blob
	 * is complete, we don't need all these blobs. Hence, we clear them out.
	 */

	if (ubc_cs_clear_unneeded_code_signature(&tmp_blob)) {
		error = EPERM;
		goto out;
	}
#endif /* CONFIG_ENFORCE_SIGNED_CODE */

	tmp_blob.csb_entitlements_blob = NULL;

#if CODE_SIGNING_MONITOR
	/* Disassociate any associated provisioning profiles from the monitor */
	kr = disassociate_provisioning_profile(tmp_blob.csb_pmap_cs_entry);
	if ((kr != KERN_SUCCESS) && (kr != KERN_NOT_FOUND)) {
		printf("CODE SIGNING: error with disassociating profile[pid: %d]: %d\n",
		    proc_getpid(current_proc()), kr);

		error = EPERM;
		goto out;
	}
#endif

	/* Cache platform-binary / team-ID attributes on the blob */
	if (tmp_blob.csb_flags & CS_PLATFORM_BINARY) {
		if (cs_debug > 1) {
			printf("check_signature[pid: %d]: platform binary\n", proc_getpid(current_proc()));
		}
		tmp_blob.csb_platform_binary = 1;
		tmp_blob.csb_platform_path = !!(tmp_blob.csb_flags & CS_PLATFORM_PATH);
	} else {
		tmp_blob.csb_platform_binary = 0;
		tmp_blob.csb_platform_path = 0;
		tmp_blob.csb_teamid = csblob_parse_teamid(&tmp_blob);
		if (cs_debug > 1) {
			if (tmp_blob.csb_teamid) {
				printf("check_signature[pid: %d]: team-id is %s\n", proc_getpid(current_proc()), tmp_blob.csb_teamid);
			} else {
				printf("check_signature[pid: %d]: no team-id\n", proc_getpid(current_proc()));
			}
		}
	}

	/*
	 * Validate that launch constraints haven't been stripped
	 */
	kr = csblob_find_special_slot_blob(&tmp_blob, CSSLOT_LAUNCH_CONSTRAINT_SELF, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT, NULL, NULL);
	if (kr) {
		printf("check_signature[pid: %d]: embedded self launch constraint was removed\n", proc_getpid(current_proc()));
		error = EPERM;
		goto out;
	}
	kr = csblob_find_special_slot_blob(&tmp_blob, CSSLOT_LAUNCH_CONSTRAINT_PARENT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT, NULL, NULL);
	if (kr) {
		printf("check_signature[pid: %d]: embedded parent launch constraint was removed\n", proc_getpid(current_proc()));
		error = EPERM;
		goto out;
	}
	kr = csblob_find_special_slot_blob(&tmp_blob, CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT, NULL, NULL);
	if (kr) {
		printf("check_signature[pid: %d]: embedded responsible launch constraint was removed\n", proc_getpid(current_proc()));
		error = EPERM;
		goto out;
	}

	/*
	 * Validate the blob's coverage
	 */
	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;

	if (blob_start_offset >= blob_end_offset ||
	    blob_start_offset < 0 ||
	    blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	if (ubc_cs_supports_multilevel_hash(&tmp_blob)) {
		error = ubc_cs_convert_to_multilevel_hash(&tmp_blob);
		if (error != 0) {
			printf("failed multilevel hash conversion: %d\n", error);
			goto out;
		}
		tmp_blob.csb_reconstituted = true;
	}

	vnode_lock(vp);
	if (!UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = ubc_get_cs_blobs(vp);
	    oblob != NULL;
	    oblob = oblob->csb_next) {
		off_t oblob_start_offset, oblob_end_offset;

		if (tmp_blob.csb_signer_type != oblob->csb_signer_type) {  // signer type needs to be the same for slices
			vnode_unlock(vp);
			error = EALREADY;
			goto out;
		} else if (tmp_blob.csb_platform_binary) {  //platform binary needs to be the same for app slices
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (tmp_blob.csb_teamid) {  //teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, tmp_blob.csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else {  // non teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}

		oblob_start_offset = (oblob->csb_base_offset +
		    oblob->csb_start_offset);
		oblob_end_offset = (oblob->csb_base_offset +
		    oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* no conflict with this existing blob */
		} else {
			/* conflict ! */
			if (blob_start_offset == oblob_start_offset &&
			    blob_end_offset == oblob_end_offset &&
			    tmp_blob.csb_mem_size == oblob->csb_mem_size &&
			    tmp_blob.csb_flags == oblob->csb_flags &&
			    (tmp_blob.csb_cpu_type == CPU_TYPE_ANY ||
			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
			    tmp_blob.csb_cpu_type == oblob->csb_cpu_type) &&
			    !bcmp(tmp_blob.csb_cdhash,
			    oblob->csb_cdhash,
			    CS_CDHASH_LEN)) {
				/*
				 * We already have this blob:
				 * we'll return success but
				 * throw away the new blob.
				 */
				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					/*
					 * The old blob matches this one
					 * but doesn't have any CPU type.
					 * Update it with whatever the caller
					 * provided this time.
					 */
					cs_blob_set_cpu_type(oblob, cputype);
				}

				/* The signature is still accepted, so update the
				 * generation count. */
				uip->cs_add_gen = cs_blob_generation_count;

				vnode_unlock(vp);
				if (ret_blob) {
					*ret_blob = oblob;
				}
				/* EAGAIN is translated to success at "out:" below */
				error = EAGAIN;
				goto out;
			} else {
				/* different blob: reject the new one */
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
	}


	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/* set the generation count for cs_blobs */
	uip->cs_add_gen = cs_blob_generation_count;
	tmp_blob.csb_next = uip->cs_blobs;
	ubc_cs_blob_adjust_statistics(&tmp_blob);
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	/*
	 * Add this blob to the list of blobs for this vnode.
	 * We always add at the front of the list and we never remove a
	 * blob from the list, so ubc_cs_get_blobs() can return whatever
	 * the top of the list was and that list will remain valid
	 * while we validate a page, even after we release the vnode's lock.
	 */
	os_atomic_thread_fence(seq_cst); // fence to make sure all of the writes happen before we update the head
	uip->cs_blobs = blob_ro;


	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		    "loaded %s signatures for file (%s) "
		    "range 0x%llx:0x%llx flags 0x%x\n",
		    proc_getpid(p), p->p_comm,
		    blob_ro->csb_cpu_type == -1 ? "detached" : "embedded",
		    name,
		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
		    blob_ro->csb_base_offset + blob_ro->csb_end_offset,
		    blob_ro->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	if (ret_blob) {
		*ret_blob = blob_ro;
	}

	error = 0;      /* success ! */

out:
	if (error) {
		if (cs_debug) {
			printf("check_signature[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
		}

		/* on any failure (including EAGAIN), discard the new blob */
		cs_blob_cleanup(&tmp_blob);
		if (blob_ro) {
			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
		}
	}

	if (error == EAGAIN) {
		/*
		 * See above:  error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
	}

	return error;
}
4553 
#if CONFIG_SUPPLEMENTAL_SIGNATURES
/*
 * Attach a supplemental code-signature blob to vnode "vp".
 *
 * A supplemental signature does not stand on its own: it carries a
 * "linkage" cdhash that must match the cdhash of a CS_VALID blob already
 * attached to the original vnode "orig_vp".  On success the new blob is
 * published as uip->cs_blob_supplement (at most one supplement per vnode)
 * and, if "ret_blob" is non-NULL, a pointer to the read-only copy is
 * returned through it.
 *
 * "addr"/"size" describe the raw blob memory; ownership is consumed by
 * cs_blob_init_validated() on success and released via cs_blob_cleanup()
 * on failure.
 *
 * Returns 0 on success (including the benign "same supplement already
 * attached" case, see EAGAIN handling below), or an errno on failure.
 */
int
ubc_cs_blob_add_supplement(
	struct vnode    *vp,
	struct vnode    *orig_vp,
	off_t           base_offset,
	vm_address_t    *addr,
	vm_size_t       size,
	struct cs_blob  **ret_blob)
{
	kern_return_t           kr;
	struct ubc_info         *uip, *orig_uip;
	int                     error;
	struct cs_blob          tmp_blob;
	struct cs_blob          *orig_blob;
	struct cs_blob          *blob_ro = NULL;
	CS_CodeDirectory const *cd;
	off_t                   blob_start_offset, blob_end_offset;

	if (ret_blob) {
		*ret_blob = NULL;
	}

	/* Create the struct cs_blob wrapper that will be attached to the vnode.
	 * Validates the passed in blob in the process. */
	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);

	if (error != 0) {
		printf("malformed code signature supplement blob: %d\n", error);
		return error;
	}

	/* Supplements are not tied to a CPU architecture. */
	tmp_blob.csb_cpu_type = -1;
	tmp_blob.csb_base_offset = base_offset;

	tmp_blob.csb_reconstituted = false;

	vnode_lock(orig_vp);
	if (!UBCINFOEXISTS(orig_vp)) {
		vnode_unlock(orig_vp);
		error = ENOENT;
		goto out;
	}

	orig_uip = orig_vp->v_ubcinfo;

	/* check that the supplement's linked cdhash matches a cdhash of
	 * the target image.
	 */

	if (tmp_blob.csb_linkage_hashtype == NULL) {
		/* Blob parsed fine but carries no linkage: not a supplement. */
		proc_t p;
		const char *iname = vnode_getname_printable(vp);
		p = current_proc();

		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
		    "is not a supplemental.\n",
		    proc_getpid(p), p->p_comm, iname);

		error = EINVAL;

		vnode_putname_printable(iname);
		vnode_unlock(orig_vp);
		goto out;
	}
	/* Walk the original vnode's blobs looking for the linked-to signature. */
	bool found_but_not_valid = false;
	for (orig_blob = ubc_get_cs_blobs(orig_vp); orig_blob != NULL;
	    orig_blob = orig_blob->csb_next) {
		if (orig_blob->csb_hashtype == tmp_blob.csb_linkage_hashtype &&
		    memcmp(orig_blob->csb_cdhash, tmp_blob.csb_linkage, CS_CDHASH_LEN) == 0) {
			// Found match!
			found_but_not_valid = ((orig_blob->csb_flags & CS_VALID) != CS_VALID);
			break;
		}
	}

	if (orig_blob == NULL || found_but_not_valid) {
		// Not found.

		proc_t p;
		const char *iname = vnode_getname_printable(vp);
		p = current_proc();

		/* ESRCH: no matching blob at all; EPERM: matched but invalidated. */
		error = (orig_blob == NULL) ? ESRCH : EPERM;

		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
		    "does not match any attached cdhash (error: %d).\n",
		    proc_getpid(p), p->p_comm, iname, error);

		vnode_putname_printable(iname);
		vnode_unlock(orig_vp);
		goto out;
	}

	vnode_unlock(orig_vp);

	/* The blob that gets published lives in read-only zone memory;
	 * tmp_blob is the writable staging copy. */
	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
	tmp_blob.csb_ro_addr = blob_ro;
	tmp_blob.csb_vnode = vp;

	/* AMFI needs to see the current blob state at the RO address. */
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	// validate the signature against policy!
#if CONFIG_MACF
	unsigned int signer_type = tmp_blob.csb_signer_type;
	error = mac_vnode_check_supplemental_signature(vp, &tmp_blob, orig_vp, orig_blob, &signer_type);

	tmp_blob.csb_signer_type = signer_type;

	if (error) {
		if (cs_debug) {
			printf("check_supplemental_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
		}
		goto out;
	}
#endif

	// We allowed the supplemental signature blob so
	// copy the platform bit or team-id from the linked signature and whether or not the original is developer code
	tmp_blob.csb_platform_binary = 0;
	tmp_blob.csb_platform_path = 0;
	if (orig_blob->csb_platform_binary == 1) {
		tmp_blob.csb_platform_binary = orig_blob->csb_platform_binary;
		tmp_blob.csb_platform_path = orig_blob->csb_platform_path;
	} else if (orig_blob->csb_teamid != NULL) {
		/* Deep-copy the team-id; the supplement may outlive orig_blob's
		 * string.  Freed by cs_blob_cleanup() on the error path. */
		vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
		tmp_blob.csb_supplement_teamid = kalloc_data(teamid_size, Z_WAITOK);
		if (tmp_blob.csb_supplement_teamid == NULL) {
			error = ENOMEM;
			goto out;
		}
		strlcpy(tmp_blob.csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
	}
	tmp_blob.csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);

	// Validate the blob's coverage
	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;

	if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	vnode_lock(vp);
	if (!UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* Only one supplement per vnode is supported. */
	struct cs_blob *existing = uip->cs_blob_supplement;
	if (existing != NULL) {
		if (tmp_blob.csb_hashtype == existing->csb_hashtype &&
		    memcmp(tmp_blob.csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
			error = EAGAIN; // non-fatal
		} else {
			error = EALREADY; // fatal
		}

		vnode_unlock(vp);
		goto out;
	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}


	/* We still adjust statistics even for supplemental blobs, as they
	 * consume memory just the same. */
	ubc_cs_blob_adjust_statistics(&tmp_blob);
	/* Unlike regular cs_blobs, we only ever support one supplement. */
	tmp_blob.csb_next = NULL;
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	os_atomic_thread_fence(seq_cst); // Fence to prevent reordering here
	uip->cs_blob_supplement = blob_ro;

	vnode_unlock(vp);


	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		    "loaded supplemental signature for file (%s) "
		    "range 0x%llx:0x%llx\n",
		    proc_getpid(p), p->p_comm,
		    name,
		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
		    blob_ro->csb_base_offset + blob_ro->csb_end_offset);
		vnode_putname_printable(name);
	}

	if (ret_blob) {
		*ret_blob = blob_ro;
	}

	error = 0; // Success!
out:
	if (error) {
		if (cs_debug) {
			printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
		}

		/* Releases blob memory and the copied team-id, if any. */
		cs_blob_cleanup(&tmp_blob);
		if (blob_ro) {
			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
		}
	}

	if (error == EAGAIN) {
		/* We were asked to add an existing blob.
		 * We cleaned up and ignore the attempt. */
		error = 0;
	}

	return error;
}
#endif
4783 
4784 
4785 
4786 void
csvnode_print_debug(struct vnode * vp)4787 csvnode_print_debug(struct vnode *vp)
4788 {
4789 	const char      *name = NULL;
4790 	struct ubc_info *uip;
4791 	struct cs_blob *blob;
4792 
4793 	name = vnode_getname_printable(vp);
4794 	if (name) {
4795 		printf("csvnode: name: %s\n", name);
4796 		vnode_putname_printable(name);
4797 	}
4798 
4799 	vnode_lock_spin(vp);
4800 
4801 	if (!UBCINFOEXISTS(vp)) {
4802 		blob = NULL;
4803 		goto out;
4804 	}
4805 
4806 	uip = vp->v_ubcinfo;
4807 	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
4808 		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
4809 		    (unsigned long)blob->csb_start_offset,
4810 		    (unsigned long)blob->csb_end_offset,
4811 		    blob->csb_flags,
4812 		    blob->csb_platform_binary ? "yes" : "no",
4813 		    blob->csb_platform_path ? "yes" : "no",
4814 		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
4815 	}
4816 
4817 out:
4818 	vnode_unlock(vp);
4819 }
4820 
#if CONFIG_SUPPLEMENTAL_SIGNATURES
/*
 * Return the supplemental cs_blob attached to "vp", or NULL if there is
 * none.  If "offset" is not -1, additionally require that the offset fall
 * within the supplement's covered range, else return NULL.
 */
struct cs_blob *
ubc_cs_blob_get_supplement(
	struct vnode    *vp,
	off_t           offset)
{
	struct cs_blob *supplement = NULL;

	vnode_lock_spin(vp);

	if (UBCINFOEXISTS(vp)) {
		supplement = vp->v_ubcinfo->cs_blob_supplement;
	}

	if (supplement != NULL && offset != -1) {
		off_t rel = offset - supplement->csb_base_offset;
		if (rel < supplement->csb_start_offset ||
		    rel >= supplement->csb_end_offset) {
			/* offset not covered by the supplement */
			supplement = NULL;
		}
	}

	vnode_unlock(vp);

	return supplement;
}
#endif
4859 
4860 struct cs_blob *
ubc_cs_blob_get(struct vnode * vp,cpu_type_t cputype,cpu_subtype_t cpusubtype,off_t offset)4861 ubc_cs_blob_get(
4862 	struct vnode    *vp,
4863 	cpu_type_t      cputype,
4864 	cpu_subtype_t   cpusubtype,
4865 	off_t           offset)
4866 {
4867 	struct cs_blob  *blob;
4868 	off_t offset_in_blob;
4869 
4870 	vnode_lock_spin(vp);
4871 
4872 	if (!UBCINFOEXISTS(vp)) {
4873 		blob = NULL;
4874 		goto out;
4875 	}
4876 
4877 	for (blob = ubc_get_cs_blobs(vp);
4878 	    blob != NULL;
4879 	    blob = blob->csb_next) {
4880 		if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
4881 			break;
4882 		}
4883 		if (offset != -1) {
4884 			offset_in_blob = offset - blob->csb_base_offset;
4885 			if (offset_in_blob >= blob->csb_start_offset &&
4886 			    offset_in_blob < blob->csb_end_offset) {
4887 				/* our offset is covered by this blob */
4888 				break;
4889 			}
4890 		}
4891 	}
4892 
4893 out:
4894 	vnode_unlock(vp);
4895 
4896 	return blob;
4897 }
4898 
/*
 * If the vnode's code-signing blobs have been marked invalid
 * (UI_CSBLOBINVALID), detach and free them; in all cases drop the vnode
 * lock, which the caller must hold on entry.  The blob lists are unhooked
 * under the lock but freed after it is released, so no allocator work
 * happens while holding the vnode lock.
 */
void
ubc_cs_free_and_vnode_unlock(
	vnode_t vp)
{
	struct ubc_info *uip = vp->v_ubcinfo;
	struct cs_blob  *cs_blobs, *blob, *next_blob;

	if (!(uip->ui_flags & UI_CSBLOBINVALID)) {
		/* Nothing invalidated: just honor the unlock contract. */
		vnode_unlock(vp);
		return;
	}

	uip->ui_flags &= ~UI_CSBLOBINVALID;

	/* Unhook the lists while still under the lock. */
	cs_blobs = uip->cs_blobs;
	uip->cs_blobs = NULL;

#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip );
#endif

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	struct cs_blob  *cs_blob_supplement = uip->cs_blob_supplement;
	uip->cs_blob_supplement = NULL;
#endif

	vnode_unlock(vp);

	/* Free the detached blobs and update global accounting. */
	for (blob = cs_blobs;
	    blob != NULL;
	    blob = next_blob) {
		next_blob = blob->csb_next;
		os_atomic_add(&cs_blob_count, -1, relaxed);
		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
		cs_blob_ro_free(blob);
	}

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	if (cs_blob_supplement != NULL) {
		os_atomic_add(&cs_blob_count, -1, relaxed);
		os_atomic_add(&cs_blob_size, -cs_blob_supplement->csb_mem_size, relaxed);
		cs_blob_supplement_free(cs_blob_supplement);
	}
#endif
}
4944 
4945 static void
ubc_cs_free(struct ubc_info * uip)4946 ubc_cs_free(
4947 	struct ubc_info *uip)
4948 {
4949 	struct cs_blob  *blob, *next_blob;
4950 
4951 	for (blob = uip->cs_blobs;
4952 	    blob != NULL;
4953 	    blob = next_blob) {
4954 		next_blob = blob->csb_next;
4955 		os_atomic_add(&cs_blob_count, -1, relaxed);
4956 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
4957 		cs_blob_ro_free(blob);
4958 	}
4959 #if CHECK_CS_VALIDATION_BITMAP
4960 	ubc_cs_validation_bitmap_deallocate( uip );
4961 #endif
4962 	uip->cs_blobs = NULL;
4963 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4964 	if (uip->cs_blob_supplement != NULL) {
4965 		blob = uip->cs_blob_supplement;
4966 		os_atomic_add(&cs_blob_count, -1, relaxed);
4967 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
4968 		cs_blob_supplement_free(uip->cs_blob_supplement);
4969 		uip->cs_blob_supplement = NULL;
4970 	}
4971 #endif
4972 }
4973 
4974 /* check cs blob generation on vnode
4975  * returns:
4976  *    0         : Success, the cs_blob attached is current
4977  *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
4978  */
4979 int
ubc_cs_generation_check(struct vnode * vp)4980 ubc_cs_generation_check(
4981 	struct vnode    *vp)
4982 {
4983 	int retval = ENEEDAUTH;
4984 
4985 	vnode_lock_spin(vp);
4986 
4987 	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
4988 		retval = 0;
4989 	}
4990 
4991 	vnode_unlock(vp);
4992 	return retval;
4993 }
4994 
/*
 * Re-run policy validation on an already-attached cs_blob, typically after
 * the global blob generation count was bumped (see cs_blob_reset_cache).
 *
 * Returns 0 on success, EAGAIN when the caller should re-read and re-add
 * the signature (reconstituted blobs), or another errno on failure.  On
 * return the vnode's cs_add_gen is synced to the current generation count
 * on success, or reset to 0 on failure.
 */
int
ubc_cs_blob_revalidate(
	struct vnode    *vp,
	struct cs_blob *blob,
	struct image_params *imgp,
	int flags,
	uint32_t platform
	)
{
	int error = 0;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	const CS_GenericBlob *der_entitlements = NULL;
	size_t size;
	assert(vp != NULL);
	assert(blob != NULL);

	if ((blob->csb_flags & CS_VALID) == 0) {
		// If the blob attached to the vnode was invalidated, don't try to revalidate it
		// Blob invalidation only occurs when the file that the blob is attached to is
		// opened for writing, giving us a signal that the file is modified.
		printf("CODESIGNING: can not re-validate a previously invalidated blob, reboot or create a new file.\n");
		error = EPERM;
		goto out;
	}

	/* Re-parse and re-validate the in-memory blob bytes. */
	size = blob->csb_mem_size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    size, &cd, &entitlements, &der_entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}

	/* Recompute flags from the code directory; signer type is refreshed
	 * by the MACF callout below. */
	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;

	if (blob->csb_reconstituted) {
		/*
		 * Code signatures that have been modified after validation
		 * cannot be revalidated inline from their in-memory blob.
		 *
		 * That's okay, though, because the only path left that relies
		 * on revalidation of existing in-memory blobs is the legacy
		 * detached signature database path, which only exists on macOS,
		 * which does not do reconstitution of any kind.
		 */
		if (cs_debug) {
			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
		}

		/*
		 * EAGAIN tells the caller that they may reread the code
		 * signature and try attaching it again, which is the same
		 * thing they would do if there was no cs_blob yet in the
		 * first place.
		 *
		 * Conveniently, after ubc_cs_blob_add did a successful
		 * validation, it will detect that a matching cs_blob (cdhash,
		 * offset, arch etc.) already exists, and return success
		 * without re-adding a cs_blob to the vnode.
		 */
		return EAGAIN;
	}

	/* callout to mac_vnode_check_signature */
#if CONFIG_MACF
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
	}
#else
	(void)flags;
	(void)signer_type;
#endif

	/* update generation number if success */
	vnode_lock_spin(vp);
	/* The blob lives in read-only zone memory, so the refreshed
	 * flags/signer-type must be written through the RO-zone API. */
	struct cs_signer_info signer_info = {
		.csb_flags = cs_flags,
		.csb_signer_type = signer_type
	};
	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_signer_info, &signer_info);
	if (UBCINFOEXISTS(vp)) {
		if (error == 0) {
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		} else {
			vp->v_ubcinfo->cs_add_gen = 0;
		}
	}

	vnode_unlock(vp);

out:
	return error;
}
5093 
5094 void
cs_blob_reset_cache()5095 cs_blob_reset_cache()
5096 {
5097 	/* incrementing odd no by 2 makes sure '0' is never reached. */
5098 	OSAddAtomic(+2, &cs_blob_generation_count);
5099 	printf("Reseting cs_blob cache from all vnodes. \n");
5100 }
5101 
5102 struct cs_blob *
ubc_get_cs_blobs(struct vnode * vp)5103 ubc_get_cs_blobs(
5104 	struct vnode    *vp)
5105 {
5106 	struct ubc_info *uip;
5107 	struct cs_blob  *blobs;
5108 
5109 	/*
5110 	 * No need to take the vnode lock here.  The caller must be holding
5111 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5112 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5113 	 * goes away.  And we only modify "blobs" by adding to the head of the
5114 	 * list.
5115 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5116 	 * part of a forced unmount.  In the case of a code-signature validation
5117 	 * during a page fault, the "paging_in_progress" reference on the VM
5118 	 * object guarantess that the vnode pager (and the ubc_info) won't go
5119 	 * away during the fault.
5120 	 * Other callers need to protect against vnode reclaim by holding the
5121 	 * vnode lock, for example.
5122 	 */
5123 
5124 	if (!UBCINFOEXISTS(vp)) {
5125 		blobs = NULL;
5126 		goto out;
5127 	}
5128 
5129 	uip = vp->v_ubcinfo;
5130 	blobs = uip->cs_blobs;
5131 	if (blobs != NULL) {
5132 		cs_blob_require(blobs, vp);
5133 	}
5134 
5135 out:
5136 	return blobs;
5137 }
5138 
#if CONFIG_SUPPLEMENTAL_SIGNATURES
/*
 * Return "vp"'s supplemental cs_blob (NULL if none, or no ubc_info).
 *
 * Like ubc_get_cs_blobs(), this intentionally runs without the vnode
 * lock: the caller's reference on the vnode keeps it alive, and the
 * ubc_info only disappears on vnode reclaim (forced unmount), which
 * page-fault callers are protected against via the VM object's
 * "paging_in_progress" reference.  Other callers must guard against
 * reclaim themselves (e.g. by holding the vnode lock).
 */
struct cs_blob *
ubc_get_cs_supplement(
	struct vnode    *vp)
{
	struct cs_blob *supplement = NULL;

	if (UBCINFOEXISTS(vp)) {
		supplement = vp->v_ubcinfo->cs_blob_supplement;
		if (supplement != NULL) {
			cs_blob_require(supplement, vp);
		}
	}

	return supplement;
}
#endif
5176 
5177 
5178 void
ubc_get_cs_mtime(struct vnode * vp,struct timespec * cs_mtime)5179 ubc_get_cs_mtime(
5180 	struct vnode    *vp,
5181 	struct timespec *cs_mtime)
5182 {
5183 	struct ubc_info *uip;
5184 
5185 	if (!UBCINFOEXISTS(vp)) {
5186 		cs_mtime->tv_sec = 0;
5187 		cs_mtime->tv_nsec = 0;
5188 		return;
5189 	}
5190 
5191 	uip = vp->v_ubcinfo;
5192 	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
5193 	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
5194 }
5195 
/* Diagnostic counters: chunks with no covering hash / with a hash mismatch. */
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
/*
 * Validate one code-signing chunk of "data" at file offset "page_offset"
 * against the expected hash stored in whichever blob in "blobs" covers
 * that offset.
 *
 * On return, *bytes_processed is the chunk size consumed (one
 * code-signing page, 1 << csb_hash_pageshift) and *tainted carries
 * CS_VALIDATE_TAINTED (hash mismatch) and/or CS_VALIDATE_NX (chunk
 * straddles the code limit) bits.  Returns TRUE if a hash was found and
 * the chunk was checked (even if tainted), FALSE if no hash covers the
 * chunk.  "pager" is only used for diagnostics.
 */
static boolean_t
cs_validate_hash(
	struct cs_blob          *blobs,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	vm_size_t               *bytes_processed,
	unsigned                *tainted)
{
	union cs_hash_union     mdctx;
	struct cs_hash const    *hashtype = NULL;
	unsigned char           actual_hash[CS_HASH_MAX_SIZE];
	unsigned char           expected_hash[CS_HASH_MAX_SIZE];
	boolean_t               found_hash;
	struct cs_blob          *blob;
	const CS_CodeDirectory  *cd;
	const unsigned char     *hash;
	boolean_t               validated;
	off_t                   offset; /* page offset in the file */
	size_t                  size;
	off_t                   codeLimit = 0;
	const char              *lower_bound, *upper_bound;
	vm_offset_t             kaddr, blob_addr;

	/* retrieve the expected hash */
	found_hash = FALSE;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* blob data has been released */
		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
		if (kaddr == 0) {
			continue;
		}

		/* bounds of the blob memory, for hashes() range checking */
		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		cd = blob->csb_cd;
		if (cd != NULL) {
			/* all CD's that have been injected is already validated */

			hashtype = blob->csb_hashtype;
			if (hashtype == NULL) {
				panic("unknown hash type ?");
			}
			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
				panic("hash size too large");
			}
			if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
				panic("offset not aligned to cshash boundary");
			}

			codeLimit = ntohl(cd->codeLimit);

			/* look up the expected hash for this cs page index */
			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
			    hashtype->cs_size,
			    lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash, hashtype->cs_size);
				found_hash = TRUE;
			}

			break;
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			    "mobj %p off 0x%llx: no hash to validate !?\n",
			    pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {
		*tainted = 0;

		/* one full code-signing page for this blob */
		size = (1U << blob->csb_hash_pageshift);
		*bytes_processed = size;

		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			/* only hash the bytes below the code limit */
			size = (size_t) (codeLimit & (size - 1));
			*tainted |= CS_VALIDATE_NX;
		}

		hashtype->cs_init(&mdctx);

		if (blob->csb_hash_firstlevel_pageshift) {
			/* two-level scheme: hash each first-level sub-page,
			 * then hash the concatenated sub-digests */
			const unsigned char *partial_data = (const unsigned char *)data;
			size_t i;
			for (i = 0; i < size;) {
				union cs_hash_union     partialctx;
				unsigned char partial_digest[CS_HASH_MAX_SIZE];
				size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));

				hashtype->cs_init(&partialctx);
				hashtype->cs_update(&partialctx, partial_data, partial_size);
				hashtype->cs_final(partial_digest, &partialctx);

				/* Update cumulative multi-level hash */
				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
				partial_data = partial_data + partial_size;
				i += partial_size;
			}
		} else {
			hashtype->cs_update(&mdctx, data, size);
		}
		hashtype->cs_final(actual_hash, &mdctx);

		/* word views of the digests, for the diagnostic printf only */
		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				    pager, page_offset, size,
				    asha1[0], asha1[1], asha1[2],
				    asha1[3], asha1[4],
				    esha1[0], esha1[1], esha1[2],
				    esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "SHA1 OK\n",
				    pager, page_offset, size);
			}
		}
		/* chunk was checked, even if it turned out tainted */
		validated = TRUE;
	}

	return validated;
}
5359 
5360 boolean_t
cs_validate_range(struct vnode * vp,memory_object_t pager,memory_object_offset_t page_offset,const void * data,vm_size_t dsize,unsigned * tainted)5361 cs_validate_range(
5362 	struct vnode    *vp,
5363 	memory_object_t         pager,
5364 	memory_object_offset_t  page_offset,
5365 	const void              *data,
5366 	vm_size_t               dsize,
5367 	unsigned                *tainted)
5368 {
5369 	vm_size_t offset_in_range;
5370 	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
5371 
5372 	struct cs_blob *blobs = ubc_get_cs_blobs(vp);
5373 
5374 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5375 	if (blobs == NULL && proc_is_translated(current_proc())) {
5376 		struct cs_blob *supp = ubc_get_cs_supplement(vp);
5377 
5378 		if (supp != NULL) {
5379 			blobs = supp;
5380 		} else {
5381 			return FALSE;
5382 		}
5383 	}
5384 #endif
5385 
5386 
5387 
5388 	*tainted = 0;
5389 
5390 	for (offset_in_range = 0;
5391 	    offset_in_range < dsize;
5392 	    /* offset_in_range updated based on bytes processed */) {
5393 		unsigned subrange_tainted = 0;
5394 		boolean_t subrange_validated;
5395 		vm_size_t bytes_processed = 0;
5396 
5397 		subrange_validated = cs_validate_hash(blobs,
5398 		    pager,
5399 		    page_offset + offset_in_range,
5400 		    (const void *)((const char *)data + offset_in_range),
5401 		    &bytes_processed,
5402 		    &subrange_tainted);
5403 
5404 		*tainted |= subrange_tainted;
5405 
5406 		if (bytes_processed == 0) {
5407 			/* Cannote make forward progress, so return an error */
5408 			all_subranges_validated = FALSE;
5409 			break;
5410 		} else if (subrange_validated == FALSE) {
5411 			all_subranges_validated = FALSE;
5412 			/* Keep going to detect other types of failures in subranges */
5413 		}
5414 
5415 		offset_in_range += bytes_processed;
5416 	}
5417 
5418 	return all_subranges_validated;
5419 }
5420 
/*
 * Validate one VM page of "data" at file offset "page_offset" against
 * "vp"'s code signatures, handling both whole-page (16K) and 4K
 * code-signing granularity.
 *
 * *validated_p, *tainted_p and *nx_p are returned either as
 * VMP_CS_ALL_TRUE / VMP_CS_ALL_FALSE (whole-page granularity) or as
 * per-4K-subpage bitmasks (bit i = subpage i of the VM page).
 */
void
cs_validate_page(
	struct vnode            *vp,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	int                     *validated_p,
	int                     *tainted_p,
	int                     *nx_p)
{
	vm_size_t offset_in_page;
	struct cs_blob *blobs;

	blobs = ubc_get_cs_blobs(vp);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	/* Translated (Rosetta) processes may only have a supplemental blob. */
	if (blobs == NULL && proc_is_translated(current_proc())) {
		struct cs_blob *supp = ubc_get_cs_supplement(vp);

		if (supp != NULL) {
			blobs = supp;
		}
	}
#endif

	*validated_p = VMP_CS_ALL_FALSE;
	*tainted_p = VMP_CS_ALL_FALSE;
	*nx_p = VMP_CS_ALL_FALSE;

	for (offset_in_page = 0;
	    offset_in_page < PAGE_SIZE;
	    /* offset_in_page updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;
		int sub_bit;

		subrange_validated = cs_validate_hash(blobs,
		    pager,
		    page_offset + offset_in_page,
		    (const void *)((const char *)data + offset_in_page),
		    &bytes_processed,
		    &subrange_tainted);

		if (bytes_processed == 0) {
			/* 4k chunk not code-signed: try next one */
			offset_in_page += FOURK_PAGE_SIZE;
			continue;
		}
		if (offset_in_page == 0 &&
		    bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
			/* all processed: no 4k granularity */
			if (subrange_validated) {
				*validated_p = VMP_CS_ALL_TRUE;
			}
			if (subrange_tainted & CS_VALIDATE_TAINTED) {
				*tainted_p = VMP_CS_ALL_TRUE;
			}
			if (subrange_tainted & CS_VALIDATE_NX) {
				*nx_p = VMP_CS_ALL_TRUE;
			}
			break;
		}
		/* we only handle 4k or 16k code-signing granularity... */
		assertf(bytes_processed <= FOURK_PAGE_SIZE,
		    "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
		    vp, blobs, (uint64_t)page_offset,
		    (uint64_t)offset_in_page, (uint64_t)bytes_processed);
		/* record the result in this 4K subpage's bit */
		sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
		if (subrange_validated) {
			*validated_p |= sub_bit;
		}
		if (subrange_tainted & CS_VALIDATE_TAINTED) {
			*tainted_p |= sub_bit;
		}
		if (subrange_tainted & CS_VALIDATE_NX) {
			*nx_p |= sub_bit;
		}
		/* go to next 4k chunk */
		offset_in_page += FOURK_PAGE_SIZE;
	}

	return;
}
5505 
5506 int
ubc_cs_getcdhash(vnode_t vp,off_t offset,unsigned char * cdhash)5507 ubc_cs_getcdhash(
5508 	vnode_t         vp,
5509 	off_t           offset,
5510 	unsigned char   *cdhash)
5511 {
5512 	struct cs_blob  *blobs, *blob;
5513 	off_t           rel_offset;
5514 	int             ret;
5515 
5516 	vnode_lock(vp);
5517 
5518 	blobs = ubc_get_cs_blobs(vp);
5519 	for (blob = blobs;
5520 	    blob != NULL;
5521 	    blob = blob->csb_next) {
5522 		/* compute offset relative to this blob */
5523 		rel_offset = offset - blob->csb_base_offset;
5524 		if (rel_offset >= blob->csb_start_offset &&
5525 		    rel_offset < blob->csb_end_offset) {
5526 			/* this blob does cover our "offset" ! */
5527 			break;
5528 		}
5529 	}
5530 
5531 	if (blob == NULL) {
5532 		/* we didn't find a blob covering "offset" */
5533 		ret = EBADEXEC; /* XXX any better error ? */
5534 	} else {
5535 		/* get the SHA1 hash of that blob */
5536 		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
5537 		ret = 0;
5538 	}
5539 
5540 	vnode_unlock(vp);
5541 
5542 	return ret;
5543 }
5544 
5545 boolean_t
ubc_cs_is_range_codesigned(vnode_t vp,mach_vm_offset_t start,mach_vm_size_t size)5546 ubc_cs_is_range_codesigned(
5547 	vnode_t                 vp,
5548 	mach_vm_offset_t        start,
5549 	mach_vm_size_t          size)
5550 {
5551 	struct cs_blob          *csblob;
5552 	mach_vm_offset_t        blob_start;
5553 	mach_vm_offset_t        blob_end;
5554 
5555 	if (vp == NULL) {
5556 		/* no file: no code signature */
5557 		return FALSE;
5558 	}
5559 	if (size == 0) {
5560 		/* no range: no code signature */
5561 		return FALSE;
5562 	}
5563 	if (start + size < start) {
5564 		/* overflow */
5565 		return FALSE;
5566 	}
5567 
5568 	csblob = ubc_cs_blob_get(vp, -1, -1, start);
5569 	if (csblob == NULL) {
5570 		return FALSE;
5571 	}
5572 
5573 	/*
5574 	 * We currently check if the range is covered by a single blob,
5575 	 * which should always be the case for the dyld shared cache.
5576 	 * If we ever want to make this routine handle other cases, we
5577 	 * would have to iterate if the blob does not cover the full range.
5578 	 */
5579 	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
5580 	    csblob->csb_start_offset);
5581 	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
5582 	    csblob->csb_end_offset);
5583 	if (blob_start > start || blob_end < (start + size)) {
5584 		/* range not fully covered by this code-signing blob */
5585 		return FALSE;
5586 	}
5587 
5588 	return TRUE;
5589 }
5590 
5591 #if CHECK_CS_VALIDATION_BITMAP
5592 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
5593 extern  boolean_t       root_fs_upgrade_try;
5594 
5595 /*
5596  * Should we use the code-sign bitmap to avoid repeated code-sign validation?
5597  * Depends:
5598  * a) Is the target vnode on the root filesystem?
5599  * b) Has someone tried to mount the root filesystem read-write?
5600  * If answers are (a) yes AND (b) no, then we can use the bitmap.
5601  */
5602 #define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
5603 kern_return_t
ubc_cs_validation_bitmap_allocate(vnode_t vp)5604 ubc_cs_validation_bitmap_allocate(
5605 	vnode_t         vp)
5606 {
5607 	kern_return_t   kr = KERN_SUCCESS;
5608 	struct ubc_info *uip;
5609 	char            *target_bitmap;
5610 	vm_object_size_t        bitmap_size;
5611 
5612 	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
5613 		kr = KERN_INVALID_ARGUMENT;
5614 	} else {
5615 		uip = vp->v_ubcinfo;
5616 
5617 		if (uip->cs_valid_bitmap == NULL) {
5618 			bitmap_size = stob(uip->ui_size);
5619 			target_bitmap = (char*) kalloc_data((vm_size_t)bitmap_size, Z_WAITOK | Z_ZERO);
5620 			if (target_bitmap == 0) {
5621 				kr = KERN_NO_SPACE;
5622 			} else {
5623 				kr = KERN_SUCCESS;
5624 			}
5625 			if (kr == KERN_SUCCESS) {
5626 				uip->cs_valid_bitmap = (void*)target_bitmap;
5627 				uip->cs_valid_bitmap_size = bitmap_size;
5628 			}
5629 		}
5630 	}
5631 	return kr;
5632 }
5633 
5634 kern_return_t
ubc_cs_check_validation_bitmap(vnode_t vp,memory_object_offset_t offset,int optype)5635 ubc_cs_check_validation_bitmap(
5636 	vnode_t                 vp,
5637 	memory_object_offset_t          offset,
5638 	int                     optype)
5639 {
5640 	kern_return_t   kr = KERN_SUCCESS;
5641 
5642 	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
5643 		kr = KERN_INVALID_ARGUMENT;
5644 	} else {
5645 		struct ubc_info *uip = vp->v_ubcinfo;
5646 		char            *target_bitmap = uip->cs_valid_bitmap;
5647 
5648 		if (target_bitmap == NULL) {
5649 			kr = KERN_INVALID_ARGUMENT;
5650 		} else {
5651 			uint64_t        bit, byte;
5652 			bit = atop_64( offset );
5653 			byte = bit >> 3;
5654 
5655 			if (byte > uip->cs_valid_bitmap_size) {
5656 				kr = KERN_INVALID_ARGUMENT;
5657 			} else {
5658 				if (optype == CS_BITMAP_SET) {
5659 					target_bitmap[byte] |= (1 << (bit & 07));
5660 					kr = KERN_SUCCESS;
5661 				} else if (optype == CS_BITMAP_CLEAR) {
5662 					target_bitmap[byte] &= ~(1 << (bit & 07));
5663 					kr = KERN_SUCCESS;
5664 				} else if (optype == CS_BITMAP_CHECK) {
5665 					if (target_bitmap[byte] & (1 << (bit & 07))) {
5666 						kr = KERN_SUCCESS;
5667 					} else {
5668 						kr = KERN_FAILURE;
5669 					}
5670 				}
5671 			}
5672 		}
5673 	}
5674 	return kr;
5675 }
5676 
5677 void
ubc_cs_validation_bitmap_deallocate(struct ubc_info * uip)5678 ubc_cs_validation_bitmap_deallocate(
5679 	struct ubc_info *uip)
5680 {
5681 	if (uip->cs_valid_bitmap != NULL) {
5682 		kfree_data(uip->cs_valid_bitmap, (vm_size_t)uip->cs_valid_bitmap_size);
5683 		uip->cs_valid_bitmap = NULL;
5684 	}
5685 }
5686 #else
/* Validation-bitmap support compiled out: allocation always fails. */
kern_return_t
ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
{
	return KERN_INVALID_ARGUMENT;
}
5692 
/* Validation-bitmap support compiled out: all bitmap ops fail. */
kern_return_t
ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype)
{
	return KERN_INVALID_ARGUMENT;
}
5701 
/* Validation-bitmap support compiled out: nothing to free. */
void
ubc_cs_validation_bitmap_deallocate(__unused struct ubc_info *uip)
{
	return;
}
5707 #endif /* CHECK_CS_VALIDATION_BITMAP */
5708 
5709