xref: /xnu-8019.80.24/bsd/kern/ubc_subr.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *	File:	ubc_subr.c
30  *	Author:	Umesh Vaishampayan [[email protected]]
31  *		05-Aug-1999	umeshv	Created.
32  *
33  *	Functions related to Unified Buffer cache.
34  *
35  * Caller of UBC functions MUST have a valid reference on the vnode.
36  *
37  */
38 
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/lock.h>
43 #include <sys/mman.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56 #include <sys/reboot.h>
57 
58 #include <mach/mach_types.h>
59 #include <mach/memory_object_types.h>
60 #include <mach/memory_object_control.h>
61 #include <mach/vm_map.h>
62 #include <mach/mach_vm.h>
63 #include <mach/upl.h>
64 
65 #include <kern/kern_types.h>
66 #include <kern/kalloc.h>
67 #include <kern/zalloc.h>
68 #include <kern/thread.h>
69 #include <vm/pmap.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_protos.h> /* last */
72 
73 #include <libkern/crypto/sha1.h>
74 #include <libkern/crypto/sha2.h>
75 #include <libkern/libkern.h>
76 
77 #include <security/mac_framework.h>
78 #include <stdbool.h>
79 #include <stdatomic.h>
80 #include <libkern/amfi/amfi.h>
81 
82 /* XXX These should be in a BSD accessible Mach header, but aren't. */
83 extern kern_return_t memory_object_pages_resident(memory_object_control_t,
84     boolean_t *);
85 extern kern_return_t    memory_object_signed(memory_object_control_t control,
86     boolean_t is_signed);
87 extern boolean_t        memory_object_is_signed(memory_object_control_t);
88 extern void             memory_object_mark_trusted(
89 	memory_object_control_t         control);
90 
91 /* XXX Same for those. */
92 
93 extern void Debugger(const char *message);
94 
/*
 * Under DIAGNOSTIC builds, replace any previously defined assert() with a
 * variant that panics with the text of the failing condition; otherwise
 * use the stock kernel assert from <kern/assert.h>.
 */
#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
104 
105 static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
106 static int ubc_umcallback(vnode_t, void *);
107 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
108 static void ubc_cs_free(struct ubc_info *uip);
109 
110 static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
111 static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
112 
/* Allocation zone for ubc_info structures; elements are zeroed on free. */
ZONE_DECLARE(ubc_info_zone, "ubc_info zone", sizeof(struct ubc_info),
    ZC_ZFREE_CLEARMEM);
/*
 * Monotonic generation counter for cs_blobs.
 * NOTE(review): incremented/consumed outside this chunk — confirm usage there.
 */
static uint32_t cs_blob_generation_count = 1;
116 
117 /*
118  * CODESIGNING
119  * Routines to navigate code signing data structures in the kernel...
120  */
121 
/*
 * Allocation zone for cs_blob structures. The zone memory is read-only
 * (ZC_READONLY) and elements are zeroed on free (ZC_ZFREE_CLEARMEM).
 */
static SECURITY_READ_ONLY_LATE(zone_t) cs_blob_zone;
ZONE_INIT(&cs_blob_zone, "cs_blob zone", sizeof(struct cs_blob),
    ZC_READONLY | ZC_ZFREE_CLEARMEM, ZONE_ID_CS_BLOB, NULL);
125 
126 extern int cs_debug;
127 
/* log2 of the smallest code-signing page size supported (4 KiB) */
#define PAGE_SHIFT_4K           (12)
129 
130 static boolean_t
cs_valid_range(const void * start,const void * end,const void * lower_bound,const void * upper_bound)131 cs_valid_range(
132 	const void *start,
133 	const void *end,
134 	const void *lower_bound,
135 	const void *upper_bound)
136 {
137 	if (upper_bound < lower_bound ||
138 	    end < start) {
139 		return FALSE;
140 	}
141 
142 	if (start < lower_bound ||
143 	    end > upper_bound) {
144 		return FALSE;
145 	}
146 
147 	return TRUE;
148 }
149 
/* Message-digest primitive callbacks used to drive a hash computation. */
typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

/*
 * Descriptor for one supported code-signing hash algorithm: its
 * CS_HASHTYPE_* code, the (possibly truncated) hash size stored in the
 * code directory, the native digest size, and the digest callbacks.
 */
struct cs_hash {
	uint8_t             cs_type;    /* type code as per code signing */
	size_t              cs_size;    /* size of effective hash (may be truncated) */
	size_t              cs_digest_size;/* size of native hash */
	cs_md_init          cs_init;    /* initialize a digest context */
	cs_md_update        cs_update;  /* feed data into the context */
	cs_md_final         cs_final;   /* finalize and extract the digest */
};
162 
/* Return the CS_HASHTYPE_* code of the given hash descriptor. */
uint8_t
cs_hash_type(
	struct cs_hash const * const cs_hash)
{
	return cs_hash->cs_type;
}
169 
/* SHA-1: effective and native digest sizes are equal. */
static const struct cs_hash cs_hash_sha1 = {
	.cs_type = CS_HASHTYPE_SHA1,
	.cs_size = CS_SHA1_LEN,
	.cs_digest_size = SHA_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA1Init,
	.cs_update = (cs_md_update)SHA1Update,
	.cs_final = (cs_md_final)SHA1Final,
};
#if CRYPTO_SHA2
/* Full SHA-256. */
static const struct cs_hash cs_hash_sha256 = {
	.cs_type = CS_HASHTYPE_SHA256,
	.cs_size = SHA256_DIGEST_LENGTH,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
/* SHA-256 truncated: native digest, but only CS_SHA256_TRUNCATED_LEN bytes stored. */
static const struct cs_hash cs_hash_sha256_truncate = {
	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
	.cs_size = CS_SHA256_TRUNCATED_LEN,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
/* Full SHA-384. */
static const struct cs_hash cs_hash_sha384 = {
	.cs_type = CS_HASHTYPE_SHA384,
	.cs_size = SHA384_DIGEST_LENGTH,
	.cs_digest_size = SHA384_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA384_Init,
	.cs_update = (cs_md_update)SHA384_Update,
	.cs_final = (cs_md_final)SHA384_Final,
};
#endif
204 
205 static struct cs_hash const *
cs_find_md(uint8_t type)206 cs_find_md(uint8_t type)
207 {
208 	if (type == CS_HASHTYPE_SHA1) {
209 		return &cs_hash_sha1;
210 #if CRYPTO_SHA2
211 	} else if (type == CS_HASHTYPE_SHA256) {
212 		return &cs_hash_sha256;
213 	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
214 		return &cs_hash_sha256_truncate;
215 	} else if (type == CS_HASHTYPE_SHA384) {
216 		return &cs_hash_sha384;
217 #endif
218 	}
219 	return NULL;
220 }
221 
/* Scratch space large enough for any supported digest context. */
union cs_hash_union {
	SHA1_CTX                sha1ctxt;
	SHA256_CTX              sha256ctx;
	SHA384_CTX              sha384ctx;
};
227 
228 
229 /*
230  * Choose among different hash algorithms.
231  * Higher is better, 0 => don't use at all.
232  */
static const uint32_t hashPriorities[] = {
	CS_HASHTYPE_SHA1,               /* lowest priority */
	CS_HASHTYPE_SHA256_TRUNCATED,
	CS_HASHTYPE_SHA256,
	CS_HASHTYPE_SHA384,             /* highest priority */
};
239 
240 static unsigned int
hash_rank(const CS_CodeDirectory * cd)241 hash_rank(const CS_CodeDirectory *cd)
242 {
243 	uint32_t type = cd->hashType;
244 	unsigned int n;
245 
246 	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
247 		if (hashPriorities[n] == type) {
248 			return n + 1;
249 		}
250 	}
251 	return 0;       /* not supported */
252 }
253 
254 
255 /*
256  * Locating a page hash
257  */
/*
 * hashes
 *
 * Locate the stored hash for a given page index inside a code
 * directory, handling both the flat hash-array layout and the
 * scatter-list layout (CS_SUPPORTSSCATTER).
 *
 * Parameters:	cd		validated code directory
 *		page		page index to look up
 *		hash_len	size of one stored hash in bytes
 *		lower_bound/upper_bound
 *				bounds of the signature buffer
 *
 * Returns a pointer to the hash (hash_len bytes, range-checked against
 * the bounds), or NULL if the page has no hash or the structures are
 * malformed.
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	uint32_t page,
	size_t hash_len,
	const char *lower_bound,
	const char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter*)
		    ((const char*)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex = 0, scount, sbase = 0;
		/* iterate all scatter structs */
		do {
			/* never walk past the code directory's declared length */
			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
				if (cs_debug) {
					printf("CODE SIGNING: Scatter extends past Code Directory\n");
				}
				return NULL;
			}

			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? (count == 0 terminates the array) */
			if (scount == 0) {
				return NULL;
			}

			/* bases must be strictly increasing after the first entry */
			if ((hashindex > 0) && (new_base <= sbase)) {
				if (cs_debug) {
					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
					    sbase, new_base);
				}
				return NULL;    /* unordered scatter array */
			}
			sbase = new_base;

			/* this scatter beyond page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase + scount >= page) {
				/* Found the scatter struct that is
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
				    hashindex * hash_len;
				/* top = address of first hash after this scatter */
				top = base + scount * hash_len;
				if (!cs_valid_range(base, top, lower_bound,
				    upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}

				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex += scount;
			scatter++;
		} while (1);

		hash = base + (page - sbase) * hash_len;
	} else {
		/* flat layout: one hash per page, starting at hashOffset */
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * hash_len;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * hash_len;
	}

	/* final bounds check of the selected hash itself */
	if (!cs_valid_range(hash, hash + hash_len,
	    lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}
351 
352 /*
353  * cs_validate_codedirectory
354  *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
357  *
358  * Parameters:	cd			Pointer to code directory buffer
359  *		length			Length of buffer
360  *
361  * Returns:	0			Success
362  *		EBADEXEC		Invalid code signature
363  */
364 
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
	struct cs_hash const *hashtype;

	/* the buffer must at least hold the fixed-size header */
	if (length < sizeof(*cd)) {
		return EBADEXEC;
	}
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
		return EBADEXEC;
	}
	/* pageSize is a log2 shift value; accept between 4K and the native page size */
	if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) {
		return EBADEXEC;
	}
	hashtype = cs_find_md(cd->hashType);
	if (hashtype == NULL) {
		return EBADEXEC;
	}

	/* declared hash size must match the algorithm's stored-hash size */
	if (cd->hashSize != hashtype->cs_size) {
		return EBADEXEC;
	}

	if (length < ntohl(cd->hashOffset)) {
		return EBADEXEC;
	}

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
		return EBADEXEC;
	}

	/* check that codeslots fits in the buffer */
	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
		return EBADEXEC;
	}

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
		if (length < ntohl(cd->scatterOffset)) {
			return EBADEXEC;
		}

		const SC_Scatter *scatter = (const SC_Scatter *)
		    (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer, since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while (1) {
			/* check that the end of each scatter buffer is within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
				return EBADEXEC;
			}
			uint32_t scount = ntohl(scatter->count);
			/* a zero count terminates the scatter array */
			if (scount == 0) {
				break;
			}
			/* reject 32-bit overflow of the running page count */
			if (nPages + scount < nPages) {
				return EBADEXEC;
			}
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots)) {
			return EBADEXEC;
		}
#endif
	}

	if (length < ntohl(cd->identOffset)) {
		return EBADEXEC;
	}

	/* identifier is NUL terminated string */
	if (cd->identOffset) {
		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
			return EBADEXEC;
		}
	}

	/* team identifier is NUL terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset)) {
			return EBADEXEC;
		}

		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
			return EBADEXEC;
		}
	}

	/* linkage is variable length binary data */
	if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
		const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
		const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);

		/* reject wrap-around and ranges outside the buffer */
		if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
			return EBADEXEC;
		}
	}


	return 0;
}
478 
479 /*
480  *
481  */
482 
483 static int
cs_validate_blob(const CS_GenericBlob * blob,size_t length)484 cs_validate_blob(const CS_GenericBlob *blob, size_t length)
485 {
486 	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
487 		return EBADEXEC;
488 	}
489 	return 0;
490 }
491 
492 /*
493  * cs_validate_csblob
494  *
495  * Validate that superblob/embedded code directory to make sure that
496  * all internal pointers are valid.
497  *
498  * Will validate both a superblob csblob and a "raw" code directory.
499  *
500  *
501  * Parameters:	buffer			Pointer to code signature
502  *		length			Length of buffer
503  *		rcd			returns pointer to code directory
504  *
505  * Returns:	0			Success
506  *		EBADEXEC		Invalid code signature
507  */
508 
static int
cs_validate_csblob(
	const uint8_t *addr,
	const size_t blob_size,
	const CS_CodeDirectory **rcd,
	const CS_GenericBlob **rentitlements,
	const CS_GenericBlob **rder_entitlements)
{
	const CS_GenericBlob *blob;
	int error;
	size_t length;

	/* all out-parameters default to "not found" */
	*rcd = NULL;
	*rentitlements = NULL;
	*rder_entitlements = NULL;

	blob = (const CS_GenericBlob *)(const void *)addr;

	/* validate the generic header first, then trust the declared length */
	length = blob_size;
	error = cs_validate_blob(blob, length);
	if (error) {
		return error;
	}
	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb;
		uint32_t n, count;
		const CS_CodeDirectory *best_cd = NULL;
		unsigned int best_rank = 0;
#if PLATFORM_WatchOS
		const CS_CodeDirectory *sha1_cd = NULL;
#endif

		if (length < sizeof(CS_SuperBlob)) {
			return EBADEXEC;
		}

		sb = (const CS_SuperBlob *)blob;
		count = ntohl(sb->count);

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
			return EBADEXEC;
		}

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			uint32_t type = ntohl(blobIndex->type);
			uint32_t offset = ntohl(blobIndex->offset);
			/* the sub-blob must start within the superblob */
			if (length < offset) {
				return EBADEXEC;
			}

			const CS_GenericBlob *subBlob =
			    (const CS_GenericBlob *)(const void *)(addr + offset);

			size_t subLength = length - offset;

			if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
				return error;
			}
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, that is also returned */
			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
					return error;
				}
				/* keep the code directory with the highest-ranked hash algorithm */
				unsigned int rank = hash_rank(candidate);
				if (cs_debug > 3) {
					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
				}
				if (best_cd == NULL || rank > best_rank) {
					best_cd = candidate;
					best_rank = rank;

					if (cs_debug > 2) {
						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
					}
					*rcd = best_cd;
				} else if (best_cd != NULL && rank == best_rank) {
					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
					return EBADEXEC;
				}
#if PLATFORM_WatchOS
				if (candidate->hashType == CS_HASHTYPE_SHA1) {
					if (sha1_cd != NULL) {
						printf("multiple sha1 CodeDirectories in signature; rejecting\n");
						return EBADEXEC;
					}
					sha1_cd = candidate;
				}
#endif
			} else if (type == CSSLOT_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rentitlements != NULL) {
					printf("multiple entitlements blobs\n");
					return EBADEXEC;
				}
				*rentitlements = subBlob;
			} else if (type == CSSLOT_DER_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_DER_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rder_entitlements != NULL) {
					printf("multiple der entitlements blobs\n");
					return EBADEXEC;
				}
				*rder_entitlements = subBlob;
			}
		}

#if PLATFORM_WatchOS
		/* To keep watchOS fast enough, we have to resort to sha1 for
		 * some code.
		 *
		 * At the time of writing this comment, known sha1 attacks are
		 * collision attacks (not preimage or second preimage
		 * attacks), which do not apply to platform binaries since
		 * they have a fixed hash in the trust cache.  Given this
		 * property, we only prefer sha1 code directories for adhoc
		 * signatures, which always have to be in a trust cache to be
		 * valid (can-load-cdhash does not exist for watchOS). Those
		 * are, incidentally, also the platform binaries, for which we
		 * care about the performance hit that sha256 would bring us.
		 *
		 * Platform binaries may still contain a (not chosen) sha256
		 * code directory, which keeps software updates that switch to
		 * sha256-only small.
		 */

		if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
			if (sha1_cd->flags != (*rcd)->flags) {
				printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
				    (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
				*rcd = NULL;
				return EBADEXEC;
			}

			*rcd = sha1_cd;
		}
#endif
	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
		/* a "raw" code directory stored without a superblob wrapper */
		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
			return error;
		}
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	/* a valid signature must always yield a code directory */
	if (*rcd == NULL) {
		return EBADEXEC;
	}

	return 0;
}
672 
673 /*
674  * cs_find_blob_bytes
675  *
 * Find a blob in the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use csblob_find_blob() instead.
679  *
680  * Will also find a "raw" code directory if its stored as well as
681  * searching the superblob.
682  *
683  * Parameters:	buffer			Pointer to code signature
684  *		length			Length of buffer
685  *		type			type of blob to find
686  *		magic			the magic number for that blob
687  *
688  * Returns:	pointer			Success
689  *		NULL			Buffer not found
690  */
691 
const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

	/* addr + length wrapping around the address space is a caller bug */
	if ((addr + length) < addr) {
		panic("CODE SIGNING: CS Blob length overflow for addr: %p", addr);
	}

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		for (n = 0; n < count; n++) {
			/* only consider index entries of the requested slot type */
			if (ntohl(sb->index[n].type) != type) {
				continue;
			}
			uint32_t offset = ntohl(sb->index[n].offset);
			/* a generic blob header must fit at the given offset */
			if (length - sizeof(const CS_GenericBlob) < offset) {
				return NULL;
			}
			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
			if (ntohl(blob->magic) != magic) {
				continue;
			}
			/* reject sub-blobs whose declared length wraps or escapes the buffer */
			if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
				panic("CODE SIGNING: CS Blob length overflow for blob at: %p", blob);
			} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
				continue;
			}
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
	    && magic == CSMAGIC_CODEDIRECTORY) {
		/* a "raw" code directory stored without a superblob wrapper */
		if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
			panic("CODE SIGNING: CS Blob length overflow for code directory blob at: %p", blob);
		} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
			return NULL;
		}
		return blob;
	}
	return NULL;
}
735 
736 
737 const CS_GenericBlob *
csblob_find_blob(struct cs_blob * csblob,uint32_t type,uint32_t magic)738 csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
739 {
740 	if ((csblob->csb_flags & CS_VALID) == 0) {
741 		return NULL;
742 	}
743 	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
744 }
745 
746 static const uint8_t *
find_special_slot(const CS_CodeDirectory * cd,size_t slotsize,uint32_t slot)747 find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
748 {
749 	/* there is no zero special slot since that is the first code slot */
750 	if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
751 		return NULL;
752 	}
753 
754 	return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
755 }
756 
/* All-zero hash, compared against a special slot to detect an unused slot. */
static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
758 
/*
 * csblob_get_entitlements
 *
 * Return the entitlements blob of a cs_blob after verifying that its
 * digest matches the hash recorded in the code directory's
 * CSSLOT_ENTITLEMENTS special slot.
 *
 * Parameters:	csblob		blob to query
 *		out_start	receives pointer to the entitlements blob
 *		out_length	receives its length in bytes
 *
 * Returns:	0		Success (*out_start stays NULL when the
 *				signature legitimately has no entitlements)
 *		EBADEXEC	blob and code directory disagree, or the
 *				hash type is unusable
 */
int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	/* the digest must fit our scratch buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	/* only trust the cached entitlements pointer of a validated blob */
	if ((csblob->csb_flags & CS_VALID) == 0) {
		entitlements = NULL;
	} else {
		entitlements = csblob->csb_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		/* no slot: having an entitlements blob anyway is inconsistent */
		if (entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (entitlements == NULL) {
		/* slot present but no blob: only valid if the slot hash is all zero */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* hash the blob and compare against the slot's recorded hash */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = __DECONST(void *, entitlements);
	*out_length = ntohl(entitlements->length);

	return 0;
}
810 
/*
 * csblob_get_der_entitlements
 *
 * Return the DER entitlements blob of a cs_blob after verifying that
 * its digest matches the hash recorded in the code directory's
 * CSSLOT_DER_ENTITLEMENTS special slot.
 *
 * Parameters:	csblob		blob to query
 *		out_start	receives pointer to the DER entitlements blob
 *		out_length	receives its length in bytes
 *
 * Returns:	0		Success (*out_start stays NULL when the
 *				signature legitimately has no DER entitlements)
 *		EBADEXEC	blob and code directory disagree, or the
 *				hash type is unusable
 */
int
csblob_get_der_entitlements(struct cs_blob *csblob, const CS_GenericBlob **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *der_entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	/* the digest must fit our scratch buffer */
	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	/* only trust the cached pointer of a validated blob */
	if ((csblob->csb_flags & CS_VALID) == 0) {
		der_entitlements = NULL;
	} else {
		der_entitlements = csblob->csb_der_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_DER_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		/* no slot: having a DER entitlements blob anyway is inconsistent */
		if (der_entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (der_entitlements == NULL) {
		/* slot present but no blob: only valid if the slot hash is all zero */
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	/* hash the blob and compare against the slot's recorded hash */
	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, der_entitlements, ntohl(der_entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = der_entitlements;
	*out_length = ntohl(der_entitlements->length);

	return 0;
}
862 
863 /*
864  * Register a provisioning profile with a cs_blob.
865  */
int
csblob_register_profile(struct cs_blob __unused *csblob, void __unused *profile_addr, vm_size_t __unused profile_size)
{
	/* No-op in this configuration: all parameters are ignored. */
	return 0;
}
871 
872 /*
873  * CODESIGNING
874  * End of routines to navigate code signing data structures in the kernel.
875  */
876 
877 
878 
879 /*
880  * ubc_info_init
881  *
882  * Allocate and attach an empty ubc_info structure to a vnode
883  *
884  * Parameters:	vp			Pointer to the vnode
885  *
886  * Returns:	0			Success
887  *	vnode_size:ENOMEM		Not enough space
888  *	vnode_size:???			Other error from vnode_getattr
889  *
890  */
int
ubc_info_init(struct vnode *vp)
{
	/* withfsize == 0: the size is obtained from the vnode itself */
	return ubc_info_init_internal(vp, 0, 0);
}
896 
897 
898 /*
899  * ubc_info_init_withsize
900  *
901  * Allocate and attach a sized ubc_info structure to a vnode
902  *
903  * Parameters:	vp			Pointer to the vnode
904  *		filesize		The size of the file
905  *
906  * Returns:	0			Success
907  *	vnode_size:ENOMEM		Not enough space
908  *	vnode_size:???			Other error from vnode_getattr
909  */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	/* withfsize == 1: use the caller-supplied filesize, skip vnode_size() */
	return ubc_info_init_internal(vp, 1, filesize);
}
915 
916 
917 /*
918  * ubc_info_init_internal
919  *
920  * Allocate and attach a ubc_info structure to a vnode
921  *
922  * Parameters:	vp			Pointer to the vnode
923  *		withfsize{0,1}		Zero if the size should be obtained
924  *					from the vnode; otherwise, use filesize
925  *		filesize		The size of the file, if withfsize == 1
926  *
927  * Returns:	0			Success
928  *	vnode_size:ENOMEM		Not enough space
929  *	vnode_size:???			Other error from vnode_getattr
930  *
931  * Notes:	We call a blocking zalloc(), and the zone was created as an
932  *		expandable and collectable zone, so if no memory is available,
933  *		it is possible for zalloc() to block indefinitely.  zalloc()
934  *		may also panic if the zone of zones is exhausted, since it's
935  *		NOT expandable.
936  *
937  *		We unconditionally call vnode_pager_setup(), even if this is
938  *		a reuse of a ubc_info; in that case, we should probably assert
939  *		that it does not already have a pager association, but do not.
940  *
941  *		Since memory_object_create_named() can only fail from receiving
942  *		an invalid pager argument, the explicit check and panic is
943  *		merely precautionary.
944  */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	struct ubc_info *uip;
	void *  pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {
		/* Z_WAITOK may block; Z_ZERO gives a zero-filled ubc_info */
		uip = zalloc_flags(ubc_info_zone, Z_WAITOK | Z_ZERO);

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use bogus size.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
	    (memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS) {
		panic("ubc_info_init: memory_object_create_named returned %d", kret);
	}

	assert(control);
	uip->ui_control = control;      /* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */

	if (withfsize == 0) {
		/* initialize the size from the vnode; fall back to 0 on error */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error) {
			uip->ui_size = 0;
		}
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */

	return error;
}
1030 
1031 
1032 /*
1033  * ubc_info_free
1034  *
1035  * Free a ubc_info structure
1036  *
1037  * Parameters:	uip			A pointer to the ubc_info to free
1038  *
1039  * Returns:	(void)
1040  *
1041  * Notes:	If there is a credential that has subsequently been associated
1042  *		with the ubc_info via a call to ubc_setcred(), the reference
1043  *		to the credential is dropped.
1044  *
1045  *		It's actually impossible for a ubc_info.ui_control to take the
1046  *		value MEMORY_OBJECT_CONTROL_NULL.
1047  */
1048 static void
ubc_info_free(struct ubc_info * uip)1049 ubc_info_free(struct ubc_info *uip)
1050 {
1051 	if (IS_VALID_CRED(uip->ui_ucred)) {
1052 		kauth_cred_unref(&uip->ui_ucred);
1053 	}
1054 
1055 	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
1056 		memory_object_control_deallocate(uip->ui_control);
1057 	}
1058 
1059 	cluster_release(uip);
1060 	ubc_cs_free(uip);
1061 
1062 	zfree(ubc_info_zone, uip);
1063 	return;
1064 }
1065 
1066 
/*
 * ubc_info_deallocate
 *
 * Public entry point for releasing a ubc_info; simply forwards to
 * ubc_info_free().
 */
void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
1072 
1073 /*
1074  * ubc_setsize_ex
1075  *
 * Tell the VM that the size of the file represented by the vnode has
1077  * changed
1078  *
1079  * Parameters:	vp	   The vp whose backing file size is
1080  *					   being changed
1081  *				nsize  The new size of the backing file
1082  *				opts   Options
1083  *
1084  * Returns:	EINVAL for new size < 0
1085  *			ENOENT if no UBC info exists
1086  *          EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1087  *          Other errors (mapped to errno_t) returned by VM functions
1088  *
1089  * Notes:   This function will indicate success if the new size is the
1090  *		    same or larger than the old size (in this case, the
1091  *		    remainder of the file will require modification or use of
1092  *		    an existing upl to access successfully).
1093  *
1094  *		    This function will fail if the new file size is smaller,
1095  *		    and the memory region being invalidated was unable to
1096  *		    actually be invalidated and/or the last page could not be
1097  *		    flushed, if the new size is not aligned to a page
1098  *		    boundary.  This is usually indicative of an I/O error.
1099  */
errno_t
ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;    /* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0) {
		return EINVAL;
	}

	if (!UBCINFOEXISTS(vp)) {
		return ENOENT;
	}

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	/* optionally refuse to shrink, to avoid re-entering the filesystem */
	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
		return EAGAIN;
	}

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {   /* Nothing more to do */
		if (nsize > osize) {
			/* growing the file: notify kqueue watchers */
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t           upl;
		upl_page_info_t *pl;

		/*
		 * new EOF ends up in the middle of a page
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);

		if (kret != KERN_SUCCESS) {
			panic("ubc_setsize: ubc_create_upl (error = %d)", kret);
		}

		if (upl_valid_page(pl, 0)) {
			/* zero from the new EOF to the end of the page */
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
		}

		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		/* tail page handled above; start invalidation at the next page */
		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int     flags;

		if (lastpg == 0) {
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		} else {
			flags = MEMORY_OBJECT_DATA_FLUSH;
		}
		/*
		 * invalidate the pages beyond the new EOF page
		 *
		 */
		kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
		    MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS) {
			/* usually indicative of an I/O error; reported via return value */
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
		}
	}
	return mach_to_bsd_errno(kret);
}
1194 
1195 // Returns true for success
1196 int
ubc_setsize(vnode_t vp,off_t nsize)1197 ubc_setsize(vnode_t vp, off_t nsize)
1198 {
1199 	return ubc_setsize_ex(vp, nsize, 0) == 0;
1200 }
1201 
1202 /*
1203  * ubc_getsize
1204  *
 * Get the size of the file associated with the specified vnode
1206  *
1207  * Parameters:	vp			The vnode whose size is of interest
1208  *
1209  * Returns:	0			There is no ubc_info associated with
1210  *					this vnode, or the size is zero
1211  *		!0			The size of the file
1212  *
1213  * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associated with a zero
1215  *		length file, and a vnode with no associated ubc_info.  The
1216  *		caller therefore needs to not care, or needs to ensure that
1217  *		they have previously successfully called ubc_info_init() or
1218  *		ubc_info_init_withsize().
1219  */
1220 off_t
ubc_getsize(struct vnode * vp)1221 ubc_getsize(struct vnode *vp)
1222 {
1223 	/* people depend on the side effect of this working this way
1224 	 * as they call this for directory
1225 	 */
1226 	if (!UBCINFOEXISTS(vp)) {
1227 		return (off_t)0;
1228 	}
1229 	return vp->v_ubcinfo->ui_size;
1230 }
1231 
1232 
1233 /*
1234  * ubc_umount
1235  *
1236  * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1237  * mount point
1238  *
1239  * Parameters:	mp			The mount point
1240  *
1241  * Returns:	0			Success
1242  *
1243  * Notes:	There is no failure indication for this function.
1244  *
1245  *		This function is used in the unmount path; since it may block
1246  *		I/O indefinitely, it should not be used in the forced unmount
1247  *		path, since a device unavailability could also block that
1248  *		indefinitely.
1249  *
1250  *		Because there is no device ejection interlock on USB, FireWire,
1251  *		or similar devices, it's possible that an ejection that begins
1252  *		subsequent to the vnode_iterate() completing, either on one of
1253  *		those devices, or a network mount for which the server quits
1254  *		responding, etc., may cause the caller to block indefinitely.
1255  */
1256 __private_extern__ int
ubc_umount(struct mount * mp)1257 ubc_umount(struct mount *mp)
1258 {
1259 	vnode_iterate(mp, 0, ubc_umcallback, 0);
1260 	return 0;
1261 }
1262 
1263 
1264 /*
1265  * ubc_umcallback
1266  *
1267  * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1268  * and vnode_iterate() for details of implementation.
1269  */
1270 static int
ubc_umcallback(vnode_t vp,__unused void * args)1271 ubc_umcallback(vnode_t vp, __unused void * args)
1272 {
1273 	if (UBCINFOEXISTS(vp)) {
1274 		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1275 	}
1276 	return VNODE_RETURNED;
1277 }
1278 
1279 
1280 /*
1281  * ubc_getcred
1282  *
1283  * Get the credentials currently active for the ubc_info associated with the
1284  * vnode.
1285  *
1286  * Parameters:	vp			The vnode whose ubc_info credentials
1287  *					are to be retrieved
1288  *
1289  * Returns:	!NOCRED			The credentials
1290  *		NOCRED			If there is no ubc_info for the vnode,
1291  *					or if there is one, but it has not had
1292  *					any credentials associated with it via
1293  *					a call to ubc_setcred()
1294  */
1295 kauth_cred_t
ubc_getcred(struct vnode * vp)1296 ubc_getcred(struct vnode *vp)
1297 {
1298 	if (UBCINFOEXISTS(vp)) {
1299 		return vp->v_ubcinfo->ui_ucred;
1300 	}
1301 
1302 	return NOCRED;
1303 }
1304 
1305 
1306 /*
1307  * ubc_setthreadcred
1308  *
1309  * If they are not already set, set the credentials of the ubc_info structure
1310  * associated with the vnode to those of the supplied thread; otherwise leave
1311  * them alone.
1312  *
1313  * Parameters:	vp			The vnode whose ubc_info creds are to
1314  *					be set
1315  *		p			The process whose credentials are to
1316  *					be used, if not running on an assumed
1317  *					credential
1318  *		thread			The thread whose credentials are to
1319  *					be used
1320  *
1321  * Returns:	1			This vnode has no associated ubc_info
1322  *		0			Success
1323  *
1324  * Notes:	This function takes a proc parameter to account for bootstrap
1325  *		issues where a task or thread may call this routine, either
1326  *		before credentials have been initialized by bsd_init(), or if
 *		there is no BSD info associated with a mach thread yet.  This
1328  *		is known to happen in both the initial swap and memory mapping
1329  *		calls.
1330  *
1331  *		This function is generally used only in the following cases:
1332  *
1333  *		o	a memory mapped file via the mmap() system call
1334  *		o	a swap store backing file
1335  *		o	subsequent to a successful write via vn_write()
1336  *
1337  *		The information is then used by the NFS client in order to
1338  *		cons up a wire message in either the page-in or page-out path.
1339  *
1340  *		There are two potential problems with the use of this API:
1341  *
1342  *		o	Because the write path only set it on a successful
1343  *			write, there is a race window between setting the
1344  *			credential and its use to evict the pages to the
1345  *			remote file server
1346  *
1347  *		o	Because a page-in may occur prior to a write, the
1348  *			credential may not be set at this time, if the page-in
1349  *			is not the result of a mapping established via mmap().
1350  *
1351  *		In both these cases, this will be triggered from the paging
1352  *		path, which will instead use the credential of the current
1353  *		process, which in this case is either the dynamic_pager or
1354  *		the kernel task, both of which utilize "root" credentials.
1355  *
1356  *		This may potentially permit operations to occur which should
1357  *		be denied, or it may cause to be denied operations which
1358  *		should be permitted, depending on the configuration of the NFS
1359  *		server.
1360  */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
	struct ubc_info *uip;
	thread_ro_t tro = get_thread_ro(thread);

	if (!UBCINFOEXISTS(vp)) {
		return 1;
	}

	assert(thread == current_thread());

	/* the vnode lock serializes installation of the credential */
	vnode_lock(vp);

	uip = vp->v_ubcinfo;

	/* an already-installed credential wins; we never replace it here */
	if (!IS_VALID_CRED(uip->ui_ucred)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (tro->tro_flags & TRO_SETUID) {
			uip->ui_ucred = tro->tro_cred;
			kauth_cred_ref(uip->ui_ucred);
		} else {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		}
	}
	vnode_unlock(vp);

	return 0;
}
1390 
1391 
1392 /*
1393  * ubc_setcred
1394  *
1395  * If they are not already set, set the credentials of the ubc_info structure
1396  * associated with the vnode to those of the process; otherwise leave them
1397  * alone.
1398  *
1399  * Parameters:	vp			The vnode whose ubc_info creds are to
1400  *					be set
1401  *		p			The process whose credentials are to
1402  *					be used
1403  *
1404  * Returns:	0			This vnode has no associated ubc_info
1405  *		1			Success
1406  *
1407  * Notes:	The return values for this function are inverted from nearly
1408  *		all other uses in the kernel.
1409  *
1410  *		See also ubc_setthreadcred(), above.
1411  *
1412  *		This function is considered deprecated, and generally should
1413  *		not be used, as it is incompatible with per-thread credentials;
1414  *		it exists for legacy KPI reasons.
1415  *
1416  * DEPRECATION:	ubc_setcred() is being deprecated. Please use
1417  *		ubc_setthreadcred() instead.
1418  */
1419 int
ubc_setcred(struct vnode * vp,proc_t p)1420 ubc_setcred(struct vnode *vp, proc_t p)
1421 {
1422 	struct ubc_info *uip;
1423 	kauth_cred_t credp;
1424 
1425 	/* If there is no ubc_info, deny the operation */
1426 	if (!UBCINFOEXISTS(vp)) {
1427 		return 0;
1428 	}
1429 
1430 	/*
1431 	 * Check to see if there is already a credential reference in the
1432 	 * ubc_info; if there is not, take one on the supplied credential.
1433 	 */
1434 	vnode_lock(vp);
1435 	uip = vp->v_ubcinfo;
1436 	credp = uip->ui_ucred;
1437 	if (!IS_VALID_CRED(credp)) {
1438 		uip->ui_ucred = kauth_cred_proc_ref(p);
1439 	}
1440 	vnode_unlock(vp);
1441 
1442 	return 1;
1443 }
1444 
1445 /*
1446  * ubc_getpager
1447  *
1448  * Get the pager associated with the ubc_info associated with the vnode.
1449  *
1450  * Parameters:	vp			The vnode to obtain the pager from
1451  *
1452  * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
1453  *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
1454  *
1455  * Notes:	For each vnode that has a ubc_info associated with it, that
1456  *		ubc_info SHALL have a pager associated with it, so in the
1457  *		normal case, it's impossible to return VNODE_PAGER_NULL for
1458  *		a vnode with an associated ubc_info.
1459  */
1460 __private_extern__ memory_object_t
ubc_getpager(struct vnode * vp)1461 ubc_getpager(struct vnode *vp)
1462 {
1463 	if (UBCINFOEXISTS(vp)) {
1464 		return vp->v_ubcinfo->ui_pager;
1465 	}
1466 
1467 	return 0;
1468 }
1469 
1470 
1471 /*
1472  * ubc_getobject
1473  *
1474  * Get the memory object control associated with the ubc_info associated with
1475  * the vnode
1476  *
1477  * Parameters:	vp			The vnode to obtain the memory object
1478  *					from
1479  *		flags			DEPRECATED
1480  *
1481  * Returns:	!MEMORY_OBJECT_CONTROL_NULL
1482  *		MEMORY_OBJECT_CONTROL_NULL
1483  *
1484  * Notes:	Historically, if the flags were not "do not reactivate", this
1485  *		function would look up the memory object using the pager if
1486  *		it did not exist (this could be the case if the vnode had
1487  *		been previously reactivated).  The flags would also permit a
1488  *		hold to be requested, which would have created an object
1489  *		reference, if one had not already existed.  This usage is
1490  *		deprecated, as it would permit a race between finding and
1491  *		taking the reference vs. a single reference being dropped in
1492  *		another thread.
1493  */
1494 memory_object_control_t
ubc_getobject(struct vnode * vp,__unused int flags)1495 ubc_getobject(struct vnode *vp, __unused int flags)
1496 {
1497 	if (UBCINFOEXISTS(vp)) {
1498 		return vp->v_ubcinfo->ui_control;
1499 	}
1500 
1501 	return MEMORY_OBJECT_CONTROL_NULL;
1502 }
1503 
1504 /*
1505  * ubc_blktooff
1506  *
1507  * Convert a given block number to a memory backing object (file) offset for a
1508  * given vnode
1509  *
1510  * Parameters:	vp			The vnode in which the block is located
1511  *		blkno			The block number to convert
1512  *
1513  * Returns:	!-1			The offset into the backing object
1514  *		-1			There is no ubc_info associated with
1515  *					the vnode
1516  *		-1			An error occurred in the underlying VFS
1517  *					while translating the block to an
1518  *					offset; the most likely cause is that
1519  *					the caller specified a block past the
1520  *					end of the file, but this could also be
1521  *					any other error from VNOP_BLKTOOFF().
1522  *
1523  * Note:	Representing the error in band loses some information, but does
1524  *		not occlude a valid offset, since an off_t of -1 is normally
1525  *		used to represent EOF.  If we had a more reliable constant in
1526  *		our header files for it (i.e. explicitly cast to an off_t), we
1527  *		would use it here instead.
1528  */
1529 off_t
ubc_blktooff(vnode_t vp,daddr64_t blkno)1530 ubc_blktooff(vnode_t vp, daddr64_t blkno)
1531 {
1532 	off_t file_offset = -1;
1533 	int error;
1534 
1535 	if (UBCINFOEXISTS(vp)) {
1536 		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1537 		if (error) {
1538 			file_offset = -1;
1539 		}
1540 	}
1541 
1542 	return file_offset;
1543 }
1544 
1545 
1546 /*
1547  * ubc_offtoblk
1548  *
1549  * Convert a given offset in a memory backing object into a block number for a
1550  * given vnode
1551  *
1552  * Parameters:	vp			The vnode in which the offset is
1553  *					located
1554  *		offset			The offset into the backing object
1555  *
1556  * Returns:	!-1			The returned block number
1557  *		-1			There is no ubc_info associated with
1558  *					the vnode
1559  *		-1			An error occurred in the underlying VFS
1560  *					while translating the block to an
1561  *					offset; the most likely cause is that
1562  *					the caller specified a block past the
1563  *					end of the file, but this could also be
1564  *					any other error from VNOP_OFFTOBLK().
1565  *
1566  * Note:	Representing the error in band loses some information, but does
1567  *		not occlude a valid block number, since block numbers exceed
1568  *		the valid range for offsets, due to their relative sizes.  If
1569  *		we had a more reliable constant than -1 in our header files
1570  *		for it (i.e. explicitly cast to an daddr64_t), we would use it
1571  *		here instead.
1572  */
1573 daddr64_t
ubc_offtoblk(vnode_t vp,off_t offset)1574 ubc_offtoblk(vnode_t vp, off_t offset)
1575 {
1576 	daddr64_t blkno = -1;
1577 	int error = 0;
1578 
1579 	if (UBCINFOEXISTS(vp)) {
1580 		error = VNOP_OFFTOBLK(vp, offset, &blkno);
1581 		if (error) {
1582 			blkno = -1;
1583 		}
1584 	}
1585 
1586 	return blkno;
1587 }
1588 
1589 
1590 /*
1591  * ubc_pages_resident
1592  *
1593  * Determine whether or not a given vnode has pages resident via the memory
1594  * object control associated with the ubc_info associated with the vnode
1595  *
1596  * Parameters:	vp			The vnode we want to know about
1597  *
1598  * Returns:	1			Yes
1599  *		0			No
1600  */
1601 int
ubc_pages_resident(vnode_t vp)1602 ubc_pages_resident(vnode_t vp)
1603 {
1604 	kern_return_t           kret;
1605 	boolean_t                       has_pages_resident;
1606 
1607 	if (!UBCINFOEXISTS(vp)) {
1608 		return 0;
1609 	}
1610 
1611 	/*
1612 	 * The following call may fail if an invalid ui_control is specified,
1613 	 * or if there is no VM object associated with the control object.  In
1614 	 * either case, reacting to it as if there were no pages resident will
1615 	 * result in correct behavior.
1616 	 */
1617 	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1618 
1619 	if (kret != KERN_SUCCESS) {
1620 		return 0;
1621 	}
1622 
1623 	if (has_pages_resident == TRUE) {
1624 		return 1;
1625 	}
1626 
1627 	return 0;
1628 }
1629 
1630 /*
1631  * ubc_msync
1632  *
1633  * Clean and/or invalidate a range in the memory object that backs this vnode
1634  *
1635  * Parameters:	vp			The vnode whose associated ubc_info's
1636  *					associated memory object is to have a
1637  *					range invalidated within it
1638  *		beg_off			The start of the range, as an offset
1639  *		end_off			The end of the range, as an offset
1640  *		resid_off		The address of an off_t supplied by the
1641  *					caller; may be set to NULL to ignore
1642  *		flags			See ubc_msync_internal()
1643  *
1644  * Returns:	0			Success
1645  *		!0			Failure; an errno is returned
1646  *
1647  * Implicit Returns:
1648  *		*resid_off, modified	If non-NULL, the  contents are ALWAYS
1649  *					modified; they are initialized to the
1650  *					beg_off, and in case of an I/O error,
1651  *					the difference between beg_off and the
1652  *					current value will reflect what was
1653  *					able to be written before the error
1654  *					occurred.  If no error is returned, the
1655  *					value of the resid_off is undefined; do
1656  *					NOT use it in place of end_off if you
1657  *					intend to increment from the end of the
1658  *					last call and call iteratively.
1659  *
1660  * Notes:	see ubc_msync_internal() for more detailed information.
1661  *
1662  */
1663 errno_t
ubc_msync(vnode_t vp,off_t beg_off,off_t end_off,off_t * resid_off,int flags)1664 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1665 {
1666 	int retval;
1667 	int io_errno = 0;
1668 
1669 	if (resid_off) {
1670 		*resid_off = beg_off;
1671 	}
1672 
1673 	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1674 
1675 	if (retval == 0 && io_errno == 0) {
1676 		return EINVAL;
1677 	}
1678 	return io_errno;
1679 }
1680 
1681 
1682 /*
1683  * ubc_msync_internal
1684  *
1685  * Clean and/or invalidate a range in the memory object that backs this vnode
1686  *
1687  * Parameters:	vp			The vnode whose associated ubc_info's
1688  *					associated memory object is to have a
1689  *					range invalidated within it
1690  *		beg_off			The start of the range, as an offset
1691  *		end_off			The end of the range, as an offset
1692  *		resid_off		The address of an off_t supplied by the
1693  *					caller; may be set to NULL to ignore
1694  *		flags			MUST contain at least one of the flags
1695  *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
1696  *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1697  *					UBC_SYNC may also be specified to cause
1698  *					this function to block until the
1699  *					operation is complete.  The behavior
1700  *					of UBC_SYNC is otherwise undefined.
1701  *		io_errno		The address of an int to contain the
1702  *					errno from a failed I/O operation, if
1703  *					one occurs; may be set to NULL to
1704  *					ignore
1705  *
1706  * Returns:	1			Success
1707  *		0			Failure
1708  *
1709  * Implicit Returns:
1710  *		*resid_off, modified	The contents of this offset MAY be
1711  *					modified; in case of an I/O error, the
1712  *					difference between beg_off and the
1713  *					current value will reflect what was
1714  *					able to be written before the error
1715  *					occurred.
1716  *		*io_errno, modified	The contents of this offset are set to
1717  *					an errno, if an error occurs; if the
1718  *					caller supplies an io_errno parameter,
1719  *					they should be careful to initialize it
1720  *					to 0 before calling this function to
1721  *					enable them to distinguish an error
1722  *					with a valid *resid_off from an invalid
1723  *					one, and to avoid potentially falsely
1724  *					reporting an error, depending on use.
1725  *
1726  * Notes:	If there is no ubc_info associated with the vnode supplied,
1727  *		this function immediately returns success.
1728  *
1729  *		If the value of end_off is less than or equal to beg_off, this
1730  *		function immediately returns success; that is, end_off is NOT
1731  *		inclusive.
1732  *
1733  *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1734  *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1735  *		attempt to block on in-progress I/O by calling this function
1736  *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1737  *		in order to block pending on the I/O already in progress.
1738  *
1739  *		The start offset is truncated to the page boundary and the
1740  *		size is adjusted to include the last page in the range; that
1741  *		is, end_off on exactly a page boundary will not change if it
1742  *		is rounded, and the range of bytes written will be from the
1743  *		truncate beg_off to the rounded (end_off - 1).
1744  */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t    tsize;
	kern_return_t           kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if (!UBCINFOEXISTS(vp)) {
		return 0;
	}
	/* at least one of INVALIDATE/PUSHDIRTY/PUSHALL is required */
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
		return 0;
	}
	/* empty range: trivially successful; end_off is exclusive */
	if (end_off <= beg_off) {
		return 1;
	}

	if (flags & UBC_INVALIDATE) {
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
	}

	if (flags & UBC_SYNC) {
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;
	}

	if (flags & UBC_PUSHDIRTY) {
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
	}

	if (flags & UBC_PUSHALL) {
		/*
		 * then return all the interesting pages in the range (both
		 * dirty and precious) to the pager
		 * (note: this overrides UBC_PUSHDIRTY if both were given)
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;
	}

	/* page-align the range: truncate the start, round up the end */
	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
	    beg_off, tsize,
	    (memory_object_offset_t *)resid_off,
	    io_errno, flush_flags, request_flags,
	    VM_PROT_NO_CHANGE);

	return (kret == KERN_SUCCESS) ? 1 : 0;
}
1805 
1806 
1807 /*
1808  * ubc_map
1809  *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1811  * to it for the ubc system, if there isn't one already, so it will not be
1812  * recycled while it's in use, and set flags on the ubc_info to indicate that
1813  * we have done this
1814  *
1815  * Parameters:	vp			The vnode to map
1816  *		flags			The mapping flags for the vnode; this
1817  *					will be a combination of one or more of
1818  *					PROT_READ, PROT_WRITE, and PROT_EXEC
1819  *
1820  * Returns:	0			Success
1821  *		EPERM			Permission was denied
1822  *
1823  * Notes:	An I/O reference on the vnode must already be held on entry
1824  *
1825  *		If there is no ubc_info associated with the vnode, this function
1826  *		will return success.
1827  *
1828  *		If a permission error occurs, this function will return
1829  *		failure; all other failures will cause this function to return
1830  *		success.
1831  *
1832  *		IMPORTANT: This is an internal use function, and its symbols
1833  *		are not exported, hence its error checking is not very robust.
1834  *		It is primarily used by:
1835  *
1836  *		o	mmap(), when mapping a file
1837  *		o	When mapping a shared file (a shared library in the
1838  *			shared segment region)
1839  *		o	When loading a program image during the exec process
1840  *
1841  *		...all of these uses ignore the return code, and any fault that
1842  *		results later because of a failure is handled in the fix-up path
1843  *		of the fault handler.  The interface exists primarily as a
1844  *		performance hint.
1845  *
1846  *		Given that third party implementation of the type of interfaces
1847  *		that would use this function, such as alternative executable
1848  *		formats, etc., are unsupported, this function is not exported
1849  *		for general use.
1850  *
1851  *		The extra reference is held until the VM system unmaps the
1852  *		vnode from its own context to maintain a vnode reference in
1853  *		cases like open()/mmap()/close(), which leave the backing
1854  *		object referenced by a mapped memory region in a process
1855  *		address space.
1856  */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	int need_wakeup = 0;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/*
		 * UI_MAPBUSY serializes concurrent mappers of the same vnode;
		 * wait here until any in-flight ubc_map()/unmap completes.
		 */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
			    PRIBIO, "ubc_map", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);
		vnode_unlock(vp);

		/* give the filesystem a chance to veto/observe the mapping */
		error = VNOP_MMAP(vp, flags, vfs_context_current());

		/*
		 * rdar://problem/22587101 required that we stop propagating
		 * EPERM up the stack. Otherwise, we would have to funnel up
		 * the error at all the call sites for memory_object_map().
		 * The risk is in having to undo the map/object/entry state at
		 * all these call sites. It would also affect more than just mmap()
		 * e.g. vm_remap().
		 *
		 *	if (error != EPERM)
		 *              error = 0;
		 */

		error = 0;

		vnode_lock_spin(vp);

		if (error == 0) {
			/* first mapping of this vnode gets an extra vnode ref below */
			if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
				need_ref = 1;
			}
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
			if (flags & PROT_WRITE) {
				SET(uip->ui_flags, UI_MAPPEDWRITE);
			}
		}
		/* done; release UI_MAPBUSY and wake any waiters */
		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup(&uip->ui_flags);
		}

		if (need_ref) {
			/*
			 * Make sure we get a ref as we can't unwind from here
			 */
			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
				panic("%s : VNODE_REF_FORCE failed", __FUNCTION__);
			}
			/*
			 * Vnodes that are on "unreliable" media (like disk
			 * images, network filesystems, 3rd-party filesystems,
			 * and possibly external devices) could see their
			 * contents be changed via the backing store without
			 * triggering copy-on-write, so we can't fully rely
			 * on copy-on-write and might have to resort to
			 * copy-on-read to protect "privileged" processes and
			 * prevent privilege escalation.
			 *
			 * The root filesystem is considered "reliable" because
			 * there's not much point in trying to protect
			 * ourselves from such a vulnerability and the extra
			 * cost of copy-on-read (CPU time and memory pressure)
			 * could result in some serious regressions.
			 */
			if (vp->v_mount != NULL &&
			    ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
			    vnode_on_reliable_media(vp))) {
				/*
				 * This vnode is deemed "reliable" so mark
				 * its VM object as "trusted".
				 */
				memory_object_mark_trusted(uip->ui_control);
			} else {
//				printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
			}
		}
	}
	return error;
}
1954 
1955 
1956 /*
1957  * ubc_destroy_named
1958  *
1959  * Destroy the named memory object associated with the ubc_info control object
1960  * associated with the designated vnode, if there is a ubc_info associated
1961  * with the vnode, and a control object is associated with it
1962  *
1963  * Parameters:	vp			The designated vnode
1964  *
1965  * Returns:	(void)
1966  *
1967  * Notes:	This function is called on vnode termination for all vnodes,
1968  *		and must therefore not assume that there is a ubc_info that is
1969  *		associated with the vnode, nor that there is a control object
1970  *		associated with the ubc_info.
1971  *
1972  *		If all the conditions necessary are present, this function
 *		calls memory_object_destroy(), which will in turn end up
1974  *		calling ubc_unmap() to release any vnode references that were
1975  *		established via ubc_map().
1976  *
1977  *		IMPORTANT: This is an internal use function that is used
1978  *		exclusively by the internal use function vclean().
1979  */
1980 __private_extern__ void
ubc_destroy_named(vnode_t vp)1981 ubc_destroy_named(vnode_t vp)
1982 {
1983 	memory_object_control_t control;
1984 	struct ubc_info *uip;
1985 	kern_return_t kret;
1986 
1987 	if (UBCINFOEXISTS(vp)) {
1988 		uip = vp->v_ubcinfo;
1989 
1990 		/* Terminate the memory object  */
1991 		control = ubc_getobject(vp, UBC_HOLDOBJECT);
1992 		if (control != MEMORY_OBJECT_CONTROL_NULL) {
1993 			kret = memory_object_destroy(control, 0);
1994 			if (kret != KERN_SUCCESS) {
1995 				panic("ubc_destroy_named: memory_object_destroy failed");
1996 			}
1997 		}
1998 	}
1999 }
2000 
2001 
2002 /*
2003  * ubc_isinuse
2004  *
2005  * Determine whether or not a vnode is currently in use by ubc at a level in
2006  * excess of the requested busycount
2007  *
2008  * Parameters:	vp			The vnode to check
2009  *		busycount		The threshold busy count, used to bias
2010  *					the count usually already held by the
2011  *					caller to avoid races
2012  *
2013  * Returns:	1			The vnode is in use over the threshold
2014  *		0			The vnode is not in use over the
2015  *					threshold
2016  *
2017  * Notes:	Because the vnode is only held locked while actually asking
2018  *		the use count, this function only represents a snapshot of the
2019  *		current state of the vnode.  If more accurate information is
2020  *		required, an additional busycount should be held by the caller
2021  *		and a non-zero busycount used.
2022  *
2023  *		If there is no ubc_info associated with the vnode, this
2024  *		function will report that the vnode is not in use by ubc.
2025  */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	/*
	 * A vnode with no ubc_info is never considered in use by ubc;
	 * otherwise defer to the unlocked-entry variant.
	 */
	if (UBCINFOEXISTS(vp)) {
		return ubc_isinuse_locked(vp, busycount, 0);
	}
	return 0;
}
2034 
2035 
2036 /*
2037  * ubc_isinuse_locked
2038  *
2039  * Determine whether or not a vnode is currently in use by ubc at a level in
2040  * excess of the requested busycount
2041  *
2042  * Parameters:	vp			The vnode to check
2043  *		busycount		The threshold busy count, used to bias
2044  *					the count usually already held by the
2045  *					caller to avoid races
2046  *		locked			True if the vnode is already locked by
2047  *					the caller
2048  *
2049  * Returns:	1			The vnode is in use over the threshold
2050  *		0			The vnode is not in use over the
2051  *					threshold
2052  *
2053  * Notes:	If the vnode is not locked on entry, it is locked while
2054  *		actually asking the use count.  If this is the case, this
2055  *		function only represents a snapshot of the current state of
2056  *		the vnode.  If more accurate information is required, the
2057  *		vnode lock should be held by the caller, otherwise an
2058  *		additional busycount should be held by the caller and a
2059  *		non-zero busycount used.
2060  *
2061  *		If there is no ubc_info associated with the vnode, this
2062  *		function will report that the vnode is not in use by ubc.
2063  */
2064 int
ubc_isinuse_locked(struct vnode * vp,int busycount,int locked)2065 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2066 {
2067 	int retval = 0;
2068 
2069 
2070 	if (!locked) {
2071 		vnode_lock_spin(vp);
2072 	}
2073 
2074 	if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2075 		retval = 1;
2076 	}
2077 
2078 	if (!locked) {
2079 		vnode_unlock(vp);
2080 	}
2081 	return retval;
2082 }
2083 
2084 
2085 /*
2086  * ubc_unmap
2087  *
2088  * Reverse the effects of a ubc_map() call for a given vnode
2089  *
2090  * Parameters:	vp			vnode to unmap from ubc
2091  *
2092  * Returns:	(void)
2093  *
2094  * Notes:	This is an internal use function used by vnode_pager_unmap().
2095  *		It will attempt to obtain a reference on the supplied vnode,
2096  *		and if it can do so, and there is an associated ubc_info, and
2097  *		the flags indicate that it was mapped via ubc_map(), then the
2098  *		flag is cleared, the mapping removed, and the reference taken
2099  *		by ubc_map() is released.
2100  *
2101  *		IMPORTANT: This MUST only be called by the VM
2102  *		to prevent race conditions.
2103  */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	int     need_rele = 0;
	int     need_wakeup = 0;

	/* If we can't get an iocount, the vnode is going away; nothing to undo. */
	if (vnode_getwithref(vp)) {
		return;
	}

	if (UBCINFOEXISTS(vp)) {
		bool want_fsevent = false;

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		/*
		 * Serialize with ubc_map(): wait out any in-flight map/unmap
		 * transition, then claim UI_MAPBUSY for ourselves before
		 * dropping the vnode lock to call out to the filesystem.
		 */
		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
			    PRIBIO, "ubc_unmap", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			/* A writable mapping existed; may need an fsevent below. */
			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
				want_fsevent = true;
			}

			need_rele = 1;

			/*
			 * We want to clear the mapped flags after we've called
			 * VNOP_MNOMAP to avoid certain races and allow
			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
			 */
		}
		vnode_unlock(vp);

		if (need_rele) {
			vfs_context_t ctx = vfs_context_current();

			/* Notify the filesystem that the last mapping is gone. */
			(void)VNOP_MNOMAP(vp, ctx);

#if CONFIG_FSE
			/*
			 * Why do we want an fsevent here?  Normally the
			 * content modified fsevent is posted when a file is
			 * closed and only if it's written to via conventional
			 * means.  It's perfectly legal to close a file and
			 * keep your mappings and we don't currently track
			 * whether it was written to via a mapping.
			 * Therefore, we need to post an fsevent here if the
			 * file was mapped writable.  This may result in false
			 * events, i.e. we post a notification when nothing
			 * has really changed.
			 */
			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
				add_fsevent(FSE_CONTENT_MODIFIED_NO_HLINK, ctx,
				    FSE_ARG_VNODE, vp,
				    FSE_ARG_DONE);
			}
#endif

			/* Release the reference taken by ubc_map(). */
			vnode_rele(vp);
		}

		vnode_lock_spin(vp);

		/* Only clear the mapped state now that VNOP_MNOMAP has run. */
		if (need_rele) {
			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
		}

		CLR(uip->ui_flags, UI_MAPBUSY);

		/* Wake any thread stalled in the UI_MAPBUSY loop above. */
		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup(&uip->ui_flags);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}
2194 
2195 
2196 /*
2197  * ubc_page_op
2198  *
2199  * Manipulate individual page state for a vnode with an associated ubc_info
2200  * with an associated memory object control.
2201  *
2202  * Parameters:	vp			The vnode backing the page
2203  *		f_offset		A file offset interior to the page
2204  *		ops			The operations to perform, as a bitmap
2205  *					(see below for more information)
2206  *		phys_entryp		The address of a ppnum_t; may be NULL
2207  *					to ignore
2208  *		flagsp			A pointer to an int to contain flags;
2209  *					may be NULL to ignore
2210  *
2211  * Returns:	KERN_SUCCESS		Success
2212  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2213  *					object associated
2214  *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
2215  *					not physically contiguous
2216  *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
2217  *					physically contiguous
2218  *		KERN_FAILURE		If the page cannot be looked up
2219  *
2220  * Implicit Returns:
2221  *		*phys_entryp (modified)	If phys_entryp is non-NULL and
2222  *					UPL_POP_PHYSICAL
2223  *		*flagsp (modified)	If flagsp is non-NULL and there was
2224  *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
2225  *
2226  * Notes:	For object boundaries, it is considerably more efficient to
2227  *		ensure that f_offset is in fact on a page boundary, as this
2228  *		will avoid internal use of the hash table to identify the
2229  *		page, and would therefore skip a number of early optimizations.
2230  *		Since this is a page operation anyway, the caller should try
2231  *		to pass only a page aligned offset because of this.
2232  *
2233  *		*flagsp may be modified even if this function fails.  If it is
2234  *		modified, it will contain the condition of the page before the
2235  *		requested operation was attempted; these will only include the
2236  *		bitmap flags, and not the PL_POP_PHYSICAL, UPL_POP_DUMP,
2237  *		UPL_POP_SET, or UPL_POP_CLR bits.
2238  *
2239  *		The flags field may contain a specific operation, such as
2240  *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
2241  *
2242  *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
2243  *						*phys_entryp and successful, set
2244  *						*phys_entryp
2245  *		o	UPL_POP_DUMP		Dump the specified page
2246  *
2247  *		Otherwise, it is treated as a bitmap of one or more page
2248  *		operations to perform on the final memory object; allowable
2249  *		bit values are:
2250  *
2251  *		o	UPL_POP_DIRTY		The page is dirty
2252  *		o	UPL_POP_PAGEOUT		The page is paged out
2253  *		o	UPL_POP_PRECIOUS	The page is precious
2254  *		o	UPL_POP_ABSENT		The page is absent
2255  *		o	UPL_POP_BUSY		The page is busy
2256  *
2257  *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
2259  *		modified, exactly ONE of the following bits should be set:
2260  *
2261  *		o	UPL_POP_SET		Set the current bitmap bits
2262  *		o	UPL_POP_CLR		Clear the current bitmap bits
2263  *
 *		Thus to effect a combination of setting and clearing, it may be
2265  *		necessary to call this function twice.  If this is done, the
2266  *		set should be used before the clear, since clearing may trigger
2267  *		a wakeup on the destination page, and if the page is backed by
2268  *		an encrypted swap file, setting will trigger the decryption
2269  *		needed before the wakeup occurs.
2270  */
2271 kern_return_t
ubc_page_op(struct vnode * vp,off_t f_offset,int ops,ppnum_t * phys_entryp,int * flagsp)2272 ubc_page_op(
2273 	struct vnode    *vp,
2274 	off_t           f_offset,
2275 	int             ops,
2276 	ppnum_t *phys_entryp,
2277 	int             *flagsp)
2278 {
2279 	memory_object_control_t         control;
2280 
2281 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2282 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2283 		return KERN_INVALID_ARGUMENT;
2284 	}
2285 
2286 	return memory_object_page_op(control,
2287 	           (memory_object_offset_t)f_offset,
2288 	           ops,
2289 	           phys_entryp,
2290 	           flagsp);
2291 }
2292 
2293 
2294 /*
2295  * ubc_range_op
2296  *
2297  * Manipulate page state for a range of memory for a vnode with an associated
2298  * ubc_info with an associated memory object control, when page level state is
2299  * not required to be returned from the call (i.e. there are no phys_entryp or
2300  * flagsp parameters to this call, and it takes a range which may contain
2301  * multiple pages, rather than an offset interior to a single page).
2302  *
2303  * Parameters:	vp			The vnode backing the page
2304  *		f_offset_beg		A file offset interior to the start page
2305  *		f_offset_end		A file offset interior to the end page
2306  *		ops			The operations to perform, as a bitmap
2307  *					(see below for more information)
2308  *		range			The address of an int; may be NULL to
2309  *					ignore
2310  *
2311  * Returns:	KERN_SUCCESS		Success
2312  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2313  *					object associated
2314  *		KERN_INVALID_OBJECT	If the object is physically contiguous
2315  *
2316  * Implicit Returns:
2317  *		*range (modified)	If range is non-NULL, its contents will
2318  *					be modified to contain the number of
2319  *					bytes successfully operated upon.
2320  *
2321  * Notes:	IMPORTANT: This function cannot be used on a range that
2322  *		consists of physically contiguous pages.
2323  *
2324  *		For object boundaries, it is considerably more efficient to
2325  *		ensure that f_offset_beg and f_offset_end are in fact on page
2326  *		boundaries, as this will avoid internal use of the hash table
2327  *		to identify the page, and would therefore skip a number of
2328  *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
2331  *
2332  *		*range will be modified only if this function succeeds.
2333  *
2334  *		The flags field MUST contain a specific operation; allowable
2335  *		values are:
2336  *
2337  *		o	UPL_ROP_ABSENT	Returns the extent of the range
2338  *					presented which is absent, starting
2339  *					with the start address presented
2340  *
2341  *		o	UPL_ROP_PRESENT	Returns the extent of the range
2342  *					presented which is present (resident),
2343  *					starting with the start address
2344  *					presented
2345  *		o	UPL_ROP_DUMP	Dump the pages which are found in the
2346  *					target object for the target range.
2347  *
2348  *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2349  *		multiple regions in the range, only the first matching region
2350  *		is returned.
2351  */
2352 kern_return_t
ubc_range_op(struct vnode * vp,off_t f_offset_beg,off_t f_offset_end,int ops,int * range)2353 ubc_range_op(
2354 	struct vnode    *vp,
2355 	off_t           f_offset_beg,
2356 	off_t           f_offset_end,
2357 	int             ops,
2358 	int             *range)
2359 {
2360 	memory_object_control_t         control;
2361 
2362 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2363 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2364 		return KERN_INVALID_ARGUMENT;
2365 	}
2366 
2367 	return memory_object_range_op(control,
2368 	           (memory_object_offset_t)f_offset_beg,
2369 	           (memory_object_offset_t)f_offset_end,
2370 	           ops,
2371 	           range);
2372 }
2373 
2374 
2375 /*
2376  * ubc_create_upl
2377  *
2378  * Given a vnode, cause the population of a portion of the vm_object; based on
2379  * the nature of the request, the pages returned may contain valid data, or
2380  * they may be uninitialized.
2381  *
2382  * Parameters:	vp			The vnode from which to create the upl
2383  *		f_offset		The start offset into the backing store
2384  *					represented by the vnode
2385  *		bufsize			The size of the upl to create
2386  *		uplp			Pointer to the upl_t to receive the
2387  *					created upl; MUST NOT be NULL
2388  *		plp			Pointer to receive the internal page
2389  *					list for the created upl; MAY be NULL
2390  *					to ignore
2391  *
2392  * Returns:	KERN_SUCCESS		The requested upl has been created
2393  *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
2394  *					multiple of the page size
2395  *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
2396  *					the vnode, or there is no memory object
2397  *					control associated with the ubc_info
2398  *	memory_object_upl_request:KERN_INVALID_VALUE
2399  *					The supplied upl_flags argument is
2400  *					invalid
2401  * Implicit Returns:
2402  *		*uplp (modified)
2403  *		*plp (modified)		If non-NULL, the value of *plp will be
2404  *					modified to point to the internal page
2405  *					list; this modification may occur even
2406  *					if this function is unsuccessful, in
2407  *					which case the contents may be invalid
2408  *
2409  * Note:	If successful, the returned *uplp MUST subsequently be freed
2410  *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2411  *		ubc_upl_abort(), or ubc_upl_abort_range().
2412  */
2413 kern_return_t
ubc_create_upl_external(struct vnode * vp,off_t f_offset,int bufsize,upl_t * uplp,upl_page_info_t ** plp,int uplflags)2414 ubc_create_upl_external(
2415 	struct vnode    *vp,
2416 	off_t           f_offset,
2417 	int             bufsize,
2418 	upl_t           *uplp,
2419 	upl_page_info_t **plp,
2420 	int             uplflags)
2421 {
2422 	return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2423 }
2424 
kern_return_t
ubc_create_upl_kernel(
	struct vnode    *vp,
	off_t           f_offset,
	int             bufsize,
	upl_t           *uplp,
	upl_page_info_t **plp,
	int             uplflags,
	vm_tag_t tag)
{
	memory_object_control_t         control;
	kern_return_t                   kr;

	/* Initialize outputs so callers see NULL on every failure path. */
	if (plp != NULL) {
		*plp = NULL;
	}
	*uplp = NULL;

	/* bufsize must be a whole multiple of 4KB (low 12 bits clear). */
	if (bufsize & 0xfff) {
		return KERN_INVALID_ARGUMENT;
	}

	if (bufsize > MAX_UPL_SIZE_BYTES) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Translate the caller's UBC-level request flags into the internal
	 * UPL flags handed to memory_object_upl_request().  For the three
	 * UBC operations, only UPL_RET_ONLY_DIRTY survives from the caller's
	 * flags; everything else is replaced wholesale.
	 */
	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
		if (uplflags & UPL_UBC_MSYNC) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;
		} else if (uplflags & UPL_UBC_PAGEOUT) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY) {
				uplflags |= UPL_NOBLOCK;
			}

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
			    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			/* UPL_UBC_PAGEIN */
			uplflags |= UPL_RET_ONLY_ABSENT |
			    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE) {
				uplflags |= UPL_NOBLOCK;
			}
		}
	} else {
		/* Legacy path: caller passed raw UPL flags. */
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
		} else {
			uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		}
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
	if (kr == KERN_SUCCESS && plp != NULL) {
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	}
	return kr;
}
2504 
2505 
2506 /*
2507  * ubc_upl_maxbufsize
2508  *
2509  * Return the maximum bufsize ubc_create_upl( ) will take.
2510  *
2511  * Parameters:	none
2512  *
2513  * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
2514  */
2515 upl_size_t
ubc_upl_maxbufsize(void)2516 ubc_upl_maxbufsize(
2517 	void)
2518 {
2519 	return MAX_UPL_SIZE_BYTES;
2520 }
2521 
2522 /*
2523  * ubc_upl_map
2524  *
 * Map the page list associated with the supplied upl into the kernel virtual
2526  * address space at the virtual address indicated by the dst_addr argument;
2527  * the entire upl is mapped
2528  *
2529  * Parameters:	upl			The upl to map
2530  *		dst_addr		The address at which to map the upl
2531  *
2532  * Returns:	KERN_SUCCESS		The upl has been mapped
2533  *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
2534  *		KERN_FAILURE		The upl is already mapped
2535  *	vm_map_enter:KERN_INVALID_ARGUMENT
2536  *					A failure code from vm_map_enter() due
2537  *					to an invalid argument
2538  */
2539 kern_return_t
ubc_upl_map(upl_t upl,vm_offset_t * dst_addr)2540 ubc_upl_map(
2541 	upl_t           upl,
2542 	vm_offset_t     *dst_addr)
2543 {
2544 	return vm_upl_map(kernel_map, upl, dst_addr);
2545 }
2546 
2547 /*
2548  * ubc_upl_map_range:- similar to ubc_upl_map but the focus is on a range
 * of the UPL. Takes an offset, size, and protection so that only a part
2550  * of the UPL can be mapped with the right protections.
2551  */
2552 kern_return_t
ubc_upl_map_range(upl_t upl,vm_offset_t offset_to_map,vm_size_t size_to_map,vm_prot_t prot_to_map,vm_offset_t * dst_addr)2553 ubc_upl_map_range(
2554 	upl_t           upl,
2555 	vm_offset_t     offset_to_map,
2556 	vm_size_t       size_to_map,
2557 	vm_prot_t       prot_to_map,
2558 	vm_offset_t     *dst_addr)
2559 {
2560 	return vm_upl_map_range(kernel_map, upl, offset_to_map, size_to_map, prot_to_map, dst_addr);
2561 }
2562 
2563 
2564 /*
2565  * ubc_upl_unmap
2566  *
 * Unmap the page list associated with the supplied upl from the kernel virtual
2568  * address space; the entire upl is unmapped.
2569  *
2570  * Parameters:	upl			The upl to unmap
2571  *
2572  * Returns:	KERN_SUCCESS		The upl has been unmapped
2573  *		KERN_FAILURE		The upl is not currently mapped
2574  *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
2575  */
2576 kern_return_t
ubc_upl_unmap(upl_t upl)2577 ubc_upl_unmap(
2578 	upl_t   upl)
2579 {
2580 	return vm_upl_unmap(kernel_map, upl);
2581 }
2582 
2583 /*
2584  * ubc_upl_unmap_range:- similar to ubc_upl_unmap but the focus is
2585  * on part of the UPL that is mapped. The offset and size parameter
2586  * specifies what part of the UPL needs to be unmapped.
2587  *
 * Note: Currently offset & size are unused as we always initiate the unmap from the
2589  * very beginning of the UPL's mapping and track the mapped size in the UPL. But we
2590  * might want to allow unmapping a UPL in the middle, for example, and we can use the
2591  * offset + size parameters for that purpose.
2592  */
2593 kern_return_t
ubc_upl_unmap_range(upl_t upl,vm_offset_t offset_to_unmap,vm_size_t size_to_unmap)2594 ubc_upl_unmap_range(
2595 	upl_t   upl,
2596 	vm_offset_t     offset_to_unmap,
2597 	vm_size_t       size_to_unmap)
2598 {
2599 	return vm_upl_unmap_range(kernel_map, upl, offset_to_unmap, size_to_unmap);
2600 }
2601 
2602 
2603 /*
2604  * ubc_upl_commit
2605  *
2606  * Commit the contents of the upl to the backing store
2607  *
2608  * Parameters:	upl			The upl to commit
2609  *
2610  * Returns:	KERN_SUCCESS		The upl has been committed
2611  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2612  *		KERN_FAILURE		The supplied upl does not represent
2613  *					device memory, and the offset plus the
2614  *					size would exceed the actual size of
2615  *					the upl
2616  *
2617  * Notes:	In practice, the only return value for this function should be
2618  *		KERN_SUCCESS, unless there has been data structure corruption;
2619  *		since the upl is deallocated regardless of success or failure,
2620  *		there's really nothing to do about this other than panic.
2621  *
2622  *		IMPORTANT: Use of this function should not be mixed with use of
2623  *		ubc_upl_commit_range(), due to the unconditional deallocation
2624  *		by this function.
2625  */
2626 kern_return_t
ubc_upl_commit(upl_t upl)2627 ubc_upl_commit(
2628 	upl_t                   upl)
2629 {
2630 	upl_page_info_t *pl;
2631 	kern_return_t   kr;
2632 
2633 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2634 	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2635 	upl_deallocate(upl);
2636 	return kr;
2637 }
2638 
2639 
2640 /*
 * ubc_upl_commit_range
2642  *
2643  * Commit the contents of the specified range of the upl to the backing store
2644  *
2645  * Parameters:	upl			The upl to commit
2646  *		offset			The offset into the upl
2647  *		size			The size of the region to be committed,
2648  *					starting at the specified offset
2649  *		flags			commit type (see below)
2650  *
2651  * Returns:	KERN_SUCCESS		The range has been committed
2652  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2653  *		KERN_FAILURE		The supplied upl does not represent
2654  *					device memory, and the offset plus the
2655  *					size would exceed the actual size of
2656  *					the upl
2657  *
2658  * Notes:	IMPORTANT: If the commit is successful, and the object is now
2659  *		empty, the upl will be deallocated.  Since the caller cannot
2660  *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2661  *		should generally only be used when the offset is 0 and the size
2662  *		is equal to the upl size.
2663  *
 *		The flags argument is a bitmap of flags on the range of pages in
2665  *		the upl to be committed; allowable flags are:
2666  *
2667  *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
2668  *							both empty and has been
2669  *							successfully committed
2670  *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each pages dirty
2671  *							bit; will prevent a
2672  *							later pageout
2673  *		o	UPL_COMMIT_SET_DIRTY		Set each pages dirty
2674  *							bit; will cause a later
2675  *							pageout
2676  *		o	UPL_COMMIT_INACTIVATE		Clear each pages
2677  *							reference bit; the page
2678  *							will not be accessed
2679  *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
2680  *							become busy when an
2681  *							IOMemoryDescriptor is
2682  *							mapped or redirected,
2683  *							and we have to wait for
2684  *							an IOKit driver
2685  *
2686  *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2687  *		not be specified by the caller.
2688  *
2689  *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2690  *		mutually exclusive, and should not be combined.
2691  */
2692 kern_return_t
ubc_upl_commit_range(upl_t upl,upl_offset_t offset,upl_size_t size,int flags)2693 ubc_upl_commit_range(
2694 	upl_t                   upl,
2695 	upl_offset_t            offset,
2696 	upl_size_t              size,
2697 	int                             flags)
2698 {
2699 	upl_page_info_t *pl;
2700 	boolean_t               empty;
2701 	kern_return_t   kr;
2702 
2703 	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2704 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
2705 	}
2706 
2707 	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2708 		return KERN_INVALID_ARGUMENT;
2709 	}
2710 
2711 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2712 
2713 	kr = upl_commit_range(upl, offset, size, flags,
2714 	    pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2715 
2716 	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2717 		upl_deallocate(upl);
2718 	}
2719 
2720 	return kr;
2721 }
2722 
2723 
2724 /*
2725  * ubc_upl_abort_range
2726  *
2727  * Abort the contents of the specified range of the specified upl
2728  *
2729  * Parameters:	upl			The upl to abort
2730  *		offset			The offset into the upl
2731  *		size			The size of the region to be aborted,
2732  *					starting at the specified offset
2733  *		abort_flags		abort type (see below)
2734  *
2735  * Returns:	KERN_SUCCESS		The range has been aborted
2736  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2737  *		KERN_FAILURE		The supplied upl does not represent
2738  *					device memory, and the offset plus the
2739  *					size would exceed the actual size of
2740  *					the upl
2741  *
2742  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2743  *		empty, the upl will be deallocated.  Since the caller cannot
2744  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2745  *		should generally only be used when the offset is 0 and the size
2746  *		is equal to the upl size.
2747  *
2748  *		The abort_flags argument is a bitmap of flags on the range of
2749  *		pages in the upl to be aborted; allowable flags are:
2750  *
2751  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2752  *						empty and has been successfully
2753  *						aborted
2754  *		o	UPL_ABORT_RESTART	The operation must be restarted
2755  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2756  *		o	UPL_ABORT_ERROR		An I/O error occurred
2757  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2758  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2759  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2760  *
2761  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2762  *		not be specified by the caller.  It is intended to fulfill the
2763  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2764  *		ubc_upl_commit_range(), but is never referenced internally.
2765  *
2766  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2767  *		referenced; do not use it.
2768  */
2769 kern_return_t
ubc_upl_abort_range(upl_t upl,upl_offset_t offset,upl_size_t size,int abort_flags)2770 ubc_upl_abort_range(
2771 	upl_t                   upl,
2772 	upl_offset_t            offset,
2773 	upl_size_t              size,
2774 	int                             abort_flags)
2775 {
2776 	kern_return_t   kr;
2777 	boolean_t               empty = FALSE;
2778 
2779 	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2780 		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2781 	}
2782 
2783 	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2784 
2785 	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2786 		upl_deallocate(upl);
2787 	}
2788 
2789 	return kr;
2790 }
2791 
2792 
2793 /*
2794  * ubc_upl_abort
2795  *
2796  * Abort the contents of the specified upl
2797  *
2798  * Parameters:	upl			The upl to abort
2799  *		abort_type		abort type (see below)
2800  *
2801  * Returns:	KERN_SUCCESS		The range has been aborted
2802  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2803  *		KERN_FAILURE		The supplied upl does not represent
2804  *					device memory, and the offset plus the
2805  *					size would exceed the actual size of
2806  *					the upl
2807  *
2808  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2809  *		empty, the upl will be deallocated.  Since the caller cannot
2810  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2811  *		should generally only be used when the offset is 0 and the size
2812  *		is equal to the upl size.
2813  *
2814  *		The abort_type is a bitmap of flags on the range of
2815  *		pages in the upl to be aborted; allowable flags are:
2816  *
2817  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2818  *						empty and has been successfully
2819  *						aborted
2820  *		o	UPL_ABORT_RESTART	The operation must be restarted
2821  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2822  *		o	UPL_ABORT_ERROR		An I/O error occurred
2823  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2824  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2825  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2826  *
2827  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2828  *		not be specified by the caller.  It is intended to fulfill the
2829  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2830  *		ubc_upl_commit_range(), but is never referenced internally.
2831  *
2832  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2833  *		referenced; do not use it.
2834  */
2835 kern_return_t
ubc_upl_abort(upl_t upl,int abort_type)2836 ubc_upl_abort(
2837 	upl_t                   upl,
2838 	int                             abort_type)
2839 {
2840 	kern_return_t   kr;
2841 
2842 	kr = upl_abort(upl, abort_type);
2843 	upl_deallocate(upl);
2844 	return kr;
2845 }
2846 
2847 
2848 /*
2849  * ubc_upl_pageinfo
2850  *
2851  *  Retrieve the internal page list for the specified upl
2852  *
2853  * Parameters:	upl			The upl to obtain the page list from
2854  *
2855  * Returns:	!NULL			The (upl_page_info_t *) for the page
2856  *					list internal to the upl
2857  *		NULL			Error/no page list associated
2858  *
2859  * Notes:	IMPORTANT: The function is only valid on internal objects
2860  *		where the list request was made with the UPL_INTERNAL flag.
2861  *
2862  *		This function is a utility helper function, since some callers
2863  *		may not have direct access to the header defining the macro,
2864  *		due to abstraction layering constraints.
2865  */
2866 upl_page_info_t *
ubc_upl_pageinfo(upl_t upl)2867 ubc_upl_pageinfo(
2868 	upl_t                   upl)
2869 {
2870 	return UPL_GET_INTERNAL_PAGE_LIST(upl);
2871 }
2872 
2873 
2874 int
UBCINFOEXISTS(const struct vnode * vp)2875 UBCINFOEXISTS(const struct vnode * vp)
2876 {
2877 	return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
2878 }
2879 
2880 
2881 void
ubc_upl_range_needed(upl_t upl,int index,int count)2882 ubc_upl_range_needed(
2883 	upl_t           upl,
2884 	int             index,
2885 	int             count)
2886 {
2887 	upl_range_needed(upl, index, count);
2888 }
2889 
2890 boolean_t
ubc_is_mapped(const struct vnode * vp,boolean_t * writable)2891 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
2892 {
2893 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
2894 		return FALSE;
2895 	}
2896 	if (writable) {
2897 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
2898 	}
2899 	return TRUE;
2900 }
2901 
2902 boolean_t
ubc_is_mapped_writable(const struct vnode * vp)2903 ubc_is_mapped_writable(const struct vnode *vp)
2904 {
2905 	boolean_t writable;
2906 	return ubc_is_mapped(vp, &writable) && writable;
2907 }
2908 
2909 
2910 /*
2911  * CODE SIGNING
2912  */
/* Aggregate accounting for all code-signature blobs currently loaded. */
static atomic_size_t cs_blob_size = 0;               /* total bytes across all live blobs */
static atomic_uint_fast32_t cs_blob_count = 0;       /* number of live blobs */
static atomic_size_t cs_blob_size_peak = 0;          /* high-water mark of cs_blob_size */
static atomic_size_t cs_blob_size_max = 0;           /* largest single blob ever seen */
static atomic_uint_fast32_t cs_blob_count_peak = 0;  /* high-water mark of cs_blob_count */

/* Expose the counters above, read-only, under the vm.* sysctl namespace. */
SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
2924 
2925 /*
2926  * Function: csblob_parse_teamid
2927  *
2928  * Description: This function returns a pointer to the team id
2929  *               stored within the codedirectory of the csblob.
2930  *               If the codedirectory predates team-ids, it returns
2931  *               NULL.
2932  *               This does not copy the name but returns a pointer to
2933  *               it within the CD. Subsequently, the CD must be
2934  *               available when this is used.
2935  */
2936 
2937 static const char *
csblob_parse_teamid(struct cs_blob * csblob)2938 csblob_parse_teamid(struct cs_blob *csblob)
2939 {
2940 	const CS_CodeDirectory *cd;
2941 
2942 	cd = csblob->csb_cd;
2943 
2944 	if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
2945 		return NULL;
2946 	}
2947 
2948 	if (cd->teamOffset == 0) {
2949 		return NULL;
2950 	}
2951 
2952 	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
2953 	if (cs_debug > 1) {
2954 		printf("found team-id %s in cdblob\n", name);
2955 	}
2956 
2957 	return name;
2958 }
2959 
2960 
2961 kern_return_t
ubc_cs_blob_allocate(vm_offset_t * blob_addr_p,vm_size_t * blob_size_p)2962 ubc_cs_blob_allocate(
2963 	vm_offset_t     *blob_addr_p,
2964 	vm_size_t       *blob_size_p)
2965 {
2966 	kern_return_t   kr = KERN_FAILURE;
2967 	vm_size_t               allocation_size = 0;
2968 
2969 	if (!blob_addr_p || !blob_size_p) {
2970 		return KERN_INVALID_ARGUMENT;
2971 	}
2972 	allocation_size = *blob_size_p;
2973 
2974 	{
2975 		*blob_addr_p = (vm_offset_t) kalloc_tag(allocation_size, VM_KERN_MEMORY_SECURITY);
2976 
2977 		if (*blob_addr_p == 0) {
2978 			kr = KERN_NO_SPACE;
2979 		} else {
2980 			kr = KERN_SUCCESS;
2981 		}
2982 	}
2983 
2984 	if (kr == KERN_SUCCESS) {
2985 		if (*blob_addr_p) {
2986 			memset((void*)*blob_addr_p, 0, allocation_size);
2987 			*blob_size_p = allocation_size;
2988 		} else {
2989 			printf("CODE SIGNING: cs_blob allocation returned success, but received a NULL pointer\n");
2990 			kr = KERN_NO_SPACE;
2991 		}
2992 	}
2993 
2994 	return kr;
2995 }
2996 
2997 void
ubc_cs_blob_deallocate(vm_offset_t blob_addr,vm_size_t blob_size)2998 ubc_cs_blob_deallocate(
2999 	vm_offset_t     blob_addr,
3000 	vm_size_t       blob_size)
3001 {
3002 	{
3003 		kfree(blob_addr, blob_size);
3004 	}
3005 }
3006 
3007 /*
3008  * Some codesigned files use a lowest common denominator page size of
3009  * 4KiB, but can be used on systems that have a runtime page size of
3010  * 16KiB. Since faults will only occur on 16KiB ranges in
3011  * cs_validate_range(), we can convert the original Code Directory to
3012  * a multi-level scheme where groups of 4 hashes are combined to form
3013  * a new hash, which represents 16KiB in the on-disk file.  This can
3014  * reduce the wired memory requirement for the Code Directory by
3015  * 75%. Care must be taken for binaries that use the "fourk" VM pager
3016  * for unaligned access, which may still attempt to validate on
3017  * non-16KiB multiples for compatibility with 3rd party binaries.
3018  */
/*
 * Decide whether this blob's Code Directory is eligible for conversion to
 * the multi-level hash scheme described in the block comment above.
 * Returns TRUE only when every structural precondition holds.
 */
static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
{
	const CS_CodeDirectory *cd;


	/*
	 * Only applies to binaries that ship as part of the OS,
	 * primarily the shared cache.
	 */
	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
		return FALSE;
	}

	/*
	 * If the runtime page size matches the code signing page
	 * size, there is no work to do.
	 */
	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
		return FALSE;
	}

	cd = blob->csb_cd;

	/*
	 * There must be a valid integral multiple of hashes
	 * (PAGE_MASK >> csb_hash_pageshift is the per-native-page
	 * hash count minus one, used as an alignment mask below).
	 */
	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
		return FALSE;
	}

	/*
	 * Scatter lists must also have ranges that have an integral number of hashes
	 */
	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		const SC_Scatter *scatter = (const SC_Scatter*)
		    ((const char*)cd + ntohl(cd->scatterOffset));
		/* iterate all scatter structs to make sure they are all aligned */
		do {
			uint32_t sbase = ntohl(scatter->base);
			uint32_t scount = ntohl(scatter->count);

			/* last scatter? (a zero count terminates the list) */
			if (scount == 0) {
				break;
			}

			/* both the base page and the page count must be aligned */
			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			scatter++;
		} while (1);
	}

	/* Covered range must be a multiple of the new page size */
	if (ntohl(cd->codeLimit) & PAGE_MASK) {
		return FALSE;
	}

	/* All checks pass */
	return TRUE;
}
3086 
3087 /*
3088  * Given a validated cs_blob, we reformat the structure to only include
3089  * the blobs which are required by the kernel for our current platform.
3090  * This saves significant memory with agile signatures.
3091  *
3092  * To support rewriting the code directory, potentially through
3093  * multilevel hashes, we provide a mechanism to allocate a code directory
3094  * of a specified size and zero it out --> caller can fill it in.
3095  *
3096  * We don't need to perform a lot of overflow checks as the assumption
3097  * here is that the cs_blob has already been validated.
3098  */
/*
 * Parameters:	blob			validated source blob (read-only here)
 *		ret_mem_kaddr		out (optional): address of new superblob
 *		ret_mem_size		out (optional): its allocated size
 *		code_directory_size	if non-zero, size of a zero-filled code
 *					directory the caller will fill in and
 *					re-validate afterwards
 *		code_directory		out (optional): pointer to the best code
 *					directory inside the new superblob
 *
 * Returns:	0 on success (caller owns the new allocation), else EINVAL/ENOMEM.
 */
static int
ubc_cs_reconstitute_code_signature(
	const struct cs_blob * const blob,
	vm_address_t * const ret_mem_kaddr,
	vm_size_t * const ret_mem_size,
	vm_size_t code_directory_size,
	CS_CodeDirectory ** const code_directory
	)
{
	vm_address_t new_blob_addr = 0;
	vm_size_t new_blob_size = 0;
	vm_size_t new_code_directory_size = 0;
	const CS_GenericBlob *best_code_directory = NULL;
	const CS_GenericBlob *first_code_directory = NULL;
	const CS_GenericBlob *der_entitlements_blob = NULL;
	const CS_GenericBlob *entitlements_blob = NULL;
	const CS_GenericBlob *cms_blob = NULL;
	CS_SuperBlob *superblob = NULL;
	uint32_t num_blobs = 0;
	uint32_t blob_index = 0;
	uint32_t blob_offset = 0;
	kern_return_t ret;
	int err;

	if (!blob) {
		if (cs_debug > 1) {
			printf("CODE SIGNING: CS Blob passed in is NULL\n");
		}
		return EINVAL;
	}

	best_code_directory = (const CS_GenericBlob*)blob->csb_cd;
	if (!best_code_directory) {
		/* This case can never happen, and it is a sign of bad things */
		panic("CODE SIGNING: Validated CS Blob has no code directory");
	}

	/* default the new CD size to the existing best CD's length */
	new_code_directory_size = code_directory_size;
	if (new_code_directory_size == 0) {
		new_code_directory_size = ntohl(best_code_directory->length);
	}

	/*
	 * A code signature can contain multiple code directories, each of which contains hashes
	 * of pages based on a hashing algorithm. The kernel selects which hashing algorithm is
	 * the strongest, and consequently, marks one of these code directories as the best
	 * matched one. More often than not, the best matched one is _not_ the first one.
	 *
	 * However, the CMS blob which cryptographically verifies the code signature is only
	 * signed against the first code directory. Therefore, if the CMS blob is present, we also
	 * need the first code directory to be able to verify it. Given this, we organize the
	 * new cs_blob as following order:
	 *
	 * 1. best code directory
	 * 2. DER encoded entitlements blob (if present)
	 * 3. entitlements blob (if present)
	 * 4. cms blob (if present)
	 * 5. first code directory (if not already the best match, and if cms blob is present)
	 *
	 * This order is chosen deliberately, as later on, we expect to get rid of the CMS blob
	 * and the first code directory once their verification is complete.
	 */

	/* Storage for the super blob header */
	new_blob_size += sizeof(CS_SuperBlob);

	/* Guaranteed storage for the best code directory */
	new_blob_size += sizeof(CS_BlobIndex);
	new_blob_size += new_code_directory_size;
	num_blobs += 1;

	/* Conditional storage for the DER entitlements blob */
	der_entitlements_blob = blob->csb_der_entitlements_blob;
	if (der_entitlements_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(der_entitlements_blob->length);
		num_blobs += 1;
	}

	/* Conditional storage for the entitlements blob */
	entitlements_blob = blob->csb_entitlements_blob;
	if (entitlements_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(entitlements_blob->length);
		num_blobs += 1;
	}

	/* Conditional storage for the CMS blob */
	cms_blob = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
	if (cms_blob) {
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(cms_blob->length);
		num_blobs += 1;
	}

	/*
	 * Conditional storage for the first code directory.
	 * This is only needed if a CMS blob exists and the best code directory isn't already
	 * the first one. It is an error if we find a CMS blob but do not find a first code directory.
	 */
	if (cms_blob) {
		first_code_directory = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);
		if (first_code_directory == best_code_directory) {
			/* We don't need the first code directory anymore, since the best one is already it */
			first_code_directory = NULL;
		} else if (first_code_directory) {
			new_blob_size += sizeof(CS_BlobIndex);
			new_blob_size += ntohl(first_code_directory->length);
			num_blobs += 1;
		} else {
			printf("CODE SIGNING: Invalid CS Blob: found CMS blob but not a first code directory\n");
			return EINVAL;
		}
	}

	/*
	 * The blob size could be rounded up to page size here, so we keep a copy
	 * of the actual superblob length as well.
	 */
	vm_size_t new_blob_allocation_size = new_blob_size;
	ret = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_allocation_size);
	if (ret != KERN_SUCCESS) {
		printf("CODE SIGNING: Failed to allocate memory for new code signing blob: %d\n", ret);
		return ENOMEM;
	}

	/*
	 * Fill out the superblob header and then all the blobs in the order listed
	 * above.
	 */
	superblob = (CS_SuperBlob*)new_blob_addr;
	superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
	superblob->length = htonl((uint32_t)new_blob_size);
	superblob->count = htonl(num_blobs);

	blob_index = 0;
	blob_offset = sizeof(CS_SuperBlob) + (num_blobs * sizeof(CS_BlobIndex));

	/* Best code directory */
	superblob->index[blob_index].offset = htonl(blob_offset);
	if (first_code_directory) {
		/* the first CD keeps the primary slot; the best CD becomes an alternate */
		superblob->index[blob_index].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES);
	} else {
		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
	}

	if (code_directory_size > 0) {
		/* We zero out the code directory, as we expect the caller to fill it in */
		memset((void*)(new_blob_addr + blob_offset), 0, new_code_directory_size);
	} else {
		memcpy((void*)(new_blob_addr + blob_offset), best_code_directory, new_code_directory_size);
	}

	if (code_directory) {
		*code_directory = (CS_CodeDirectory*)(new_blob_addr + blob_offset);
	}
	blob_offset += new_code_directory_size;

	/* DER entitlements blob */
	if (der_entitlements_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_DER_ENTITLEMENTS);

		memcpy((void*)(new_blob_addr + blob_offset), der_entitlements_blob, ntohl(der_entitlements_blob->length));
		blob_offset += ntohl(der_entitlements_blob->length);
	}

	/* Entitlements blob */
	if (entitlements_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_ENTITLEMENTS);

		memcpy((void*)(new_blob_addr + blob_offset), entitlements_blob, ntohl(entitlements_blob->length));
		blob_offset += ntohl(entitlements_blob->length);
	}

	/* CMS blob */
	if (cms_blob) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_SIGNATURESLOT);
		memcpy((void*)(new_blob_addr + blob_offset), cms_blob, ntohl(cms_blob->length));
		blob_offset += ntohl(cms_blob->length);
	}

	/* First code directory */
	if (first_code_directory) {
		blob_index += 1;
		superblob->index[blob_index].offset = htonl(blob_offset);
		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
		memcpy((void*)(new_blob_addr + blob_offset), first_code_directory, ntohl(first_code_directory->length));
		blob_offset += ntohl(first_code_directory->length);
	}

	/*
	 * We only validate the blob in case we copied in the best code directory.
	 * In case the code directory size we were passed in wasn't 0, we memset the best
	 * code directory to 0 and expect the caller to fill it in. In the same spirit, we
	 * expect the caller to validate the code signature after they fill in the code
	 * directory.
	 */
	if (code_directory_size == 0) {
		const CS_CodeDirectory *validated_code_directory = NULL;
		const CS_GenericBlob *validated_entitlements_blob = NULL;
		const CS_GenericBlob *validated_der_entitlements_blob = NULL;
		ret = cs_validate_csblob((const uint8_t *)superblob, new_blob_size, &validated_code_directory, &validated_entitlements_blob, &validated_der_entitlements_blob);
		if (ret) {
			printf("CODE SIGNING: Validation of reconstituted blob failed: %d\n", ret);
			err = EINVAL;
			goto fail;
		}
	}

	if (ret_mem_kaddr) {
		*ret_mem_kaddr = new_blob_addr;
	}
	if (ret_mem_size) {
		*ret_mem_size = new_blob_allocation_size;
	}

	return 0;

fail:
	ubc_cs_blob_deallocate(new_blob_addr, new_blob_allocation_size);
	return err;
}
3327 
3328 #if CONFIG_ENFORCE_SIGNED_CODE
3329 /*
3330  * We use this function to clear out unnecessary bits from the code signature
3331  * blob which are no longer needed. We free these bits and give them back to
3332  * the kernel. This is needed since reconstitution includes extra data which is
3333  * needed only for verification but has no point in keeping afterwards.
3334  *
3335  * This results in significant memory reduction, especially for 3rd party apps
3336  * since we also get rid of the CMS blob.
3337  */
static int
ubc_cs_clear_unneeded_code_signature(
	struct cs_blob *blob
	)
{
	CS_SuperBlob *superblob = NULL;
	uint32_t num_blobs = 0;
	vm_size_t last_needed_blob_offset = 0;
	kern_return_t ret = KERN_FAILURE;
	/*
	 * NOTE(review): kmem_allocated is initialized to false and never set
	 * again in this function, so the kmem_free branch below appears to be
	 * dead code — only the reallocate-and-copy path executes. Confirm
	 * whether this is intentional before relying on the kmem path.
	 */
	bool kmem_allocated = false;

	/*
	 * The only blobs we need to keep are the code directory and the entitlements
	 * blob. These should have a certain ordering to them if we know that the
	 * blob has been reconstituted in the past.
	 *
	 * Ordering:
	 * 1. Code directory
	 * 2. DER encoded entitlements (if present)
	 *
	 * We need to clear out the remaining page after these blobs end, and fix up
	 * the superblob for the changes. Things gets a little more complicated for
	 * blobs which may not have been kmem_allocated. For those, we simply just
	 * allocate the new required space and copy into it.
	 */

	if (!blob) {
		if (cs_debug > 1) {
			printf("CODE SIGNING: CS Blob passed in is NULL\n");
		}
		return EINVAL;
	}

	if (!blob->csb_reconstituted) {
		/*
		 * Nothing for us to do, since we can't make any claims about how this
		 * blob may have been ordered.
		 */
		return 0;
	}


	if (!blob->csb_cd) {
		/* This case can never happen, and it is a sign of bad things */
		panic("CODE SIGNING: Validated CS Blob has no code directory");
	}
	superblob = (CS_SuperBlob*)blob->csb_mem_kaddr;

	/* keep only the code directory (index 0 in a reconstituted blob) ... */
	num_blobs = 1;
	last_needed_blob_offset = ntohl(superblob->index[0].offset) + ntohl(blob->csb_cd->length);

	/* ... plus the DER entitlements, when present */
	if (blob->csb_der_entitlements_blob) {
		num_blobs += 1;
		last_needed_blob_offset += ntohl(blob->csb_der_entitlements_blob->length);
	}

	superblob->count = htonl(num_blobs);
	superblob->length = htonl((uint32_t)last_needed_blob_offset);

	/*
	 * There is a chance that the code directory is marked within the superblob as an
	 * alternate code directory. This happens when the first code directory isn't the
	 * best one chosen by the kernel, so to be able to access both the first and the best,
	 * we save the best one as an alternate one. Since we're getting rid of the first one
	 * here, we mark the best one as the first one.
	 */
	superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);

	/*
	 * If we are kmem_allocated, then we can free all the remaining pages which we no longer
	 * need. However, this cannot be done if we didn't allocate page-wise, but byte-wise through
	 * something like kalloc. In the latter case, we just allocate the required space again, and
	 * copy over only the required portion of the superblob.
	 */
	if (kmem_allocated) {
		vm_size_t last_needed_page_offset = round_page(last_needed_blob_offset);
		assert(last_needed_page_offset <= blob->csb_mem_size);

		vm_address_t unneeded_blob_addr = (vm_address_t)blob->csb_mem_kaddr + last_needed_page_offset;
		vm_size_t unneeded_blob_size = blob->csb_mem_size - last_needed_page_offset;

		/* These both need to be page aligned */
		assert((unneeded_blob_addr & PAGE_MASK) == 0);
		assert((unneeded_blob_size & PAGE_MASK) == 0);

		/* Free the unneeded memory */
		if (unneeded_blob_addr && unneeded_blob_size) {
			kmem_free(kernel_map, unneeded_blob_addr, unneeded_blob_size);
		}

		/* Zero out the remaining bytes in the same page */
		vm_size_t unneeded_bytes_in_page = last_needed_page_offset - last_needed_blob_offset;
		memset((uint8_t*)superblob + last_needed_blob_offset, 0, unneeded_bytes_in_page);
		blob->csb_mem_size = last_needed_page_offset;
	} else {
		vm_address_t new_superblob = 0;
		vm_size_t new_superblob_size = last_needed_blob_offset;

		ret = ubc_cs_blob_allocate(&new_superblob, &new_superblob_size);
		if (ret != KERN_SUCCESS) {
			printf("CODE SIGNING: Unable to allocate space when trying to clear unneeded code signature blobs: %d\n", ret);
			return ENOMEM;
		}

		/*
		 * As we weren't kmem_allocated before, we will not be kmem_allocated again. This should
		 * mean the size we passed in is exactly the size we should get back for the allocation.
		 */
		assert(new_superblob_size == last_needed_blob_offset);

		/* Copy in the updated superblob into the new memory */
		memcpy((void*)new_superblob, superblob, new_superblob_size);

		/* Free the old code signature and old memory */
		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);

		/* Setup the code signature blob again */
		blob->csb_mem_kaddr = (void *)new_superblob;
		blob->csb_mem_size = new_superblob_size;
		blob->csb_cd = (const CS_CodeDirectory*)csblob_find_blob_bytes((uint8_t*)new_superblob, new_superblob_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);

		blob->csb_der_entitlements_blob = csblob_find_blob_bytes((uint8_t*)new_superblob, new_superblob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
	}

	/* the plain-XML entitlements blob was dropped along with the CMS data */
	blob->csb_entitlements_blob = NULL;

	/* re-validate the trimmed superblob before declaring success */
	const CS_CodeDirectory *validated_code_directory = NULL;
	const CS_GenericBlob *validated_entitlements_blob = NULL;
	const CS_GenericBlob *validated_der_entitlements_blob = NULL;
	ret = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, &validated_code_directory, &validated_entitlements_blob, &validated_der_entitlements_blob);
	if (ret) {
		printf("CODE SIGNING: Validation of blob after clearing unneeded code signature blobs failed: %d\n", ret);
		return EINVAL;
	}

	return 0;
}
3476 #endif /* CONFIG_ENFORCE_SIGNED_CODE */
3477 
3478 static int
ubc_cs_convert_to_multilevel_hash(struct cs_blob * blob)3479 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3480 {
3481 	const CS_CodeDirectory  *old_cd, *cd;
3482 	CS_CodeDirectory        *new_cd;
3483 	const CS_GenericBlob *entitlements;
3484 	const CS_GenericBlob *der_entitlements;
3485 	vm_offset_t     new_blob_addr;
3486 	vm_size_t       new_blob_size;
3487 	vm_size_t       new_cdsize;
3488 	int                             error;
3489 
3490 	uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3491 
3492 	if (cs_debug > 1) {
3493 		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3494 		    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3495 	}
3496 
3497 	old_cd = blob->csb_cd;
3498 
3499 	/* Up to the hashes, we can copy all data */
3500 	new_cdsize  = ntohl(old_cd->hashOffset);
3501 	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3502 
3503 	error = ubc_cs_reconstitute_code_signature(blob, &new_blob_addr, &new_blob_size, new_cdsize, &new_cd);
3504 	if (error != 0) {
3505 		printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3506 		return error;
3507 	}
3508 	entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3509 	der_entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3510 
3511 	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3512 
3513 	/* Update fields in the Code Directory structure */
3514 	new_cd->length = htonl((uint32_t)new_cdsize);
3515 
3516 	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3517 	nCodeSlots >>= hashes_per_new_hash_shift;
3518 	new_cd->nCodeSlots = htonl(nCodeSlots);
3519 
3520 	new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
3521 
3522 	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3523 		SC_Scatter *scatter = (SC_Scatter*)
3524 		    ((char *)new_cd + ntohl(new_cd->scatterOffset));
3525 		/* iterate all scatter structs to scale their counts */
3526 		do {
3527 			uint32_t scount = ntohl(scatter->count);
3528 			uint32_t sbase  = ntohl(scatter->base);
3529 
3530 			/* last scatter? */
3531 			if (scount == 0) {
3532 				break;
3533 			}
3534 
3535 			scount >>= hashes_per_new_hash_shift;
3536 			scatter->count = htonl(scount);
3537 
3538 			sbase >>= hashes_per_new_hash_shift;
3539 			scatter->base = htonl(sbase);
3540 
3541 			scatter++;
3542 		} while (1);
3543 	}
3544 
3545 	/* For each group of hashes, hash them together */
3546 	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3547 	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3548 
3549 	uint32_t hash_index;
3550 	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3551 		union cs_hash_union     mdctx;
3552 
3553 		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3554 		const unsigned char *src = src_base + hash_index * source_hash_len;
3555 		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3556 
3557 		blob->csb_hashtype->cs_init(&mdctx);
3558 		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3559 		blob->csb_hashtype->cs_final(dst, &mdctx);
3560 	}
3561 
3562 	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements, &der_entitlements);
3563 	if (error != 0) {
3564 		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3565 		    error);
3566 
3567 		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3568 		return error;
3569 	}
3570 
3571 	/* New Code Directory is ready for use, swap it out in the blob structure */
3572 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3573 
3574 	blob->csb_mem_size = new_blob_size;
3575 	blob->csb_mem_kaddr = (void *)new_blob_addr;
3576 	blob->csb_cd = cd;
3577 	blob->csb_entitlements_blob = NULL;
3578 
3579 	blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
3580 	blob->csb_reconstituted = true;
3581 
3582 	/* The blob has some cached attributes of the Code Directory, so update those */
3583 
3584 	blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
3585 
3586 	blob->csb_hash_pageshift = PAGE_SHIFT;
3587 	blob->csb_end_offset = ntohl(cd->codeLimit);
3588 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3589 		const SC_Scatter *scatter = (const SC_Scatter*)
3590 		    ((const char*)cd + ntohl(cd->scatterOffset));
3591 		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3592 	} else {
3593 		blob->csb_start_offset = 0;
3594 	}
3595 
3596 	return 0;
3597 }
3598 
3599 static void
cs_blob_cleanup(struct cs_blob * blob)3600 cs_blob_cleanup(struct cs_blob *blob)
3601 {
3602 	if (blob->profile_kaddr) {
3603 		kmem_free(kernel_map, blob->profile_kaddr, blob->profile_allocation_size);
3604 	}
3605 	blob->profile_kaddr = 0;
3606 	blob->profile_allocation_size = 0;
3607 
3608 	if (blob->csb_mem_kaddr) {
3609 		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3610 	}
3611 	blob->csb_mem_kaddr = NULL;
3612 	blob->csb_mem_size = 0;
3613 
3614 	if (blob->csb_entitlements != NULL) {
3615 		if (amfi) {
3616 			// TODO: PANIC if amfi isn't present
3617 			amfi->OSEntitlements_invalidate(blob->csb_entitlements);
3618 		}
3619 		osobject_release(blob->csb_entitlements);
3620 		blob->csb_entitlements = NULL;
3621 	}
3622 }
3623 
3624 static void
cs_blob_ro_free(struct cs_blob * blob)3625 cs_blob_ro_free(struct cs_blob *blob)
3626 {
3627 	struct cs_blob tmp;
3628 
3629 	if (blob != NULL) {
3630 		/*
3631 		 * cs_blob_cleanup clears fields, so we need to pass it a
3632 		 * mutable copy.
3633 		 */
3634 		tmp = *blob;
3635 		cs_blob_cleanup(&tmp);
3636 
3637 		zfree_ro(ZONE_ID_CS_BLOB, blob);
3638 	}
3639 }
3640 
/*
 * Free a cs_blob previously created by cs_blob_create_validated.
 * Simply forwards to the read-only zone free path.
 */
void
cs_blob_free(
	struct cs_blob *blob)
{
	cs_blob_ro_free(blob);
}
3650 
/*
 * Initialize and validate a cs_blob wrapper around the raw code signature
 * at *addr.
 *
 * Ownership of the memory at *addr is transferred to the blob, even on
 * failure (in which case it is deallocated via cs_blob_cleanup() before
 * returning). On success, the blob's cached Code Directory attributes
 * (flags, offsets, page shift) and its cdhash are filled in, and *ret_cd
 * (if non-NULL) points at the chosen code directory inside the blob.
 * Returns 0 on success, or an errno value if validation failed.
 */
static int
cs_blob_init_validated(
	vm_address_t * const addr,
	vm_size_t size,
	struct cs_blob *blob,
	CS_CodeDirectory const ** const ret_cd)
{
	int error = EINVAL;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	const CS_GenericBlob *der_entitlements = NULL;
	union cs_hash_union mdctx;
	size_t length;

	bzero(blob, sizeof(*blob));

	/* fill in the new blob */
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_kaddr = (void *)*addr;
	blob->csb_flags = 0;
	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
	blob->csb_platform_binary = 0;
	blob->csb_platform_path = 0;
	blob->csb_teamid = NULL;
#if CONFIG_SUPPLEMENTAL_SIGNATURES
	blob->csb_supplement_teamid = NULL;
#endif
	blob->csb_entitlements_blob = NULL;
	blob->csb_der_entitlements_blob = NULL;
	blob->csb_entitlements = NULL;
	blob->csb_reconstituted = false;
	blob->profile_kaddr = 0;
	blob->profile_allocation_size = 0;

	/* Transfer ownership. Even on error, this function will deallocate */
	*addr = 0;

	/*
	 * Validate the blob's contents
	 */
	length = (size_t) size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    length, &cd, &entitlements, &der_entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		/*
		 * The vnode checker can't make the rest of this function
		 * succeed if csblob validation failed, so bail */
		goto out;
	} else {
		const unsigned char *md_base;
		uint8_t hash[CS_HASH_MAX_SIZE];
		int md_size;
		vm_offset_t hash_pagemask;

		/* Cache the interesting sub-blob pointers. Entitlements have
		 * not been validated at this point and may be NULL. */
		blob->csb_cd = cd;
		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
		blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
		blob->csb_hashtype = cs_find_md(cd->hashType);
		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
			panic("validated CodeDirectory but unsupported type");
		}

		/* Cache attributes of the Code Directory in the blob. */
		blob->csb_hash_pageshift = cd->pageSize;
		hash_pagemask = (1U << cd->pageSize) - 1;
		blob->csb_hash_firstlevel_pageshift = 0;
		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		/* End offset: the code limit rounded up to a signature page. */
		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
		if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			const SC_Scatter *scatter = (const SC_Scatter*)
			    ((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
		} else {
			blob->csb_start_offset = 0;
		}
		/* compute the blob's cdhash */
		md_base = (const unsigned char *) cd;
		md_size = ntohl(cd->length);

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
		blob->csb_hashtype->cs_final(hash, &mdctx);

		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
		/* Record the linkage hash, if declared; it is used later to
		 * match supplemental signatures to this blob. */
		blob->csb_linkage_hashtype = NULL;
		if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
		    ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
			blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);

			if (blob->csb_linkage_hashtype != NULL) {
				memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
				    CS_CDHASH_LEN);
			}
		}
#endif
	}

	error = 0;

out:
	if (error != 0) {
		/* On failure, release the blob memory we took ownership of. */
		cs_blob_cleanup(blob);
		blob = NULL;
		cd = NULL;
	}

	if (ret_cd != NULL) {
		*ret_cd = cd;
	}

	return error;
}
3768 
3769 /*
3770  * Validate the code signature blob, create a struct cs_blob wrapper
3771  * and return it together with a pointer to the chosen code directory
3772  * and entitlements blob.
3773  *
3774  * Note that this takes ownership of the memory as addr, mainly because
3775  * this function can actually replace the passed in blob with another
3776  * one, e.g. when performing multilevel hashing optimization.
3777  */
3778 int
cs_blob_create_validated(vm_address_t * const addr,vm_size_t size,struct cs_blob ** const ret_blob,CS_CodeDirectory const ** const ret_cd)3779 cs_blob_create_validated(
3780 	vm_address_t * const            addr,
3781 	vm_size_t                       size,
3782 	struct cs_blob ** const         ret_blob,
3783 	CS_CodeDirectory const ** const     ret_cd)
3784 {
3785 	struct cs_blob blob = {};
3786 	struct cs_blob *ro_blob;
3787 	int error;
3788 
3789 	if (ret_blob) {
3790 		*ret_blob = NULL;
3791 	}
3792 
3793 	if ((error = cs_blob_init_validated(addr, size, &blob, ret_cd)) != 0) {
3794 		return error;
3795 	}
3796 
3797 	if (ret_blob != NULL) {
3798 		ro_blob = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
3799 		zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, &blob);
3800 		*ret_blob = ro_blob;
3801 	}
3802 
3803 	return error;
3804 }
3805 
3806 #if CONFIG_SUPPLEMENTAL_SIGNATURES
3807 static void
cs_blob_supplement_free(struct cs_blob * const blob)3808 cs_blob_supplement_free(struct cs_blob * const blob)
3809 {
3810 	void *teamid;
3811 
3812 	if (blob != NULL) {
3813 		if (blob->csb_supplement_teamid != NULL) {
3814 			teamid = blob->csb_supplement_teamid;
3815 			vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
3816 			kfree_data(teamid, teamid_size);
3817 		}
3818 		cs_blob_ro_free(blob);
3819 	}
3820 }
3821 #endif
3822 
3823 static void
ubc_cs_blob_adjust_statistics(struct cs_blob const * blob)3824 ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
3825 {
3826 	/* Note that the atomic ops are not enough to guarantee
3827 	 * correctness: If a blob with an intermediate size is inserted
3828 	 * concurrently, we can lose a peak value assignment. But these
3829 	 * statistics are only advisory anyway, so we're not going to
3830 	 * employ full locking here. (Consequently, we are also okay with
3831 	 * relaxed ordering of those accesses.)
3832 	 */
3833 
3834 	unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
3835 	if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
3836 		os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
3837 	}
3838 
3839 	size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
3840 
3841 	if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
3842 		os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
3843 	}
3844 	if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
3845 		os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
3846 	}
3847 }
3848 
3849 static void
cs_blob_set_cpu_type(struct cs_blob * blob,cpu_type_t cputype)3850 cs_blob_set_cpu_type(struct cs_blob *blob, cpu_type_t cputype)
3851 {
3852 	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_type, &cputype);
3853 }
3854 
3855 __abortlike
3856 static void
panic_cs_blob_backref_mismatch(struct cs_blob * blob,struct vnode * vp)3857 panic_cs_blob_backref_mismatch(struct cs_blob *blob, struct vnode *vp)
3858 {
3859 	panic("cs_blob vnode backref mismatch: blob=%p, vp=%p, "
3860 	    "blob->csb_vnode=%p", blob, vp, blob->csb_vnode);
3861 }
3862 
3863 void
cs_blob_require(struct cs_blob * blob,vnode_t vp)3864 cs_blob_require(struct cs_blob *blob, vnode_t vp)
3865 {
3866 	zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), blob);
3867 
3868 	if (vp != NULL && __improbable(blob->csb_vnode != vp)) {
3869 		panic_cs_blob_backref_mismatch(blob, vp);
3870 	}
3871 }
3872 
3873 int
ubc_cs_blob_add(struct vnode * vp,uint32_t platform,cpu_type_t cputype,cpu_subtype_t cpusubtype,off_t base_offset,vm_address_t * addr,vm_size_t size,struct image_params * imgp,__unused int flags,struct cs_blob ** ret_blob)3874 ubc_cs_blob_add(
3875 	struct vnode    *vp,
3876 	uint32_t        platform,
3877 	cpu_type_t      cputype,
3878 	cpu_subtype_t   cpusubtype,
3879 	off_t           base_offset,
3880 	vm_address_t    *addr,
3881 	vm_size_t       size,
3882 	struct image_params *imgp,
3883 	__unused int    flags,
3884 	struct cs_blob  **ret_blob)
3885 {
3886 	kern_return_t           kr;
3887 	struct ubc_info         *uip;
3888 	struct cs_blob          tmp_blob;
3889 	struct cs_blob          *blob_ro = NULL;
3890 	struct cs_blob          *oblob;
3891 	int                     error;
3892 	CS_CodeDirectory const *cd;
3893 	off_t                   blob_start_offset, blob_end_offset;
3894 	boolean_t               record_mtime;
3895 
3896 	record_mtime = FALSE;
3897 	if (ret_blob) {
3898 		*ret_blob = NULL;
3899 	}
3900 
3901 	/* Create the struct cs_blob wrapper that will be attached to the vnode.
3902 	 * Validates the passed in blob in the process. */
3903 	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
3904 
3905 	if (error != 0) {
3906 		printf("malform code signature blob: %d\n", error);
3907 		return error;
3908 	}
3909 
3910 	tmp_blob.csb_cpu_type = cputype;
3911 	tmp_blob.csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
3912 	tmp_blob.csb_base_offset = base_offset;
3913 
3914 #if CONFIG_ENFORCE_SIGNED_CODE
3915 	/*
3916 	 * Reconstitute code signature
3917 	 */
3918 	{
3919 		vm_address_t new_mem_kaddr = 0;
3920 		vm_size_t new_mem_size = 0;
3921 
3922 		CS_CodeDirectory *new_cd = NULL;
3923 		const CS_GenericBlob *new_entitlements = NULL;
3924 		const CS_GenericBlob *new_der_entitlements = NULL;
3925 
3926 		error = ubc_cs_reconstitute_code_signature(&tmp_blob, &new_mem_kaddr, &new_mem_size, 0, &new_cd);
3927 		if (error != 0) {
3928 			printf("failed code signature reconstitution: %d\n", error);
3929 			goto out;
3930 		}
3931 		new_entitlements = csblob_find_blob_bytes((uint8_t*)new_mem_kaddr, new_mem_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3932 		new_der_entitlements = csblob_find_blob_bytes((uint8_t*)new_mem_kaddr, new_mem_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3933 
3934 		ubc_cs_blob_deallocate((vm_offset_t)tmp_blob.csb_mem_kaddr, tmp_blob.csb_mem_size);
3935 
3936 		tmp_blob.csb_mem_kaddr = (void *)new_mem_kaddr;
3937 		tmp_blob.csb_mem_size = new_mem_size;
3938 		tmp_blob.csb_cd = new_cd;
3939 		tmp_blob.csb_entitlements_blob = new_entitlements;
3940 		tmp_blob.csb_der_entitlements_blob = new_der_entitlements;
3941 		tmp_blob.csb_reconstituted = true;
3942 	}
3943 #endif
3944 
3945 
3946 	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
3947 	tmp_blob.csb_ro_addr = blob_ro;
3948 	tmp_blob.csb_vnode = vp;
3949 
3950 	/* AMFI needs to see the current blob state at the RO address. */
3951 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
3952 
3953 #if CONFIG_MACF
3954 	/*
3955 	 * Let policy module check whether the blob's signature is accepted.
3956 	 */
3957 
3958 	unsigned int cs_flags = tmp_blob.csb_flags;
3959 	unsigned int signer_type = tmp_blob.csb_signer_type;
3960 	error = mac_vnode_check_signature(vp, &tmp_blob, imgp, &cs_flags, &signer_type, flags, platform);
3961 
3962 	tmp_blob.csb_flags = cs_flags;
3963 	tmp_blob.csb_signer_type = signer_type;
3964 
3965 	if (error) {
3966 		if (cs_debug) {
3967 			printf("check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
3968 		}
3969 		goto out;
3970 	}
3971 	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(tmp_blob.csb_flags & CS_PLATFORM_BINARY)) {
3972 		if (cs_debug) {
3973 			printf("check_signature[pid: %d], is not apple signed\n", proc_getpid(current_proc()));
3974 		}
3975 		error = EPERM;
3976 		goto out;
3977 	}
3978 #endif
3979 
3980 
3981 #if CONFIG_ENFORCE_SIGNED_CODE
3982 	/*
3983 	 * When this flag is turned on, we reconstitue the code signature to only
3984 	 * include the blobs which are needed. This may include the first code
3985 	 * directory and the CMS blob. However, now that verification of this blob
3986 	 * is complete, we don't need all these blobs. Hence, we clear them out.
3987 	 */
3988 
3989 	if (ubc_cs_clear_unneeded_code_signature(&tmp_blob)) {
3990 		error = EPERM;
3991 		goto out;
3992 	}
3993 #endif /* CONFIG_ENFORCE_SIGNED_CODE */
3994 
3995 	tmp_blob.csb_entitlements_blob = NULL;
3996 
3997 
3998 	if (tmp_blob.csb_flags & CS_PLATFORM_BINARY) {
3999 		if (cs_debug > 1) {
4000 			printf("check_signature[pid: %d]: platform binary\n", proc_getpid(current_proc()));
4001 		}
4002 		tmp_blob.csb_platform_binary = 1;
4003 		tmp_blob.csb_platform_path = !!(tmp_blob.csb_flags & CS_PLATFORM_PATH);
4004 	} else {
4005 		tmp_blob.csb_platform_binary = 0;
4006 		tmp_blob.csb_platform_path = 0;
4007 		tmp_blob.csb_teamid = csblob_parse_teamid(&tmp_blob);
4008 		if (cs_debug > 1) {
4009 			if (tmp_blob.csb_teamid) {
4010 				printf("check_signature[pid: %d]: team-id is %s\n", proc_getpid(current_proc()), tmp_blob.csb_teamid);
4011 			} else {
4012 				printf("check_signature[pid: %d]: no team-id\n", proc_getpid(current_proc()));
4013 			}
4014 		}
4015 	}
4016 
4017 	/*
4018 	 * Validate the blob's coverage
4019 	 */
4020 	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
4021 	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
4022 
4023 	if (blob_start_offset >= blob_end_offset ||
4024 	    blob_start_offset < 0 ||
4025 	    blob_end_offset <= 0) {
4026 		/* reject empty or backwards blob */
4027 		error = EINVAL;
4028 		goto out;
4029 	}
4030 
4031 	if (ubc_cs_supports_multilevel_hash(&tmp_blob)) {
4032 		error = ubc_cs_convert_to_multilevel_hash(&tmp_blob);
4033 		if (error != 0) {
4034 			printf("failed multilevel hash conversion: %d\n", error);
4035 			goto out;
4036 		}
4037 		tmp_blob.csb_reconstituted = true;
4038 	}
4039 
4040 	vnode_lock(vp);
4041 	if (!UBCINFOEXISTS(vp)) {
4042 		vnode_unlock(vp);
4043 		error = ENOENT;
4044 		goto out;
4045 	}
4046 	uip = vp->v_ubcinfo;
4047 
4048 	/* check if this new blob overlaps with an existing blob */
4049 	for (oblob = ubc_get_cs_blobs(vp);
4050 	    oblob != NULL;
4051 	    oblob = oblob->csb_next) {
4052 		off_t oblob_start_offset, oblob_end_offset;
4053 
4054 		if (tmp_blob.csb_signer_type != oblob->csb_signer_type) {  // signer type needs to be the same for slices
4055 			vnode_unlock(vp);
4056 			error = EALREADY;
4057 			goto out;
4058 		} else if (tmp_blob.csb_platform_binary) {  //platform binary needs to be the same for app slices
4059 			if (!oblob->csb_platform_binary) {
4060 				vnode_unlock(vp);
4061 				error = EALREADY;
4062 				goto out;
4063 			}
4064 		} else if (tmp_blob.csb_teamid) {  //teamid binary needs to be the same for app slices
4065 			if (oblob->csb_platform_binary ||
4066 			    oblob->csb_teamid == NULL ||
4067 			    strcmp(oblob->csb_teamid, tmp_blob.csb_teamid) != 0) {
4068 				vnode_unlock(vp);
4069 				error = EALREADY;
4070 				goto out;
4071 			}
4072 		} else {  // non teamid binary needs to be the same for app slices
4073 			if (oblob->csb_platform_binary ||
4074 			    oblob->csb_teamid != NULL) {
4075 				vnode_unlock(vp);
4076 				error = EALREADY;
4077 				goto out;
4078 			}
4079 		}
4080 
4081 		oblob_start_offset = (oblob->csb_base_offset +
4082 		    oblob->csb_start_offset);
4083 		oblob_end_offset = (oblob->csb_base_offset +
4084 		    oblob->csb_end_offset);
4085 		if (blob_start_offset >= oblob_end_offset ||
4086 		    blob_end_offset <= oblob_start_offset) {
4087 			/* no conflict with this existing blob */
4088 		} else {
4089 			/* conflict ! */
4090 			if (blob_start_offset == oblob_start_offset &&
4091 			    blob_end_offset == oblob_end_offset &&
4092 			    tmp_blob.csb_mem_size == oblob->csb_mem_size &&
4093 			    tmp_blob.csb_flags == oblob->csb_flags &&
4094 			    (tmp_blob.csb_cpu_type == CPU_TYPE_ANY ||
4095 			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
4096 			    tmp_blob.csb_cpu_type == oblob->csb_cpu_type) &&
4097 			    !bcmp(tmp_blob.csb_cdhash,
4098 			    oblob->csb_cdhash,
4099 			    CS_CDHASH_LEN)) {
4100 				/*
4101 				 * We already have this blob:
4102 				 * we'll return success but
4103 				 * throw away the new blob.
4104 				 */
4105 				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
4106 					/*
4107 					 * The old blob matches this one
4108 					 * but doesn't have any CPU type.
4109 					 * Update it with whatever the caller
4110 					 * provided this time.
4111 					 */
4112 					cs_blob_set_cpu_type(oblob, cputype);
4113 				}
4114 
4115 				/* The signature is still accepted, so update the
4116 				 * generation count. */
4117 				uip->cs_add_gen = cs_blob_generation_count;
4118 
4119 				vnode_unlock(vp);
4120 				if (ret_blob) {
4121 					*ret_blob = oblob;
4122 				}
4123 				error = EAGAIN;
4124 				goto out;
4125 			} else {
4126 				/* different blob: reject the new one */
4127 				vnode_unlock(vp);
4128 				error = EALREADY;
4129 				goto out;
4130 			}
4131 		}
4132 	}
4133 
4134 
4135 	/* mark this vnode's VM object as having "signed pages" */
4136 	kr = memory_object_signed(uip->ui_control, TRUE);
4137 	if (kr != KERN_SUCCESS) {
4138 		vnode_unlock(vp);
4139 		error = ENOENT;
4140 		goto out;
4141 	}
4142 
4143 	if (uip->cs_blobs == NULL) {
4144 		/* loading 1st blob: record the file's current "modify time" */
4145 		record_mtime = TRUE;
4146 	}
4147 
4148 	/* set the generation count for cs_blobs */
4149 	uip->cs_add_gen = cs_blob_generation_count;
4150 
4151 	/*
4152 	 * Add this blob to the list of blobs for this vnode.
4153 	 * We always add at the front of the list and we never remove a
4154 	 * blob from the list, so ubc_cs_get_blobs() can return whatever
4155 	 * the top of the list was and that list will remain valid
4156 	 * while we validate a page, even after we release the vnode's lock.
4157 	 */
4158 	tmp_blob.csb_next = uip->cs_blobs;
4159 	uip->cs_blobs = blob_ro;
4160 
4161 	ubc_cs_blob_adjust_statistics(&tmp_blob);
4162 
4163 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
4164 
4165 	if (cs_debug > 1) {
4166 		proc_t p;
4167 		const char *name = vnode_getname_printable(vp);
4168 		p = current_proc();
4169 		printf("CODE SIGNING: proc %d(%s) "
4170 		    "loaded %s signatures for file (%s) "
4171 		    "range 0x%llx:0x%llx flags 0x%x\n",
4172 		    proc_getpid(p), p->p_comm,
4173 		    blob_ro->csb_cpu_type == -1 ? "detached" : "embedded",
4174 		    name,
4175 		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
4176 		    blob_ro->csb_base_offset + blob_ro->csb_end_offset,
4177 		    blob_ro->csb_flags);
4178 		vnode_putname_printable(name);
4179 	}
4180 
4181 	vnode_unlock(vp);
4182 
4183 	if (record_mtime) {
4184 		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
4185 	}
4186 
4187 	if (ret_blob) {
4188 		*ret_blob = blob_ro;
4189 	}
4190 
4191 	error = 0;      /* success ! */
4192 
4193 out:
4194 	if (error) {
4195 		if (cs_debug) {
4196 			printf("check_signature[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
4197 		}
4198 
4199 		cs_blob_cleanup(&tmp_blob);
4200 		if (blob_ro) {
4201 			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
4202 		}
4203 	}
4204 
4205 	if (error == EAGAIN) {
4206 		/*
4207 		 * See above:  error is EAGAIN if we were asked
4208 		 * to add an existing blob again.  We cleaned the new
4209 		 * blob and we want to return success.
4210 		 */
4211 		error = 0;
4212 	}
4213 
4214 	return error;
4215 }
4216 
4217 #if CONFIG_SUPPLEMENTAL_SIGNATURES
/*
 * Attach a supplemental code signature blob (at *addr, ownership of which
 * is taken over by this call, even on failure) to vnode vp, after
 * verifying that its linkage cdhash matches one of the blobs already
 * attached to orig_vp. Only a single supplement per vnode is supported.
 * Returns 0 on success (including the case where an identical supplement
 * was already attached); *ret_blob, if non-NULL, receives the attached
 * read-only blob.
 */
int
ubc_cs_blob_add_supplement(
	struct vnode    *vp,
	struct vnode    *orig_vp,
	off_t           base_offset,
	vm_address_t    *addr,
	vm_size_t       size,
	struct cs_blob  **ret_blob)
{
	kern_return_t           kr;
	struct ubc_info         *uip, *orig_uip;
	int                     error;
	struct cs_blob          tmp_blob;
	struct cs_blob          *orig_blob;
	struct cs_blob          *blob_ro = NULL;
	CS_CodeDirectory const *cd;
	off_t                   blob_start_offset, blob_end_offset;

	if (ret_blob) {
		*ret_blob = NULL;
	}

	/* Create the struct cs_blob wrapper that will be attached to the vnode.
	 * Validates the passed in blob in the process. */
	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);

	if (error != 0) {
		printf("malformed code signature supplement blob: %d\n", error);
		return error;
	}

	/* Supplements carry no CPU type of their own. */
	tmp_blob.csb_cpu_type = -1;
	tmp_blob.csb_base_offset = base_offset;

	tmp_blob.csb_reconstituted = false;

	vnode_lock(orig_vp);
	if (!UBCINFOEXISTS(orig_vp)) {
		vnode_unlock(orig_vp);
		error = ENOENT;
		goto out;
	}

	orig_uip = orig_vp->v_ubcinfo;

	/* check that the supplement's linked cdhash matches a cdhash of
	 * the target image.
	 */

	if (tmp_blob.csb_linkage_hashtype == NULL) {
		/* No linkage hash: this signature is not a supplement at all. */
		proc_t p;
		const char *iname = vnode_getname_printable(vp);
		p = current_proc();

		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
		    "is not a supplemental.\n",
		    proc_getpid(p), p->p_comm, iname);

		error = EINVAL;

		vnode_putname_printable(iname);
		vnode_unlock(orig_vp);
		goto out;
	}

	/* Search the original vnode's blobs for one whose cdhash matches
	 * the supplement's linkage hash. */
	for (orig_blob = ubc_get_cs_blobs(orig_vp); orig_blob != NULL;
	    orig_blob = orig_blob->csb_next) {
		if (orig_blob->csb_hashtype == tmp_blob.csb_linkage_hashtype &&
		    memcmp(orig_blob->csb_cdhash, tmp_blob.csb_linkage, CS_CDHASH_LEN) == 0) {
			// Found match!
			break;
		}
	}

	if (orig_blob == NULL) {
		// Not found.

		proc_t p;
		const char *iname = vnode_getname_printable(vp);
		p = current_proc();

		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
		    "does not match any attached cdhash.\n",
		    proc_getpid(p), p->p_comm, iname);

		error = ESRCH;

		vnode_putname_printable(iname);
		vnode_unlock(orig_vp);
		goto out;
	}

	vnode_unlock(orig_vp);

	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
	tmp_blob.csb_ro_addr = blob_ro;
	tmp_blob.csb_vnode = vp;

	/* AMFI needs to see the current blob state at the RO address. */
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	// validate the signature against policy!
#if CONFIG_MACF
	unsigned int signer_type = tmp_blob.csb_signer_type;
	error = mac_vnode_check_supplemental_signature(vp, &tmp_blob, orig_vp, orig_blob, &signer_type);

	/* The policy may have adjusted the signer type. */
	tmp_blob.csb_signer_type = signer_type;

	if (error) {
		if (cs_debug) {
			printf("check_supplemental_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
		}
		goto out;
	}
#endif

	// We allowed the supplemental signature blob so
	// copy the platform bit or team-id from the linked signature and whether or not the original is developer code
	tmp_blob.csb_platform_binary = 0;
	tmp_blob.csb_platform_path = 0;
	if (orig_blob->csb_platform_binary == 1) {
		tmp_blob.csb_platform_binary = orig_blob->csb_platform_binary;
		tmp_blob.csb_platform_path = orig_blob->csb_platform_path;
	} else if (orig_blob->csb_teamid != NULL) {
		/* The supplement owns its own copy of the team identifier;
		 * it is freed in cs_blob_supplement_free(). */
		vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
		tmp_blob.csb_supplement_teamid = kalloc_data(teamid_size, Z_WAITOK);
		if (tmp_blob.csb_supplement_teamid == NULL) {
			error = ENOMEM;
			goto out;
		}
		strlcpy(tmp_blob.csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
	}
	tmp_blob.csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);

	// Validate the blob's coverage
	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;

	if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	vnode_lock(vp);
	if (!UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* If a supplement is already attached, accept an identical one
	 * silently (EAGAIN, mapped to 0 below) and reject any other. */
	struct cs_blob *existing = uip->cs_blob_supplement;
	if (existing != NULL) {
		if (tmp_blob.csb_hashtype == existing->csb_hashtype &&
		    memcmp(tmp_blob.csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
			error = EAGAIN; // non-fatal
		} else {
			error = EALREADY; // fatal
		}

		vnode_unlock(vp);
		goto out;
	}

	/* Unlike regular cs_blobs, we only ever support one supplement. */
	tmp_blob.csb_next = NULL;
	uip->cs_blob_supplement = blob_ro;

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	/* Publish the final blob state to the read-only copy. */
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);

	vnode_unlock(vp);

	/* We still adjust statistics even for supplemental blobs, as they
	 * consume memory just the same. */
	ubc_cs_blob_adjust_statistics(&tmp_blob);

	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		    "loaded supplemental signature for file (%s) "
		    "range 0x%llx:0x%llx\n",
		    proc_getpid(p), p->p_comm,
		    name,
		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
		    blob_ro->csb_base_offset + blob_ro->csb_end_offset);
		vnode_putname_printable(name);
	}

	if (ret_blob) {
		*ret_blob = blob_ro;
	}

	error = 0; // Success!
out:
	if (error) {
		if (cs_debug) {
			printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
		}

		cs_blob_cleanup(&tmp_blob);
		if (blob_ro) {
			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
		}
	}

	if (error == EAGAIN) {
		/* We were asked to add an existing blob.
		 * We cleaned up and ignore the attempt. */
		error = 0;
	}

	return error;
}
4442 #endif
4443 
4444 
4445 
4446 void
csvnode_print_debug(struct vnode * vp)4447 csvnode_print_debug(struct vnode *vp)
4448 {
4449 	const char      *name = NULL;
4450 	struct ubc_info *uip;
4451 	struct cs_blob *blob;
4452 
4453 	name = vnode_getname_printable(vp);
4454 	if (name) {
4455 		printf("csvnode: name: %s\n", name);
4456 		vnode_putname_printable(name);
4457 	}
4458 
4459 	vnode_lock_spin(vp);
4460 
4461 	if (!UBCINFOEXISTS(vp)) {
4462 		blob = NULL;
4463 		goto out;
4464 	}
4465 
4466 	uip = vp->v_ubcinfo;
4467 	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
4468 		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
4469 		    (unsigned long)blob->csb_start_offset,
4470 		    (unsigned long)blob->csb_end_offset,
4471 		    blob->csb_flags,
4472 		    blob->csb_platform_binary ? "yes" : "no",
4473 		    blob->csb_platform_path ? "yes" : "no",
4474 		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
4475 	}
4476 
4477 out:
4478 	vnode_unlock(vp);
4479 }
4480 
4481 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4482 struct cs_blob *
ubc_cs_blob_get_supplement(struct vnode * vp,off_t offset)4483 ubc_cs_blob_get_supplement(
4484 	struct vnode    *vp,
4485 	off_t           offset)
4486 {
4487 	struct cs_blob *blob;
4488 	off_t offset_in_blob;
4489 
4490 	vnode_lock_spin(vp);
4491 
4492 	if (!UBCINFOEXISTS(vp)) {
4493 		blob = NULL;
4494 		goto out;
4495 	}
4496 
4497 	blob = vp->v_ubcinfo->cs_blob_supplement;
4498 
4499 	if (blob == NULL) {
4500 		// no supplemental blob
4501 		goto out;
4502 	}
4503 
4504 
4505 	if (offset != -1) {
4506 		offset_in_blob = offset - blob->csb_base_offset;
4507 		if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
4508 			// not actually covered by this blob
4509 			blob = NULL;
4510 		}
4511 	}
4512 
4513 out:
4514 	vnode_unlock(vp);
4515 
4516 	return blob;
4517 }
4518 #endif
4519 
4520 struct cs_blob *
ubc_cs_blob_get(struct vnode * vp,cpu_type_t cputype,cpu_subtype_t cpusubtype,off_t offset)4521 ubc_cs_blob_get(
4522 	struct vnode    *vp,
4523 	cpu_type_t      cputype,
4524 	cpu_subtype_t   cpusubtype,
4525 	off_t           offset)
4526 {
4527 	struct cs_blob  *blob;
4528 	off_t offset_in_blob;
4529 
4530 	vnode_lock_spin(vp);
4531 
4532 	if (!UBCINFOEXISTS(vp)) {
4533 		blob = NULL;
4534 		goto out;
4535 	}
4536 
4537 	for (blob = ubc_get_cs_blobs(vp);
4538 	    blob != NULL;
4539 	    blob = blob->csb_next) {
4540 		if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
4541 			break;
4542 		}
4543 		if (offset != -1) {
4544 			offset_in_blob = offset - blob->csb_base_offset;
4545 			if (offset_in_blob >= blob->csb_start_offset &&
4546 			    offset_in_blob < blob->csb_end_offset) {
4547 				/* our offset is covered by this blob */
4548 				break;
4549 			}
4550 		}
4551 	}
4552 
4553 out:
4554 	vnode_unlock(vp);
4555 
4556 	return blob;
4557 }
4558 
4559 static void
ubc_cs_free(struct ubc_info * uip)4560 ubc_cs_free(
4561 	struct ubc_info *uip)
4562 {
4563 	struct cs_blob  *blob, *next_blob;
4564 
4565 	for (blob = uip->cs_blobs;
4566 	    blob != NULL;
4567 	    blob = next_blob) {
4568 		next_blob = blob->csb_next;
4569 		os_atomic_add(&cs_blob_count, -1, relaxed);
4570 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
4571 		cs_blob_ro_free(blob);
4572 	}
4573 #if CHECK_CS_VALIDATION_BITMAP
4574 	ubc_cs_validation_bitmap_deallocate( uip );
4575 #endif
4576 	uip->cs_blobs = NULL;
4577 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4578 	if (uip->cs_blob_supplement != NULL) {
4579 		blob = uip->cs_blob_supplement;
4580 		os_atomic_add(&cs_blob_count, -1, relaxed);
4581 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
4582 		cs_blob_supplement_free(uip->cs_blob_supplement);
4583 		uip->cs_blob_supplement = NULL;
4584 	}
4585 #endif
4586 }
4587 
4588 /* check cs blob generation on vnode
4589  * returns:
4590  *    0         : Success, the cs_blob attached is current
4591  *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
4592  */
4593 int
ubc_cs_generation_check(struct vnode * vp)4594 ubc_cs_generation_check(
4595 	struct vnode    *vp)
4596 {
4597 	int retval = ENEEDAUTH;
4598 
4599 	vnode_lock_spin(vp);
4600 
4601 	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
4602 		retval = 0;
4603 	}
4604 
4605 	vnode_unlock(vp);
4606 	return retval;
4607 }
4608 
/*
 * Re-validate an existing in-memory cs_blob attached to "vp".
 *
 * Returns:
 *    0      : blob validated; the vnode's cs_add_gen was refreshed.
 *    EAGAIN : blob was reconstituted and cannot be revalidated inline;
 *             the caller should reread and re-add the signature.
 *    other  : error from cs_validate_csblob() or the MACF callout.
 */
int
ubc_cs_blob_revalidate(
	struct vnode    *vp,
	struct cs_blob *blob,
	struct image_params *imgp,
	int flags,
	uint32_t platform
	)
{
	int error = 0;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	const CS_GenericBlob *der_entitlements = NULL;
	size_t size;
	assert(vp != NULL);
	assert(blob != NULL);

	/* structural validation of the raw blob; also locates the CD */
	size = blob->csb_mem_size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    size, &cd, &entitlements, &der_entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}

	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;

	if (blob->csb_reconstituted) {
		/*
		 * Code signatures that have been modified after validation
		 * cannot be revalidated inline from their in-memory blob.
		 *
		 * That's okay, though, because the only path left that relies
		 * on revalidation of existing in-memory blobs is the legacy
		 * detached signature database path, which only exists on macOS,
		 * which does not do reconstitution of any kind.
		 */
		if (cs_debug) {
			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
		}

		/*
		 * EAGAIN tells the caller that they may reread the code
		 * signature and try attaching it again, which is the same
		 * thing they would do if there was no cs_blob yet in the
		 * first place.
		 *
		 * Conveniently, after ubc_cs_blob_add did a successful
		 * validation, it will detect that a matching cs_blob (cdhash,
		 * offset, arch etc.) already exists, and return success
		 * without re-adding a cs_blob to the vnode.
		 */
		return EAGAIN;
	}

	/* callout to mac_vnode_check_signature */
#if CONFIG_MACF
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
	}
#else
	(void)flags;
	(void)signer_type;
#endif

	/* update generation number if success */
	vnode_lock_spin(vp);
	/* blob lives in read-only zone memory; fields update via zalloc_ro */
	struct cs_signer_info signer_info = {
		.csb_flags = cs_flags,
		.csb_signer_type = signer_type
	};
	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_signer_info, &signer_info);
	if (UBCINFOEXISTS(vp)) {
		if (error == 0) {
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		} else {
			/* force a mismatch so the next check re-authenticates */
			vp->v_ubcinfo->cs_add_gen = 0;
		}
	}

	vnode_unlock(vp);

out:
	return error;
}
4698 
4699 void
cs_blob_reset_cache()4700 cs_blob_reset_cache()
4701 {
4702 	/* incrementing odd no by 2 makes sure '0' is never reached. */
4703 	OSAddAtomic(+2, &cs_blob_generation_count);
4704 	printf("Reseting cs_blob cache from all vnodes. \n");
4705 }
4706 
4707 struct cs_blob *
ubc_get_cs_blobs(struct vnode * vp)4708 ubc_get_cs_blobs(
4709 	struct vnode    *vp)
4710 {
4711 	struct ubc_info *uip;
4712 	struct cs_blob  *blobs;
4713 
4714 	/*
4715 	 * No need to take the vnode lock here.  The caller must be holding
4716 	 * a reference on the vnode (via a VM mapping or open file descriptor),
4717 	 * so the vnode will not go away.  The ubc_info stays until the vnode
4718 	 * goes away.  And we only modify "blobs" by adding to the head of the
4719 	 * list.
4720 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
4721 	 * part of a forced unmount.  In the case of a code-signature validation
4722 	 * during a page fault, the "paging_in_progress" reference on the VM
4723 	 * object guarantess that the vnode pager (and the ubc_info) won't go
4724 	 * away during the fault.
4725 	 * Other callers need to protect against vnode reclaim by holding the
4726 	 * vnode lock, for example.
4727 	 */
4728 
4729 	if (!UBCINFOEXISTS(vp)) {
4730 		blobs = NULL;
4731 		goto out;
4732 	}
4733 
4734 	uip = vp->v_ubcinfo;
4735 	blobs = uip->cs_blobs;
4736 	if (blobs != NULL) {
4737 		cs_blob_require(blobs, vp);
4738 	}
4739 
4740 out:
4741 	return blobs;
4742 }
4743 
4744 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4745 struct cs_blob *
ubc_get_cs_supplement(struct vnode * vp)4746 ubc_get_cs_supplement(
4747 	struct vnode    *vp)
4748 {
4749 	struct ubc_info *uip;
4750 	struct cs_blob  *blob;
4751 
4752 	/*
4753 	 * No need to take the vnode lock here.  The caller must be holding
4754 	 * a reference on the vnode (via a VM mapping or open file descriptor),
4755 	 * so the vnode will not go away.  The ubc_info stays until the vnode
4756 	 * goes away.
4757 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
4758 	 * part of a forced unmount.  In the case of a code-signature validation
4759 	 * during a page fault, the "paging_in_progress" reference on the VM
4760 	 * object guarantess that the vnode pager (and the ubc_info) won't go
4761 	 * away during the fault.
4762 	 * Other callers need to protect against vnode reclaim by holding the
4763 	 * vnode lock, for example.
4764 	 */
4765 
4766 	if (!UBCINFOEXISTS(vp)) {
4767 		blob = NULL;
4768 		goto out;
4769 	}
4770 
4771 	uip = vp->v_ubcinfo;
4772 	blob = uip->cs_blob_supplement;
4773 	if (blob != NULL) {
4774 		cs_blob_require(blob, vp);
4775 	}
4776 
4777 out:
4778 	return blob;
4779 }
4780 #endif
4781 
4782 
4783 void
ubc_get_cs_mtime(struct vnode * vp,struct timespec * cs_mtime)4784 ubc_get_cs_mtime(
4785 	struct vnode    *vp,
4786 	struct timespec *cs_mtime)
4787 {
4788 	struct ubc_info *uip;
4789 
4790 	if (!UBCINFOEXISTS(vp)) {
4791 		cs_mtime->tv_sec = 0;
4792 		cs_mtime->tv_nsec = 0;
4793 		return;
4794 	}
4795 
4796 	uip = vp->v_ubcinfo;
4797 	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
4798 	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
4799 }
4800 
/* debug counters: pages with no covering hash, and pages whose hash mismatched */
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
/*
 * Validate one code-signing chunk of "data" at "page_offset" against the
 * expected hash stored in whichever blob on "blobs" covers that offset.
 *
 * On return, *bytes_processed is the size of the chunk that was hashed
 * (0 if no blob covered the offset), and *tainted carries
 * CS_VALIDATE_TAINTED on hash mismatch and/or CS_VALIDATE_NX for a
 * partial chunk past the code limit.  Returns TRUE only if a covering
 * hash was found and the chunk was hashed (mismatch still returns TRUE,
 * with the tainted flag set); FALSE when no hash was available.
 */
static boolean_t
cs_validate_hash(
	struct cs_blob          *blobs,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	vm_size_t               *bytes_processed,
	unsigned                *tainted)
{
	union cs_hash_union     mdctx;
	struct cs_hash const    *hashtype = NULL;
	unsigned char           actual_hash[CS_HASH_MAX_SIZE];
	unsigned char           expected_hash[CS_HASH_MAX_SIZE];
	boolean_t               found_hash;
	struct cs_blob          *blob;
	const CS_CodeDirectory  *cd;
	const unsigned char     *hash;
	boolean_t               validated;
	off_t                   offset; /* page offset in the file */
	size_t                  size;
	off_t                   codeLimit = 0;
	const char              *lower_bound, *upper_bound;
	vm_offset_t             kaddr, blob_addr;

	/* retrieve the expected hash */
	found_hash = FALSE;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* blob data has been released */
		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
		if (kaddr == 0) {
			continue;
		}

		/* bounds used to range-check hash lookups inside the blob */
		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		cd = blob->csb_cd;
		if (cd != NULL) {
			/* all CD's that have been injected is already validated */

			hashtype = blob->csb_hashtype;
			if (hashtype == NULL) {
				panic("unknown hash type ?");
			}
			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
				panic("hash size too large");
			}
			if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
				panic("offset not aligned to cshash boundary");
			}

			codeLimit = ntohl(cd->codeLimit);

			/* look up the expected hash for this chunk index */
			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
			    hashtype->cs_size,
			    lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash, hashtype->cs_size);
				found_hash = TRUE;
			}

			break;
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			    "mobj %p off 0x%llx: no hash to validate !?\n",
			    pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {
		*tainted = 0;

		/* chunk size is determined by the blob's hash page shift */
		size = (1U << blob->csb_hash_pageshift);
		*bytes_processed = size;

		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			/* only hash up to the code limit; mark the rest NX */
			size = (size_t) (codeLimit & (size - 1));
			*tainted |= CS_VALIDATE_NX;
		}

		hashtype->cs_init(&mdctx);

		if (blob->csb_hash_firstlevel_pageshift) {
			/*
			 * Two-level hashing: hash each first-level sub-chunk,
			 * then hash the concatenation of those digests.
			 */
			const unsigned char *partial_data = (const unsigned char *)data;
			size_t i;
			for (i = 0; i < size;) {
				union cs_hash_union     partialctx;
				unsigned char partial_digest[CS_HASH_MAX_SIZE];
				size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));

				hashtype->cs_init(&partialctx);
				hashtype->cs_update(&partialctx, partial_data, partial_size);
				hashtype->cs_final(partial_digest, &partialctx);

				/* Update cumulative multi-level hash */
				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
				partial_data = partial_data + partial_size;
				i += partial_size;
			}
		} else {
			hashtype->cs_update(&mdctx, data, size);
		}
		hashtype->cs_final(actual_hash, &mdctx);

		/* 32-bit views of the digests, used only for debug printing */
		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				    pager, page_offset, size,
				    asha1[0], asha1[1], asha1[2],
				    asha1[3], asha1[4],
				    esha1[0], esha1[1], esha1[2],
				    esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "SHA1 OK\n",
				    pager, page_offset, size);
			}
		}
		validated = TRUE;
	}

	return validated;
}
4964 
/*
 * Validate "dsize" bytes starting at "page_offset" by walking the range
 * in code-signing-chunk-sized steps via cs_validate_hash().  *tainted
 * accumulates the CS_VALIDATE_* flags from every subrange.  Returns
 * TRUE only if every subrange validated; stops early only when no
 * forward progress can be made.
 */
boolean_t
cs_validate_range(
	struct vnode    *vp,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	vm_size_t               dsize,
	unsigned                *tainted)
{
	vm_size_t offset_in_range;
	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */

	struct cs_blob *blobs = ubc_get_cs_blobs(vp);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	/* translated processes may fall back to the supplemental signature */
	if (blobs == NULL && proc_is_translated(current_proc())) {
		struct cs_blob *supp = ubc_get_cs_supplement(vp);

		if (supp != NULL) {
			blobs = supp;
		} else {
			return FALSE;
		}
	}
#endif



	*tainted = 0;

	for (offset_in_range = 0;
	    offset_in_range < dsize;
	    /* offset_in_range updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;

		subrange_validated = cs_validate_hash(blobs,
		    pager,
		    page_offset + offset_in_range,
		    (const void *)((const char *)data + offset_in_range),
		    &bytes_processed,
		    &subrange_tainted);

		*tainted |= subrange_tainted;

		if (bytes_processed == 0) {
			/* Cannot make forward progress, so return an error */
			all_subranges_validated = FALSE;
			break;
		} else if (subrange_validated == FALSE) {
			all_subranges_validated = FALSE;
			/* Keep going to detect other types of failures in subranges */
		}

		offset_in_range += bytes_processed;
	}

	return all_subranges_validated;
}
5025 
/*
 * Validate one VM page of "data" at "page_offset".
 *
 * The outputs are per-4k-subrange bitmasks: bit i of *validated_p,
 * *tainted_p and *nx_p describes the i-th 4k chunk of the page.  When a
 * single signature chunk covers (almost) the whole page, the special
 * VMP_CS_ALL_TRUE / VMP_CS_ALL_FALSE values are used instead.
 */
void
cs_validate_page(
	struct vnode            *vp,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	int                     *validated_p,
	int                     *tainted_p,
	int                     *nx_p)
{
	vm_size_t offset_in_page;
	struct cs_blob *blobs;

	blobs = ubc_get_cs_blobs(vp);

#if CONFIG_SUPPLEMENTAL_SIGNATURES
	/* translated processes may fall back to the supplemental signature */
	if (blobs == NULL && proc_is_translated(current_proc())) {
		struct cs_blob *supp = ubc_get_cs_supplement(vp);

		if (supp != NULL) {
			blobs = supp;
		}
	}
#endif

	*validated_p = VMP_CS_ALL_FALSE;
	*tainted_p = VMP_CS_ALL_FALSE;
	*nx_p = VMP_CS_ALL_FALSE;

	for (offset_in_page = 0;
	    offset_in_page < PAGE_SIZE;
	    /* offset_in_page updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;
		int sub_bit;

		subrange_validated = cs_validate_hash(blobs,
		    pager,
		    page_offset + offset_in_page,
		    (const void *)((const char *)data + offset_in_page),
		    &bytes_processed,
		    &subrange_tainted);

		if (bytes_processed == 0) {
			/* 4k chunk not code-signed: try next one */
			offset_in_page += FOURK_PAGE_SIZE;
			continue;
		}
		if (offset_in_page == 0 &&
		    bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
			/* all processed: no 4k granularity */
			if (subrange_validated) {
				*validated_p = VMP_CS_ALL_TRUE;
			}
			if (subrange_tainted & CS_VALIDATE_TAINTED) {
				*tainted_p = VMP_CS_ALL_TRUE;
			}
			if (subrange_tainted & CS_VALIDATE_NX) {
				*nx_p = VMP_CS_ALL_TRUE;
			}
			break;
		}
		/* we only handle 4k or 16k code-signing granularity... */
		assertf(bytes_processed <= FOURK_PAGE_SIZE,
		    "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
		    vp, blobs, (uint64_t)page_offset,
		    (uint64_t)offset_in_page, (uint64_t)bytes_processed);
		/* record this 4k chunk's result in the per-chunk bitmasks */
		sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
		if (subrange_validated) {
			*validated_p |= sub_bit;
		}
		if (subrange_tainted & CS_VALIDATE_TAINTED) {
			*tainted_p |= sub_bit;
		}
		if (subrange_tainted & CS_VALIDATE_NX) {
			*nx_p |= sub_bit;
		}
		/* go to next 4k chunk */
		offset_in_page += FOURK_PAGE_SIZE;
	}

	return;
}
5110 
5111 int
ubc_cs_getcdhash(vnode_t vp,off_t offset,unsigned char * cdhash)5112 ubc_cs_getcdhash(
5113 	vnode_t         vp,
5114 	off_t           offset,
5115 	unsigned char   *cdhash)
5116 {
5117 	struct cs_blob  *blobs, *blob;
5118 	off_t           rel_offset;
5119 	int             ret;
5120 
5121 	vnode_lock(vp);
5122 
5123 	blobs = ubc_get_cs_blobs(vp);
5124 	for (blob = blobs;
5125 	    blob != NULL;
5126 	    blob = blob->csb_next) {
5127 		/* compute offset relative to this blob */
5128 		rel_offset = offset - blob->csb_base_offset;
5129 		if (rel_offset >= blob->csb_start_offset &&
5130 		    rel_offset < blob->csb_end_offset) {
5131 			/* this blob does cover our "offset" ! */
5132 			break;
5133 		}
5134 	}
5135 
5136 	if (blob == NULL) {
5137 		/* we didn't find a blob covering "offset" */
5138 		ret = EBADEXEC; /* XXX any better error ? */
5139 	} else {
5140 		/* get the SHA1 hash of that blob */
5141 		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
5142 		ret = 0;
5143 	}
5144 
5145 	vnode_unlock(vp);
5146 
5147 	return ret;
5148 }
5149 
5150 boolean_t
ubc_cs_is_range_codesigned(vnode_t vp,mach_vm_offset_t start,mach_vm_size_t size)5151 ubc_cs_is_range_codesigned(
5152 	vnode_t                 vp,
5153 	mach_vm_offset_t        start,
5154 	mach_vm_size_t          size)
5155 {
5156 	struct cs_blob          *csblob;
5157 	mach_vm_offset_t        blob_start;
5158 	mach_vm_offset_t        blob_end;
5159 
5160 	if (vp == NULL) {
5161 		/* no file: no code signature */
5162 		return FALSE;
5163 	}
5164 	if (size == 0) {
5165 		/* no range: no code signature */
5166 		return FALSE;
5167 	}
5168 	if (start + size < start) {
5169 		/* overflow */
5170 		return FALSE;
5171 	}
5172 
5173 	csblob = ubc_cs_blob_get(vp, -1, -1, start);
5174 	if (csblob == NULL) {
5175 		return FALSE;
5176 	}
5177 
5178 	/*
5179 	 * We currently check if the range is covered by a single blob,
5180 	 * which should always be the case for the dyld shared cache.
5181 	 * If we ever want to make this routine handle other cases, we
5182 	 * would have to iterate if the blob does not cover the full range.
5183 	 */
5184 	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
5185 	    csblob->csb_start_offset);
5186 	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
5187 	    csblob->csb_end_offset);
5188 	if (blob_start > start || blob_end < (start + size)) {
5189 		/* range not fully covered by this code-signing blob */
5190 		return FALSE;
5191 	}
5192 
5193 	return TRUE;
5194 }
5195 
5196 #if CHECK_CS_VALIDATION_BITMAP
5197 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
5198 extern  boolean_t       root_fs_upgrade_try;
5199 
5200 /*
5201  * Should we use the code-sign bitmap to avoid repeated code-sign validation?
5202  * Depends:
5203  * a) Is the target vnode on the root filesystem?
5204  * b) Has someone tried to mount the root filesystem read-write?
5205  * If answers are (a) yes AND (b) no, then we can use the bitmap.
5206  */
5207 #define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
5208 kern_return_t
ubc_cs_validation_bitmap_allocate(vnode_t vp)5209 ubc_cs_validation_bitmap_allocate(
5210 	vnode_t         vp)
5211 {
5212 	kern_return_t   kr = KERN_SUCCESS;
5213 	struct ubc_info *uip;
5214 	char            *target_bitmap;
5215 	vm_object_size_t        bitmap_size;
5216 
5217 	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
5218 		kr = KERN_INVALID_ARGUMENT;
5219 	} else {
5220 		uip = vp->v_ubcinfo;
5221 
5222 		if (uip->cs_valid_bitmap == NULL) {
5223 			bitmap_size = stob(uip->ui_size);
5224 			target_bitmap = (char*) kalloc_data((vm_size_t)bitmap_size, Z_WAITOK | Z_ZERO);
5225 			if (target_bitmap == 0) {
5226 				kr = KERN_NO_SPACE;
5227 			} else {
5228 				kr = KERN_SUCCESS;
5229 			}
5230 			if (kr == KERN_SUCCESS) {
5231 				uip->cs_valid_bitmap = (void*)target_bitmap;
5232 				uip->cs_valid_bitmap_size = bitmap_size;
5233 			}
5234 		}
5235 	}
5236 	return kr;
5237 }
5238 
5239 kern_return_t
ubc_cs_check_validation_bitmap(vnode_t vp,memory_object_offset_t offset,int optype)5240 ubc_cs_check_validation_bitmap(
5241 	vnode_t                 vp,
5242 	memory_object_offset_t          offset,
5243 	int                     optype)
5244 {
5245 	kern_return_t   kr = KERN_SUCCESS;
5246 
5247 	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
5248 		kr = KERN_INVALID_ARGUMENT;
5249 	} else {
5250 		struct ubc_info *uip = vp->v_ubcinfo;
5251 		char            *target_bitmap = uip->cs_valid_bitmap;
5252 
5253 		if (target_bitmap == NULL) {
5254 			kr = KERN_INVALID_ARGUMENT;
5255 		} else {
5256 			uint64_t        bit, byte;
5257 			bit = atop_64( offset );
5258 			byte = bit >> 3;
5259 
5260 			if (byte > uip->cs_valid_bitmap_size) {
5261 				kr = KERN_INVALID_ARGUMENT;
5262 			} else {
5263 				if (optype == CS_BITMAP_SET) {
5264 					target_bitmap[byte] |= (1 << (bit & 07));
5265 					kr = KERN_SUCCESS;
5266 				} else if (optype == CS_BITMAP_CLEAR) {
5267 					target_bitmap[byte] &= ~(1 << (bit & 07));
5268 					kr = KERN_SUCCESS;
5269 				} else if (optype == CS_BITMAP_CHECK) {
5270 					if (target_bitmap[byte] & (1 << (bit & 07))) {
5271 						kr = KERN_SUCCESS;
5272 					} else {
5273 						kr = KERN_FAILURE;
5274 					}
5275 				}
5276 			}
5277 		}
5278 	}
5279 	return kr;
5280 }
5281 
5282 void
ubc_cs_validation_bitmap_deallocate(struct ubc_info * uip)5283 ubc_cs_validation_bitmap_deallocate(
5284 	struct ubc_info *uip)
5285 {
5286 	if (uip->cs_valid_bitmap != NULL) {
5287 		kfree_data(uip->cs_valid_bitmap, (vm_size_t)uip->cs_valid_bitmap_size);
5288 		uip->cs_valid_bitmap = NULL;
5289 	}
5290 }
5291 #else
/* Stub: validation bitmap support compiled out (CHECK_CS_VALIDATION_BITMAP off). */
kern_return_t
ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
{
	return KERN_INVALID_ARGUMENT;
}
5297 
/* Stub: validation bitmap support compiled out (CHECK_CS_VALIDATION_BITMAP off). */
kern_return_t
ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype)
{
	return KERN_INVALID_ARGUMENT;
}
5306 
/* Stub: nothing to free when validation bitmap support is compiled out. */
void
ubc_cs_validation_bitmap_deallocate(__unused struct ubc_info *uip)
{
	return;
}
5312 #endif /* CHECK_CS_VALIDATION_BITMAP */
5313 
5314