/*
 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [[email protected]]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to the Unified Buffer Cache.
 *
 *	Callers of UBC functions MUST have a valid reference on the vnode.
 *
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>
#include <sys/fcntl.h>
#include <sys/reboot.h>
#include <sys/code_signing.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_upl.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_protos.h> /* last */
#include <vm/vm_ubc.h>

#include <libkern/crypto/sha1.h>
#include <libkern/crypto/sha2.h>
#include <libkern/libkern.h>

#include <security/mac_framework.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <libkern/amfi/amfi.h>

extern void Debugger(const char *message);

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
	((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);

ZONE_DEFINE_TYPE(ubc_info_zone, "ubc_info zone", struct ubc_info,
    ZC_ZFREE_CLEARMEM);
static uint32_t cs_blob_generation_count = 1;

/*
 * CODESIGNING
 * Routines to navigate code signing data structures in the kernel...
 */

ZONE_DEFINE_ID(ZONE_ID_CS_BLOB, "cs_blob zone", struct cs_blob,
    ZC_READONLY | ZC_ZFREE_CLEARMEM);

extern int cs_debug;

#define PAGE_SHIFT_4K (12)

static boolean_t
cs_valid_range(
	const void *start,
	const void *end,
	const void *lower_bound,
	const void *upper_bound)
{
	if (upper_bound < lower_bound ||
	    end < start) {
		return FALSE;
	}

	if (start < lower_bound ||
	    end > upper_bound) {
		return FALSE;
	}

	return TRUE;
}

typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

struct cs_hash {
	uint8_t         cs_type;        /* type code as per code signing */
	size_t          cs_size;        /* size of effective hash (may be truncated) */
	size_t          cs_digest_size; /* size of native hash */
	cs_md_init      cs_init;
	cs_md_update    cs_update;
	cs_md_final     cs_final;
};

uint8_t
cs_hash_type(
	struct cs_hash const * const cs_hash)
{
	return cs_hash->cs_type;
}

static const struct cs_hash cs_hash_sha1 = {
	.cs_type = CS_HASHTYPE_SHA1,
	.cs_size = CS_SHA1_LEN,
	.cs_digest_size = SHA_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA1Init,
	.cs_update = (cs_md_update)SHA1Update,
	.cs_final = (cs_md_final)SHA1Final,
};
#if CRYPTO_SHA2
static const struct cs_hash cs_hash_sha256 = {
	.cs_type = CS_HASHTYPE_SHA256,
	.cs_size = SHA256_DIGEST_LENGTH,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
static const struct cs_hash cs_hash_sha256_truncate = {
	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
	.cs_size = CS_SHA256_TRUNCATED_LEN,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};
static const struct cs_hash cs_hash_sha384 = {
	.cs_type = CS_HASHTYPE_SHA384,
	.cs_size = SHA384_DIGEST_LENGTH,
	.cs_digest_size = SHA384_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA384_Init,
	.cs_update = (cs_md_update)SHA384_Update,
	.cs_final = (cs_md_final)SHA384_Final,
};
#endif

static struct cs_hash const *
cs_find_md(uint8_t type)
{
	if (type == CS_HASHTYPE_SHA1) {
		return &cs_hash_sha1;
#if CRYPTO_SHA2
	} else if (type == CS_HASHTYPE_SHA256) {
		return &cs_hash_sha256;
	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
		return &cs_hash_sha256_truncate;
	} else if (type == CS_HASHTYPE_SHA384) {
		return &cs_hash_sha384;
#endif
	}
	return NULL;
}

union cs_hash_union {
	SHA1_CTX        sha1ctxt;
	SHA256_CTX      sha256ctx;
	SHA384_CTX      sha384ctx;
};
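
/*
 * A minimal sketch (hypothetical helper, not used elsewhere in this file)
 * of how the cs_hash abstraction above is meant to be driven: look up a
 * digest implementation by code-directory hash type, then run the
 * init/update/final sequence through the function pointers.
 */
static inline int
cs_hash_buffer_sketch(uint8_t type, const void *data, size_t size,
    uint8_t out[CS_HASH_MAX_SIZE])
{
	union cs_hash_union ctx;
	struct cs_hash const *md = cs_find_md(type);

	if (md == NULL) {
		return EBADEXEC;        /* unsupported hash type */
	}

	md->cs_init(&ctx);
	md->cs_update(&ctx, data, size);
	md->cs_final(out, &ctx);        /* writes cs_digest_size bytes into out */

	return 0;
}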


/*
 * Choose among different hash algorithms.
 * Higher is better, 0 => don't use at all.
 */
static const uint32_t hashPriorities[] = {
	CS_HASHTYPE_SHA1,
	CS_HASHTYPE_SHA256_TRUNCATED,
	CS_HASHTYPE_SHA256,
	CS_HASHTYPE_SHA384,
};

static unsigned int
hash_rank(const CS_CodeDirectory *cd)
{
	uint32_t type = cd->hashType;
	unsigned int n;

	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
		if (hashPriorities[n] == type) {
			return n + 1;
		}
	}
	return 0;       /* not supported */
}
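
/*
 * Sketch (hypothetical helper): because hash_rank() maps each supported
 * hash type onto a strictly increasing score, choosing the stronger of two
 * code directories reduces to an integer comparison.
 */
static inline const CS_CodeDirectory *
cs_prefer_cd_sketch(const CS_CodeDirectory *a, const CS_CodeDirectory *b)
{
	return hash_rank(a) >= hash_rank(b) ? a : b;
}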


/*
 * Locating a page hash
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	uint32_t page,
	size_t hash_len,
	const char *lower_bound,
	const char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter*)
		    ((const char*)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex = 0, scount, sbase = 0;
		/* iterate all scatter structs */
		do {
			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
				if (cs_debug) {
					printf("CODE SIGNING: Scatter extends past Code Directory\n");
				}
				return NULL;
			}

			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				return NULL;
			}

			if ((hashindex > 0) && (new_base <= sbase)) {
				if (cs_debug) {
					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
					    sbase, new_base);
				}
				return NULL;    /* unordered scatter array */
			}
			sbase = new_base;

			/* is this scatter beyond the page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase + scount >= page) {
				/* Found the scatter struct that is
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
				    hashindex * hash_len;
				/* top = address of first hash after this scatter */
				top = base + scount * hash_len;
				if (!cs_valid_range(base, top, lower_bound,
				    upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}

				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex += scount;
			scatter++;
		} while (1);

		hash = base + (page - sbase) * hash_len;
	} else {
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * hash_len;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * hash_len;
	}

	if (!cs_valid_range(hash, hash + hash_len,
	    lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}

/*
 * cs_validate_codedirectory
 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:	cd			Pointer to code directory buffer
 *		length			Length of buffer
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */

static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
	struct cs_hash const *hashtype;

	if (length < sizeof(*cd)) {
		return EBADEXEC;
	}
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
		return EBADEXEC;
	}
	if ((cd->pageSize != PAGE_SHIFT_4K) && (cd->pageSize != PAGE_SHIFT)) {
		printf("disallowing unsupported code signature page shift: %u\n", cd->pageSize);
		return EBADEXEC;
	}
	hashtype = cs_find_md(cd->hashType);
	if (hashtype == NULL) {
		return EBADEXEC;
	}

	if (cd->hashSize != hashtype->cs_size) {
		return EBADEXEC;
	}

	if (length < ntohl(cd->hashOffset)) {
		return EBADEXEC;
	}

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
		return EBADEXEC;
	}

	/* check that the code slots fit in the buffer */
	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
		return EBADEXEC;
	}

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
		if (length < ntohl(cd->scatterOffset)) {
			return EBADEXEC;
		}

		const SC_Scatter *scatter = (const SC_Scatter *)
		    (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer; since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while (1) {
			/* check that the end of each scatter buffer is within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
				return EBADEXEC;
			}
			uint32_t scount = ntohl(scatter->count);
			if (scount == 0) {
				break;
			}
			if (nPages + scount < nPages) {
				return EBADEXEC;
			}
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots)) {
			return EBADEXEC;
		}
#endif
	}

	if (length < ntohl(cd->identOffset)) {
		return EBADEXEC;
	}

	/* identifier is a NUL-terminated string */
	if (cd->identOffset) {
		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
			return EBADEXEC;
		}
	} else {
		return EBADEXEC;
	}

	/* team identifier is a NUL-terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset)) {
			return EBADEXEC;
		}

		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
			return EBADEXEC;
		}
	}

	/* linkage is variable-length binary data */
	if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
		const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
		const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);

		if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
			return EBADEXEC;
		}
	}


	return 0;
}
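
/*
 * Note on the slot checks above: computing ntohl(cd->nCodeSlots) *
 * hashtype->cs_size directly could overflow, so the validation divides the
 * available space instead. A hypothetical helper spelling out that pattern:
 */
static inline bool
cs_slots_fit_sketch(size_t avail, size_t slot_size, uint32_t nslots)
{
	/* equivalent to nslots * slot_size <= avail, but cannot overflow */
	return slot_size != 0 && avail / slot_size >= nslots;
}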

/*
 * cs_validate_blob
 *
 * Validate that a generic blob's stated length fits within the buffer
 * that contains it.
 */

static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
		return EBADEXEC;
	}
	return 0;
}

/*
 * cs_validate_csblob
 *
 * Validate the superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		rcd			returns pointer to code directory
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */

static int
cs_validate_csblob(
	const uint8_t *addr,
	const size_t blob_size,
	const CS_CodeDirectory **rcd,
	const CS_GenericBlob **rentitlements,
	const CS_GenericBlob **rder_entitlements)
{
	const CS_GenericBlob *blob;
	int error;
	size_t length;
	bool primary_cd_exists = false;
	const CS_GenericBlob *self_constraint = NULL;
	const CS_GenericBlob *parent_constraint = NULL;
	const CS_GenericBlob *responsible_proc_constraint = NULL;
	const CS_GenericBlob *library_constraint = NULL;

	*rcd = NULL;
	*rentitlements = NULL;
	*rder_entitlements = NULL;

	blob = (const CS_GenericBlob *)(const void *)addr;

	length = blob_size;
	error = cs_validate_blob(blob, length);
	if (error) {
		return error;
	}
	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb;
		uint32_t n, count;
		const CS_CodeDirectory *best_cd = NULL;
		unsigned int best_rank = 0;

		if (length < sizeof(CS_SuperBlob)) {
			return EBADEXEC;
		}

		sb = (const CS_SuperBlob *)blob;
		count = ntohl(sb->count);

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
			return EBADEXEC;
		}

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			uint32_t type = ntohl(blobIndex->type);
			uint32_t offset = ntohl(blobIndex->offset);
			if (length < offset) {
				return EBADEXEC;
			}

			const CS_GenericBlob *subBlob =
			    (const CS_GenericBlob *)(const void *)(addr + offset);

			size_t subLength = length - offset;

			if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
				return error;
			}
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, which are also returned */
			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
				if (type == CSSLOT_CODEDIRECTORY) {
					primary_cd_exists = true;
				}
				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
					return error;
				}
				unsigned int rank = hash_rank(candidate);
				if (cs_debug > 3) {
					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
				}
				if (best_cd == NULL || rank > best_rank) {
					best_cd = candidate;
					best_rank = rank;

					if (cs_debug > 2) {
						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
					}
					*rcd = best_cd;
				} else if (best_cd != NULL && rank == best_rank) {
					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
					return EBADEXEC;
				}
			} else if (type == CSSLOT_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rentitlements != NULL) {
					printf("multiple entitlements blobs\n");
					return EBADEXEC;
				}
				*rentitlements = subBlob;
			} else if (type == CSSLOT_DER_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_DER_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rder_entitlements != NULL) {
					printf("multiple DER entitlements blobs\n");
					return EBADEXEC;
				}
				*rder_entitlements = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_SELF) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (self_constraint != NULL) {
					printf("multiple self constraint blobs\n");
					return EBADEXEC;
				}
				self_constraint = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_PARENT) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (parent_constraint != NULL) {
					printf("multiple parent constraint blobs\n");
					return EBADEXEC;
				}
				parent_constraint = subBlob;
			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (responsible_proc_constraint != NULL) {
					printf("multiple responsible process constraint blobs\n");
					return EBADEXEC;
				}
				responsible_proc_constraint = subBlob;
			} else if (type == CSSLOT_LIBRARY_CONSTRAINT) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
					return EBADEXEC;
				}
				if (library_constraint != NULL) {
					printf("multiple library constraint blobs\n");
					return EBADEXEC;
				}
				library_constraint = subBlob;
			}
		}
		if (!primary_cd_exists) {
			printf("missing primary code directory\n");
			return EBADEXEC;
		}
	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
			return error;
		}
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	if (*rcd == NULL) {
		return EBADEXEC;
	}

	return 0;
}

/*
 * csblob_find_blob_bytes
 *
 * Find a blob in the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use csblob_find_blob() instead.
 *
 * Will also find a "raw" code directory if one is stored that way, in
 * addition to searching the superblob.
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		type			type of blob to find
 *		magic			the magic number for that blob
 *
 * Returns:	pointer			Success
 *		NULL			Buffer not found
 */

const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

	if ((addr + length) < addr) {
		panic("CODE SIGNING: CS Blob length overflow for addr: %p", addr);
	}

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		for (n = 0; n < count; n++) {
			if (ntohl(sb->index[n].type) != type) {
				continue;
			}
			uint32_t offset = ntohl(sb->index[n].offset);
			if (length - sizeof(const CS_GenericBlob) < offset) {
				return NULL;
			}
			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
			if (ntohl(blob->magic) != magic) {
				continue;
			}
			if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
				panic("CODE SIGNING: CS Blob length overflow for blob at: %p", blob);
			} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
				continue;
			}
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
	    && magic == CSMAGIC_CODEDIRECTORY) {
		if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
			panic("CODE SIGNING: CS Blob length overflow for code directory blob at: %p", blob);
		} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
			return NULL;
		}
		return blob;
	}
	return NULL;
}


const CS_GenericBlob *
csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
	if ((csblob->csb_flags & CS_VALID) == 0) {
		return NULL;
	}
	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
}
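
/*
 * Sketch of a typical lookup through csblob_find_blob(): the (slot type,
 * magic) pairing selects, for example, the raw XML entitlements blob from
 * a blob that has already passed validation. Hypothetical helper.
 */
static inline const CS_GenericBlob *
csblob_entitlements_sketch(struct cs_blob *csblob)
{
	/* returns NULL if CS_VALID is clear or the slot is absent */
	return csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
	           CSMAGIC_EMBEDDED_ENTITLEMENTS);
}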

static const uint8_t *
find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
{
	/* there is no zero special slot since that is the first code slot */
	if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
		return NULL;
	}

	return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
}

static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };

static int
csblob_find_special_slot_blob(struct cs_blob* csblob, uint32_t slot, uint32_t magic, const CS_GenericBlob **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *blob;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	if (out_start) {
		*out_start = NULL;
	}
	if (out_length) {
		*out_length = 0;
	}

	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	blob = csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, slot, magic);

	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, slot);

	if (embedded_hash == NULL) {
		if (blob) {
			return EBADEXEC;
		}
		return 0;
	} else if (blob == NULL) {
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, blob, ntohl(blob->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}
	if (out_start) {
		*out_start = blob;
	}
	if (out_length) {
		*out_length = ntohl(blob->length);
	}

	return 0;
}

int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	if ((csblob->csb_flags & CS_VALID) == 0) {
		entitlements = NULL;
	} else {
		entitlements = csblob->csb_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		if (entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (entitlements == NULL) {
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = __DECONST(void *, entitlements);
	*out_length = ntohl(entitlements->length);

	return 0;
}
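
/*
 * Sketch of consuming csblob_get_entitlements(): on success the out
 * parameters point at the entitlements blob (header included), already
 * verified against the code directory's special-slot hash. Hypothetical
 * caller, shown only to illustrate the contract.
 */
static inline bool
csblob_has_entitlements_sketch(struct cs_blob *csblob)
{
	void *start = NULL;
	size_t length = 0;

	if (csblob_get_entitlements(csblob, &start, &length) != 0) {
		return false;   /* hash mismatch: treat as invalid */
	}
	return start != NULL && length > 0;
}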

const CS_GenericBlob*
csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)
{
	if ((csblob->csb_flags & CS_VALID) == 0) {
		return NULL;
	}

	return csblob->csb_der_entitlements_blob;
}

int
csblob_get_der_entitlements(struct cs_blob *csblob, const CS_GenericBlob **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *der_entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
		return EBADEXEC;
	}

	code_dir = csblob->csb_cd;

	if ((csblob->csb_flags & CS_VALID) == 0) {
		der_entitlements = NULL;
	} else {
		der_entitlements = csblob->csb_der_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_DER_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		if (der_entitlements) {
			return EBADEXEC;
		}
		return 0;
	} else if (der_entitlements == NULL) {
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, der_entitlements, ntohl(der_entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
		return EBADEXEC;
	}

	*out_start = der_entitlements;
	*out_length = ntohl(der_entitlements->length);

	return 0;
}

static bool
ubc_cs_blob_pagewise_allocate(
	__unused vm_size_t size)
{
#if CODE_SIGNING_MONITOR
	/* If the monitor isn't enabled, then we don't need to page-align */
	if (csm_enabled() == false) {
		return false;
	}

	/*
	 * Small allocations can be managed by the monitor itself. We only need
	 * to allocate page-wise when it is a sufficiently large allocation and
	 * the monitor cannot manage it on its own.
	 */
	if (size <= csm_signature_size_limit()) {
		return false;
	}

	return true;
#else
	/* Without a monitor, we never need to page align */
	return false;
#endif /* CODE_SIGNING_MONITOR */
}

int
csblob_register_profile(
	__unused struct cs_blob *csblob,
	__unused cs_profile_register_t *profile)
{
#if CODE_SIGNING_MONITOR
	/* Profiles only need to be registered for monitor environments */
	assert(profile->data != NULL);
	assert(profile->size != 0);
	assert(csblob != NULL);

	kern_return_t kr = csm_register_provisioning_profile(
		profile->uuid,
		profile->data, profile->size);

	if ((kr != KERN_SUCCESS) && (kr != KERN_ALREADY_IN_SET)) {
		if (kr == KERN_NOT_SUPPORTED) {
			return 0;
		}
		return EPERM;
	}

	/* Attempt to trust the profile */
	kr = csm_trust_provisioning_profile(
		profile->uuid,
		profile->sig_data, profile->sig_size);

	if (kr != KERN_SUCCESS) {
		return EPERM;
	}

	/* Associate the profile with the monitor's signature object */
	kr = csm_associate_provisioning_profile(
		csblob->csb_csm_obj,
		profile->uuid);

	if (kr != KERN_SUCCESS) {
		return EPERM;
	}

	return 0;
#else
	return 0;
#endif /* CODE_SIGNING_MONITOR */
}

int
csblob_register_profile_uuid(
	struct cs_blob *csblob,
	const uuid_t profile_uuid,
	void *profile_addr,
	vm_size_t profile_size)
{
	cs_profile_register_t profile = {
		.sig_data = NULL,
		.sig_size = 0,
		.data = profile_addr,
		.size = profile_size
	};

	/* Copy the provided UUID */
	memcpy(profile.uuid, profile_uuid, sizeof(profile.uuid));

	return csblob_register_profile(csblob, &profile);
}

/*
 * CODESIGNING
 * End of routines to navigate code signing data structures in the kernel.
 */



/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 *
 */
int
ubc_info_init(struct vnode *vp)
{
	return ubc_info_init_internal(vp, 0, 0);
}


/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		filesize		The size of the file
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return ubc_info_init_internal(vp, 1, filesize);
}


/*
 * ubc_info_init_internal
 *
 * Allocate and attach a ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		withfsize{0,1}		Zero if the size should be obtained
 *					from the vnode; otherwise, use filesize
 *		filesize		The size of the file, if withfsize == 1
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 *
 * Notes:	We call a blocking zalloc(), and the zone was created as an
 *		expandable and collectable zone, so if no memory is available,
 *		it is possible for zalloc() to block indefinitely. zalloc()
 *		may also panic if the zone of zones is exhausted, since it's
 *		NOT expandable.
 *
 *		We unconditionally call vnode_pager_setup(), even if this is
 *		a reuse of a ubc_info; in that case, we should probably assert
 *		that it does not already have a pager association, but do not.
 *
 *		Since memory_object_create_named() can only fail from receiving
 *		an invalid pager argument, the explicit check and panic is
 *		merely precautionary.
 */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	struct ubc_info *uip;
	void *pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {
		uip = zalloc_flags(ubc_info_zone, Z_WAITOK | Z_ZERO);

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We cannot use VNOP_GETATTR() to get an accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use bogus size.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive." Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
	    (memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS) {
		panic("ubc_info_init: memory_object_create_named returned %d", kret);
	}

	assert(control);
	uip->ui_control = control;      /* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error) {
			uip->ui_size = 0;
		}
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */

	return error;
}


/*
 * ubc_info_free
 *
 * Free a ubc_info structure
 *
 * Parameters:	uip			A pointer to the ubc_info to free
 *
 * Returns:	(void)
 *
 * Notes:	If there is a credential that has subsequently been associated
 *		with the ubc_info, the reference to the credential is dropped.
 *
 *		It's actually impossible for a ubc_info.ui_control to take the
 *		value MEMORY_OBJECT_CONTROL_NULL.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
		memory_object_control_deallocate(uip->ui_control);
	}

	cluster_release(uip);
	ubc_cs_free(uip);

	zfree(ubc_info_zone, uip);
	return;
}


void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

/*
 * ubc_setsize_ex
 *
 * Tell the VM that the size of the file represented by the vnode has
 * changed
 *
 * Parameters:	vp	The vp whose backing file size is
 *			being changed
 *		nsize	The new size of the backing file
 *		opts	Options
 *
 * Returns:	EINVAL for new size < 0
 *		ENOENT if no UBC info exists
 *		EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
 *		Other errors (mapped to errno_t) returned by VM functions
 *
 * Notes:	This function will indicate success if the new size is the
 *		same or larger than the old size (in this case, the
 *		remainder of the file will require modification or use of
 *		an existing upl to access successfully).
 *
 *		This function will fail if the new file size is smaller,
 *		and the memory region being invalidated was unable to
 *		actually be invalidated and/or the last page could not be
 *		flushed, if the new size is not aligned to a page
 *		boundary. This is usually indicative of an I/O error.
 */
errno_t
ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;    /* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0) {
		return EINVAL;
	}

	if (!UBCINFOEXISTS(vp)) {
		return ENOENT;
	}

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
		return EAGAIN;
	}

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {   /* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t upl;
		upl_page_info_t *pl;

		/*
		 * new EOF ends up in the middle of a page
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);

		if (kret != KERN_SUCCESS) {
			panic("ubc_setsize: ubc_create_upl (error = %d)", kret);
		}

		if (upl_valid_page(pl, 0)) {
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
		}

		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int flags;

		if (lastpg == 0) {
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		} else {
			flags = MEMORY_OBJECT_DATA_FLUSH;
		}
		/*
		 * invalidate the pages beyond the new EOF page
		 *
		 */
		kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
		    MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS) {
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
		}
	}
	return mach_to_bsd_errno(kret);
}

// Returns true for success
int
ubc_setsize(vnode_t vp, off_t nsize)
{
	return ubc_setsize_ex(vp, nsize, 0) == 0;
}
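
/*
 * Sketch of a filesystem truncating a file through the interface above.
 * Hypothetical caller; note that ubc_setsize() collapses the errno into a
 * boolean, while ubc_setsize_ex() returns the errno itself.
 */
static inline errno_t
ubc_truncate_sketch(vnode_t vp, off_t new_size)
{
	/* refuse to call back into the FS if the file is shrinking */
	return ubc_setsize_ex(vp, new_size, UBC_SETSIZE_NO_FS_REENTRY);
}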

/*
 * ubc_getsize
 *
 * Get the size of the file associated with the specified vnode
 *
 * Parameters:	vp			The vnode whose size is of interest
 *
 * Returns:	0			There is no ubc_info associated with
 *					this vnode, or the size is zero
 *		!0			The size of the file
 *
 * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associated with a
 *		zero-length file and a vnode with no associated ubc_info. The
 *		caller therefore needs to not care, or needs to ensure that
 *		they have previously successfully called ubc_info_init() or
 *		ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way,
	 * as they call this for directories
	 */
	if (!UBCINFOEXISTS(vp)) {
		return (off_t)0;
	}
	return vp->v_ubcinfo->ui_size;
}


/*
 * ubc_umount
 *
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
 * mount point
 *
 * Parameters:	mp			The mount point
 *
 * Returns:	0			Success
 *
 * Notes:	There is no failure indication for this function.
 *
 *		This function is used in the unmount path; since it may block
 *		I/O indefinitely, it should not be used in the forced unmount
 *		path, since a device unavailability could also block that
 *		indefinitely.
 *
 *		Because there is no device ejection interlock on USB, FireWire,
 *		or similar devices, it's possible that an ejection that begins
 *		subsequent to the vnode_iterate() completing, either on one of
 *		those devices, or a network mount for which the server quits
 *		responding, etc., may cause the caller to block indefinitely.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return 0;
}


/*
 * ubc_umcallback
 *
 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
 * and vnode_iterate() for details of implementation.
 */
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{
	if (UBCINFOEXISTS(vp)) {
		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return VNODE_RETURNED;
}


/*
 * ubc_getcred
 *
 * Get the credentials currently active for the ubc_info associated with the
 * vnode.
 *
 * Parameters:	vp			The vnode whose ubc_info credentials
 *					are to be retrieved
 *
 * Returns:	!NOCRED			The credentials
 *		NOCRED			If there is no ubc_info for the vnode,
 *					or if there is one, but it has not had
 *					any credentials associated with it.
 */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp)) {
		return vp->v_ubcinfo->ui_ucred;
	}

	return NOCRED;
}


/*
 * ubc_setthreadcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the supplied thread; otherwise leave
 * them alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used, if not running on an assumed
 *					credential
 *		thread			The thread whose credentials are to
 *					be used
 *
 * Returns:	1			This vnode has no associated ubc_info
 *		0			Success
 *
 * Notes:	This function is generally used only in the following cases:
 *
 *		o a memory mapped file via the mmap() system call
 *		o a swap store backing file
 *		o subsequent to a successful write via vn_write()
 *
 *		The information is then used by the NFS client in order to
 *		cons up a wire message in either the page-in or page-out path.
 *
 *		There are two potential problems with the use of this API:
 *
 *		o Because the write path only sets it on a successful
 *		  write, there is a race window between setting the
 *		  credential and its use to evict the pages to the
 *		  remote file server
 *
 *		o Because a page-in may occur prior to a write, the
 *		  credential may not be set at this time, if the page-in
 *		  is not the result of a mapping established via mmap().
 *
 *		In both these cases, this will be triggered from the paging
 *		path, which will instead use the credential of the current
 *		process, which in this case is either the dynamic_pager or
 *		the kernel task, both of which utilize "root" credentials.
 *
 *		This may potentially permit operations to occur which should
 *		be denied, or it may cause to be denied operations which
 *		should be permitted, depending on the configuration of the NFS
 *		server.
 */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
#pragma unused(p, thread)
	assert(p == current_proc());
	assert(thread == current_thread());

	return ubc_setcred(vp, kauth_cred_get());
}


/*
 * ubc_setcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those specified; otherwise leave them
 * alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		ucred			The credentials to use
 *
 * Returns:	0			This vnode has no associated ubc_info
 *		1			Success
 *
 * Notes:	The return values for this function are inverted from nearly
 *		all other uses in the kernel.
 *
 *		See also ubc_setthreadcred(), above.
 */
int
ubc_setcred(struct vnode *vp, kauth_cred_t ucred)
{
	struct ubc_info *uip;

	/* If there is no ubc_info, deny the operation */
	if (!UBCINFOEXISTS(vp)) {
		return 0;
	}

	/*
	 * Check to see if there is already a credential reference in the
	 * ubc_info; if there is not, take one on the supplied credential.
	 */
	vnode_lock(vp);
	uip = vp->v_ubcinfo;
	if (!IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_ref(ucred);
		uip->ui_ucred = ucred;
	}
	vnode_unlock(vp);

	return 1;
}
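
/*
 * Because ubc_setcred()'s return values are inverted relative to most of
 * the kernel (0 == no ubc_info, 1 == success), a caller has to treat the
 * result as a boolean rather than an errno. Hypothetical helper mapping
 * it back to the usual convention:
 */
static inline int
ubc_setcred_checked_sketch(vnode_t vp, kauth_cred_t cred)
{
	if (ubc_setcred(vp, cred) == 0) {
		return ENOENT;  /* vnode has no ubc_info */
	}
	return 0;
}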

/*
 * ubc_getpager
 *
 * Get the pager associated with the ubc_info associated with the vnode.
 *
 * Parameters:	vp			The vnode to obtain the pager from
 *
 * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
 *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
 *
 * Notes:	For each vnode that has a ubc_info associated with it, that
 *		ubc_info SHALL have a pager associated with it, so in the
 *		normal case, it's impossible to return VNODE_PAGER_NULL for
 *		a vnode with an associated ubc_info.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp)) {
		return vp->v_ubcinfo->ui_pager;
	}

	return 0;
}


/*
 * ubc_getobject
 *
 * Get the memory object control associated with the ubc_info associated with
 * the vnode
 *
 * Parameters:	vp			The vnode to obtain the memory object
 *					from
 *		flags			DEPRECATED
 *
 * Returns:	!MEMORY_OBJECT_CONTROL_NULL
 *		MEMORY_OBJECT_CONTROL_NULL
 *
 * Notes:	Historically, if the flags were not "do not reactivate", this
 *		function would look up the memory object using the pager if
 *		it did not exist (this could be the case if the vnode had
 *		been previously reactivated). The flags would also permit a
 *		hold to be requested, which would have created an object
 *		reference, if one had not already existed. This usage is
 *		deprecated, as it would permit a race between finding and
 *		taking the reference vs. a single reference being dropped in
 *		another thread.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp)) {
		return vp->v_ubcinfo->ui_control;
	}

	return MEMORY_OBJECT_CONTROL_NULL;
}

/*
 * ubc_blktooff
 *
 * Convert a given block number to a memory backing object (file) offset for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the block is located
 *		blkno			The block number to convert
 *
 * Returns:	!-1			The offset into the backing object
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the block to an
 *					offset; the most likely cause is that
 *					the caller specified a block past the
 *					end of the file, but this could also be
 *					any other error from VNOP_BLKTOOFF().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid offset, since an off_t of -1 is normally
 *		used to represent EOF. If we had a more reliable constant in
 *		our header files for it (i.e. explicitly cast to an off_t), we
 *		would use it here instead.
 */
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset = -1;
	int error;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
		if (error) {
			file_offset = -1;
		}
	}

	return file_offset;
}


/*
 * ubc_offtoblk
 *
 * Convert a given offset in a memory backing object into a block number for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the offset is
 *					located
 *		offset			The offset into the backing object
 *
 * Returns:	!-1			The returned block number
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the offset to a
 *					block number; the most likely cause is
 *					that the caller specified an offset
 *					past the end of the file, but this
 *					could also be any other error from
 *					VNOP_OFFTOBLK().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid block number, since block numbers exceed
 *		the valid range for offsets, due to their relative sizes. If
 *		we had a more reliable constant than -1 in our header files
 *		for it (i.e. explicitly cast to a daddr64_t), we would use it
 *		here instead.
 */
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno = -1;
	int error = 0;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_OFFTOBLK(vp, offset, &blkno);
		if (error) {
			blkno = -1;
		}
	}

	return blkno;
}


/*
 * ubc_pages_resident
 *
 * Determine whether or not a given vnode has pages resident via the memory
 * object control associated with the ubc_info associated with the vnode
 *
 * Parameters:	vp			The vnode we want to know about
 *
 * Returns:	1			Yes
 *		0			No
 */
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t kret;
	boolean_t has_pages_resident;

	if (!UBCINFOEXISTS(vp)) {
		return 0;
	}

	/*
	 * The following call may fail if an invalid ui_control is specified,
	 * or if there is no VM object associated with the control object. In
	 * either case, reacting to it as if there were no pages resident will
	 * result in correct behavior.
	 */
	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS) {
		return 0;
	}

	if (has_pages_resident == TRUE) {
		return 1;
	}

	return 0;
}

/*
 * ubc_msync
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			See ubc_msync_internal()
 *
 * Returns:	0			Success
 *		!0			Failure; an errno is returned
 *
 * Implicit Returns:
 *		*resid_off, modified	If non-NULL, the contents are ALWAYS
 *					modified; they are initialized to the
 *					beg_off, and in case of an I/O error,
 *					the difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred. If no error is returned, the
 *					value of the resid_off is undefined; do
 *					NOT use it in place of end_off if you
 *					intend to increment from the end of the
 *					last call and call iteratively.
 *
 * Notes:	see ubc_msync_internal() for more detailed information.
 *
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off) {
		*resid_off = beg_off;
	}

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0) {
		return EINVAL;
	}
	return io_errno;
}
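
/*
 * Sketch: synchronously push the dirty pages of a range and wait for the
 * I/O to complete. Per the notes above, UBC_SYNC is only meaningful in
 * combination with UBC_PUSHDIRTY; see ubc_msync_internal() below.
 * Hypothetical helper.
 */
static inline errno_t
ubc_push_range_sketch(vnode_t vp, off_t start, off_t end)
{
	return ubc_msync(vp, start, end, NULL, UBC_PUSHDIRTY | UBC_SYNC);
}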
1792
1793
1794 /*
1795 * ubc_msync_internal
1796 *
1797 * Clean and/or invalidate a range in the memory object that backs this vnode
1798 *
1799 * Parameters: vp The vnode whose associated ubc_info's
1800 * associated memory object is to have a
1801 * range invalidated within it
1802 * beg_off The start of the range, as an offset
1803 * end_off The end of the range, as an offset
1804 * resid_off The address of an off_t supplied by the
1805 * caller; may be set to NULL to ignore
1806 * flags MUST contain at least one of the flags
1807 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1808 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1809 * UBC_SYNC may also be specified to cause
1810 * this function to block until the
1811 * operation is complete. The behavior
1812 * of UBC_SYNC is otherwise undefined.
1813 * io_errno The address of an int to contain the
1814 * errno from a failed I/O operation, if
1815 * one occurs; may be set to NULL to
1816 * ignore
1817 *
1818 * Returns: 1 Success
1819 * 0 Failure
1820 *
1821 * Implicit Returns:
1822 * *resid_off, modified The contents of this offset MAY be
1823 * modified; in case of an I/O error, the
1824 * difference between beg_off and the
1825 * current value will reflect what was
1826 * able to be written before the error
1827 * occurred.
1828 * *io_errno, modified The contents of this offset are set to
1829 * an errno, if an error occurs; if the
1830 * caller supplies an io_errno parameter,
1831 * they should be careful to initialize it
1832 * to 0 before calling this function to
1833 * enable them to distinguish an error
1834 * with a valid *resid_off from an invalid
1835 * one, and to avoid potentially falsely
1836 * reporting an error, depending on use.
1837 *
1838 * Notes: If there is no ubc_info associated with the vnode supplied,
1839 * this function immediately returns success.
1840 *
1841 * If the value of end_off is less than or equal to beg_off, this
1842 * function immediately returns success; that is, end_off is NOT
1843 * inclusive.
1844 *
1845 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1846 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1847 * attempt to block on in-progress I/O by calling this function
1848 * with UBC_PUSHDIRTY, and then later calling it with just UBC_SYNC
1849 * in order to block on the I/O already in progress.
1850 *
1851 * The start offset is truncated to the page boundary and the
1852 * size is adjusted to include the last page in the range; that
1853 * is, end_off on exactly a page boundary will not change if it
1854 * is rounded, and the range of bytes written will be from the
1855 * truncated beg_off to the rounded (end_off - 1).
1856 */
1857 static int
1858 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1859 {
1860 memory_object_size_t tsize;
1861 kern_return_t kret;
1862 int request_flags = 0;
1863 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
1864
1865 if (!UBCINFOEXISTS(vp)) {
1866 return 0;
1867 }
1868 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1869 return 0;
1870 }
1871 if (end_off <= beg_off) {
1872 return 1;
1873 }
1874
1875 if (flags & UBC_INVALIDATE) {
1876 /*
1877 * discard the resident pages
1878 */
1879 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1880 }
1881
1882 if (flags & UBC_SYNC) {
1883 /*
1884 * wait for all the I/O to complete before returning
1885 */
1886 request_flags |= MEMORY_OBJECT_IO_SYNC;
1887 }
1888
1889 if (flags & UBC_PUSHDIRTY) {
1890 /*
1891 * we only return the dirty pages in the range
1892 */
1893 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1894 }
1895
1896 if (flags & UBC_PUSHALL) {
1897 /*
1898 * then return all the interesting pages in the range (both
1899 * dirty and precious) to the pager
1900 */
1901 flush_flags = MEMORY_OBJECT_RETURN_ALL;
1902 }
1903
1904 beg_off = trunc_page_64(beg_off);
1905 end_off = round_page_64(end_off);
1906 tsize = (memory_object_size_t)end_off - beg_off;
1907
1908 /* flush and/or invalidate pages in the range requested */
1909 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
1910 beg_off, tsize,
1911 (memory_object_offset_t *)resid_off,
1912 io_errno, flush_flags, request_flags,
1913 VM_PROT_NO_CHANGE);
1914
1915 return (kret == KERN_SUCCESS) ? 1 : 0;
1916 }
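/*
 * Worked example of the rounding above (illustrative, assuming a 4 KiB
 * page size): a request for the byte range [5000, 9000) is widened to
 * trunc_page_64(5000) == 4096 and round_page_64(9000) == 12288, so the
 * memory object is asked to operate on the 8 KiB spanning pages 1 and 2.
 */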
1917
1918
1919 /*
1920 * ubc_map
1921 *
1922 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1923 * to it for the ubc system, if there isn't one already, so it will not be
1924 * recycled while it's in use, and set flags on the ubc_info to indicate that
1925 * we have done this
1926 *
1927 * Parameters: vp The vnode to map
1928 * flags The mapping flags for the vnode; this
1929 * will be a combination of one or more of
1930 * PROT_READ, PROT_WRITE, and PROT_EXEC
1931 *
1932 * Returns: 0 Success
1933 * EPERM Permission was denied
1934 *
1935 * Notes: An I/O reference on the vnode must already be held on entry
1936 *
1937 * If there is no ubc_info associated with the vnode, this function
1938 * will return success.
1939 *
1940 * If a permission error occurs, this function will return
1941 * failure; all other failures will cause this function to return
1942 * success.
1943 *
1944 * IMPORTANT: This is an internal use function, and its symbols
1945 * are not exported, hence its error checking is not very robust.
1946 * It is primarily used by:
1947 *
1948 * o mmap(), when mapping a file
1949 * o When mapping a shared file (a shared library in the
1950 * shared segment region)
1951 * o When loading a program image during the exec process
1952 *
1953 * ...all of these uses ignore the return code, and any fault that
1954 * results later because of a failure is handled in the fix-up path
1955 * of the fault handler. The interface exists primarily as a
1956 * performance hint.
1957 *
1958 * Given that third-party implementations of the types of interfaces
1959 * that would use this function, such as alternative executable
1960 * formats, are unsupported, this function is not exported
1961 * for general use.
1962 *
1963 * The extra reference is held until the VM system unmaps the
1964 * vnode from its own context to maintain a vnode reference in
1965 * cases like open()/mmap()/close(), which leave the backing
1966 * object referenced by a mapped memory region in a process
1967 * address space.
1968 */
1969 __private_extern__ int
1970 ubc_map(vnode_t vp, int flags)
1971 {
1972 struct ubc_info *uip;
1973 int error = 0;
1974 int need_ref = 0;
1975 int need_wakeup = 0;
1976
1977 /*
1978 * This call is non-blocking and does not ever fail but it can
1979 * only be made when there is other explicit synchronization
1980 * with reclaiming of the vnode which, in this path, is provided
1981 * by the "mapping in progress" counter.
1982 */
1983 error = vnode_getalways_from_pager(vp);
1984 if (error != 0) {
1985 /* This can't happen */
1986 panic("vnode_getalways returned %d for vp %p", error, vp);
1987 }
1988
1989 if (UBCINFOEXISTS(vp) == 0) {
1990 /*
1991 * The vnode might have started being reclaimed (forced unmount?) while
1992 * this call was in progress.
1993 * The caller is not expecting an error but is expected to figure out that
1994 * the "pager" it used for this vnode is now gone.
1995 */
1996 error = 0;
1997 } else {
1998 vnode_lock(vp);
1999 uip = vp->v_ubcinfo;
2000
2001 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2002 SET(uip->ui_flags, UI_MAPWAITING);
2003 (void) msleep(&uip->ui_flags, &vp->v_lock,
2004 PRIBIO, "ubc_map", NULL);
2005 }
2006 SET(uip->ui_flags, UI_MAPBUSY);
2007 vnode_unlock(vp);
2008
2009 error = VNOP_MMAP(vp, flags, vfs_context_current());
2010
2011 /*
2012 * rdar://problem/22587101 required that we stop propagating
2013 * EPERM up the stack. Otherwise, we would have to funnel up
2014 * the error at all the call sites for memory_object_map().
2015 * The risk is in having to undo the map/object/entry state at
2016 * all these call sites. It would also affect more than just mmap()
2017 * e.g. vm_remap().
2018 *
2019 * if (error != EPERM)
2020 * error = 0;
2021 */
2022
2023 error = 0;
2024
2025 vnode_lock_spin(vp);
2026
2027 if (error == 0) {
2028 if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
2029 need_ref = 1;
2030 }
2031 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
2032 if (flags & PROT_WRITE) {
2033 SET(uip->ui_flags, (UI_WASMAPPEDWRITE | UI_MAPPEDWRITE));
2034 }
2035 }
2036 CLR(uip->ui_flags, UI_MAPBUSY);
2037
2038 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2039 CLR(uip->ui_flags, UI_MAPWAITING);
2040 need_wakeup = 1;
2041 }
2042 vnode_unlock(vp);
2043
2044 if (need_wakeup) {
2045 wakeup(&uip->ui_flags);
2046 }
2047
2048 if (need_ref) {
2049 /*
2050 * Make sure we get a ref as we can't unwind from here
2051 */
2052 if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
2053 panic("%s : VNODE_REF_FORCE failed", __FUNCTION__);
2054 }
2055 /*
2056 * Vnodes that are on "unreliable" media (like disk
2057 * images, network filesystems, 3rd-party filesystems,
2058 * and possibly external devices) could see their
2059 * contents be changed via the backing store without
2060 * triggering copy-on-write, so we can't fully rely
2061 * on copy-on-write and might have to resort to
2062 * copy-on-read to protect "privileged" processes and
2063 * prevent privilege escalation.
2064 *
2065 * The root filesystem is considered "reliable" because
2066 * there's not much point in trying to protect
2067 * ourselves from such a vulnerability and the extra
2068 * cost of copy-on-read (CPU time and memory pressure)
2069 * could result in some serious regressions.
2070 */
2071 if (vp->v_mount != NULL &&
2072 ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
2073 vnode_on_reliable_media(vp))) {
2074 /*
2075 * This vnode is deemed "reliable" so mark
2076 * its VM object as "trusted".
2077 */
2078 memory_object_mark_trusted(uip->ui_control);
2079 } else {
2080 // printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
2081 }
2082 }
2083 }
2084 vnode_put_from_pager(vp);
2085
2086 return error;
2087 }
2088
2089
2090 /*
2091 * ubc_destroy_named
2092 *
2093 * Destroy the named memory object associated with the ubc_info control object
2094 * associated with the designated vnode, if there is a ubc_info associated
2095 * with the vnode, and a control object is associated with it
2096 *
2097 * Parameters: vp The designated vnode
2098 *
2099 * Returns: (void)
2100 *
2101 * Notes: This function is called on vnode termination for all vnodes,
2102 * and must therefore not assume that there is a ubc_info that is
2103 * associated with the vnode, nor that there is a control object
2104 * associated with the ubc_info.
2105 *
2106 * If all the conditions necessary are present, this function
2107 * calls memory_object_destroy(), which will in turn end up
2108 * calling ubc_unmap() to release any vnode references that were
2109 * established via ubc_map().
2110 *
2111 * IMPORTANT: This is an internal use function that is used
2112 * exclusively by the internal use function vclean().
2113 */
2114 __private_extern__ void
2115 ubc_destroy_named(vnode_t vp, vm_object_destroy_reason_t reason)
2116 {
2117 memory_object_control_t control;
2118 struct ubc_info *uip;
2119 kern_return_t kret;
2120
2121 if (UBCINFOEXISTS(vp)) {
2122 uip = vp->v_ubcinfo;
2123
2124 /* Terminate the memory object */
2125 control = ubc_getobject(vp, UBC_HOLDOBJECT);
2126 if (control != MEMORY_OBJECT_CONTROL_NULL) {
2127 kret = memory_object_destroy(control, reason);
2128 if (kret != KERN_SUCCESS) {
2129 panic("ubc_destroy_named: memory_object_destroy failed");
2130 }
2131 }
2132 }
2133 }
2134
2135
2136 /*
2137 * ubc_isinuse
2138 *
2139 * Determine whether or not a vnode is currently in use by ubc at a level in
2140 * excess of the requested busycount
2141 *
2142 * Parameters: vp The vnode to check
2143 * busycount The threshold busy count, used to bias
2144 * the count usually already held by the
2145 * caller to avoid races
2146 *
2147 * Returns: 1 The vnode is in use over the threshold
2148 * 0 The vnode is not in use over the
2149 * threshold
2150 *
2151 * Notes: Because the vnode is only held locked while actually asking
2152 * the use count, this function only represents a snapshot of the
2153 * current state of the vnode. If more accurate information is
2154 * required, an additional busycount should be held by the caller
2155 * and a non-zero busycount used.
2156 *
2157 * If there is no ubc_info associated with the vnode, this
2158 * function will report that the vnode is not in use by ubc.
2159 */
2160 int
2161 ubc_isinuse(struct vnode *vp, int busycount)
2162 {
2163 if (!UBCINFOEXISTS(vp)) {
2164 return 0;
2165 }
2166 return ubc_isinuse_locked(vp, busycount, 0);
2167 }
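/*
 * Example (illustrative sketch): a close path that already holds one use
 * count on the vnode can bias the check by that count to ask whether any
 * other user (e.g. an active mapping) remains:
 *
 *	if (ubc_isinuse(vp, 1)) {
 *		... some other reference is still outstanding ...
 *	}
 */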
2168
2169
2170 /*
2171 * ubc_isinuse_locked
2172 *
2173 * Determine whether or not a vnode is currently in use by ubc at a level in
2174 * excess of the requested busycount
2175 *
2176 * Parameters: vp The vnode to check
2177 * busycount The threshold busy count, used to bias
2178 * the count usually already held by the
2179 * caller to avoid races
2180 * locked True if the vnode is already locked by
2181 * the caller
2182 *
2183 * Returns: 1 The vnode is in use over the threshold
2184 * 0 The vnode is not in use over the
2185 * threshold
2186 *
2187 * Notes: If the vnode is not locked on entry, it is locked while
2188 * actually asking the use count. If this is the case, this
2189 * function only represents a snapshot of the current state of
2190 * the vnode. If more accurate information is required, the
2191 * vnode lock should be held by the caller, otherwise an
2192 * additional busycount should be held by the caller and a
2193 * non-zero busycount used.
2194 *
2195 * If there is no ubc_info associated with the vnode, this
2196 * function will report that the vnode is not in use by ubc.
2197 */
2198 int
2199 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2200 {
2201 int retval = 0;
2202
2203
2204 if (!locked) {
2205 vnode_lock_spin(vp);
2206 }
2207
2208 if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2209 retval = 1;
2210 }
2211
2212 if (!locked) {
2213 vnode_unlock(vp);
2214 }
2215 return retval;
2216 }
2217
2218
2219 /*
2220 * ubc_unmap
2221 *
2222 * Reverse the effects of a ubc_map() call for a given vnode
2223 *
2224 * Parameters: vp vnode to unmap from ubc
2225 *
2226 * Returns: (void)
2227 *
2228 * Notes: This is an internal use function used by vnode_pager_unmap().
2229 * It will attempt to obtain a reference on the supplied vnode,
2230 * and if it can do so, and there is an associated ubc_info, and
2231 * the flags indicate that it was mapped via ubc_map(), then the
2232 * flag is cleared, the mapping removed, and the reference taken
2233 * by ubc_map() is released.
2234 *
2235 * IMPORTANT: This MUST only be called by the VM
2236 * to prevent race conditions.
2237 */
2238 __private_extern__ void
2239 ubc_unmap(struct vnode *vp)
2240 {
2241 struct ubc_info *uip;
2242 int need_rele = 0;
2243 int need_wakeup = 0;
2244 int error = 0;
2245
2246 /*
2247 * This call is non-blocking and does not ever fail but it can
2248 * only be made when there is other explicit synchronization
2249 * with reclaiming of the vnode which, in this path, is provided
2250 * by the "mapping in progress" counter.
2251 */
2252 error = vnode_getalways_from_pager(vp);
2253 if (error != 0) {
2254 /* This can't happen */
2255 panic("vnode_getalways returned %d for vp %p", error, vp);
2256 }
2257
2258 if (UBCINFOEXISTS(vp) == 0) {
2259 /*
2260 * The vnode might have started being reclaimed (forced unmount?) while
2261 * this call was in progress.
2262 * The caller is not expecting an error but is expected to figure out that
2263 * the "pager" it used for this vnode is now gone and take appropriate
2264 * action.
2265 */
2266 } else {
2267 bool want_fsevent = false;
2268
2269 vnode_lock(vp);
2270 uip = vp->v_ubcinfo;
2271
2272 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2273 SET(uip->ui_flags, UI_MAPWAITING);
2274 (void) msleep(&uip->ui_flags, &vp->v_lock,
2275 PRIBIO, "ubc_unmap", NULL);
2276 }
2277 SET(uip->ui_flags, UI_MAPBUSY);
2278
2279 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
2280 if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
2281 want_fsevent = true;
2282 }
2283
2284 need_rele = 1;
2285
2286 /*
2287 * We want to clear the mapped flags after we've called
2288 * VNOP_MNOMAP to avoid certain races and allow
2289 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2290 */
2291 }
2292 vnode_unlock(vp);
2293
2294 if (need_rele) {
2295 vfs_context_t ctx = vfs_context_current();
2296
2297 (void)VNOP_MNOMAP(vp, ctx);
2298
2299 #if CONFIG_FSE
2300 /*
2301 * Why do we want an fsevent here? Normally the
2302 * content modified fsevent is posted when a file is
2303 * closed and only if it's written to via conventional
2304 * means. It's perfectly legal to close a file and
2305 * keep your mappings and we don't currently track
2306 * whether it was written to via a mapping.
2307 * Therefore, we need to post an fsevent here if the
2308 * file was mapped writable. This may result in false
2309 * events, i.e. we post a notification when nothing
2310 * has really changed.
2311 */
2312 if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2313 add_fsevent(FSE_CONTENT_MODIFIED_NO_HLINK, ctx,
2314 FSE_ARG_VNODE, vp,
2315 FSE_ARG_DONE);
2316 }
2317 #endif
2318
2319 vnode_rele(vp);
2320 }
2321
2322 vnode_lock_spin(vp);
2323
2324 if (need_rele) {
2325 CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
2326 }
2327
2328 CLR(uip->ui_flags, UI_MAPBUSY);
2329
2330 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2331 CLR(uip->ui_flags, UI_MAPWAITING);
2332 need_wakeup = 1;
2333 }
2334 vnode_unlock(vp);
2335
2336 if (need_wakeup) {
2337 wakeup(&uip->ui_flags);
2338 }
2339 }
2340 /*
2341 * the drop of the vnode ref will cleanup
2342 */
2343 vnode_put_from_pager(vp);
2344 }
2345
2346
2347 /*
2348 * ubc_page_op
2349 *
2350 * Manipulate individual page state for a vnode with an associated ubc_info
2351 * with an associated memory object control.
2352 *
2353 * Parameters: vp The vnode backing the page
2354 * f_offset A file offset interior to the page
2355 * ops The operations to perform, as a bitmap
2356 * (see below for more information)
2357 * phys_entryp The address of a ppnum_t; may be NULL
2358 * to ignore
2359 * flagsp A pointer to an int to contain flags;
2360 * may be NULL to ignore
2361 *
2362 * Returns: KERN_SUCCESS Success
2363 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2364 * object associated
2365 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2366 * not physically contiguous
2367 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2368 * physically contiguous
2369 * KERN_FAILURE If the page cannot be looked up
2370 *
2371 * Implicit Returns:
2372 * *phys_entryp (modified) If phys_entryp is non-NULL and
2373 * UPL_POP_PHYSICAL
2374 * *flagsp (modified) If flagsp is non-NULL and there was
2375 * !UPL_POP_PHYSICAL and a KERN_SUCCESS
2376 *
2377 * Notes: For object boundaries, it is considerably more efficient to
2378 * ensure that f_offset is in fact on a page boundary, as this
2379 * will avoid internal use of the hash table to identify the
2380 * page, and would therefore skip a number of early optimizations.
2381 * Since this is a page operation anyway, the caller should try
2382 * to pass only a page-aligned offset.
2383 *
2384 * *flagsp may be modified even if this function fails. If it is
2385 * modified, it will contain the condition of the page before the
2386 * requested operation was attempted; these will only include the
2387 * bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2388 * UPL_POP_SET, or UPL_POP_CLR bits.
2389 *
2390 * The flags field may contain a specific operation, such as
2391 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2392 *
2393 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2394 * *phys_entryp and successful, set
2395 * *phys_entryp
2396 * o UPL_POP_DUMP Dump the specified page
2397 *
2398 * Otherwise, it is treated as a bitmap of one or more page
2399 * operations to perform on the final memory object; allowable
2400 * bit values are:
2401 *
2402 * o UPL_POP_DIRTY The page is dirty
2403 * o UPL_POP_PAGEOUT The page is paged out
2404 * o UPL_POP_PRECIOUS The page is precious
2405 * o UPL_POP_ABSENT The page is absent
2406 * o UPL_POP_BUSY The page is busy
2407 *
2408 * If the page status is only being queried and not modified, then
2409 * no other bits should be specified. However, if it is being
2410 * modified, exactly ONE of the following bits should be set:
2411 *
2412 * o UPL_POP_SET Set the current bitmap bits
2413 * o UPL_POP_CLR Clear the current bitmap bits
2414 *
2415 * Thus to effect a combination of setting and clearing, it may be
2416 * necessary to call this function twice. If this is done, the
2417 * set should be used before the clear, since clearing may trigger
2418 * a wakeup on the destination page, and if the page is backed by
2419 * an encrypted swap file, setting will trigger the decryption
2420 * needed before the wakeup occurs.
2421 */
2422 kern_return_t
2423 ubc_page_op(
2424 struct vnode *vp,
2425 off_t f_offset,
2426 int ops,
2427 ppnum_t *phys_entryp,
2428 int *flagsp)
2429 {
2430 memory_object_control_t control;
2431
2432 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2433 if (control == MEMORY_OBJECT_CONTROL_NULL) {
2434 return KERN_INVALID_ARGUMENT;
2435 }
2436
2437 return memory_object_page_op(control,
2438 (memory_object_offset_t)f_offset,
2439 ops,
2440 phys_entryp,
2441 flagsp);
2442 }
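/*
 * Example (illustrative sketch): query the state of the page containing
 * file offset f_offset without modifying it, by specifying neither
 * UPL_POP_SET nor UPL_POP_CLR; the page's condition comes back in flags:
 *
 *	int flags = 0;
 *	kern_return_t kr;
 *
 *	kr = ubc_page_op(vp, trunc_page_64(f_offset), 0, NULL, &flags);
 *
 * A KERN_SUCCESS return with (flags & UPL_POP_DIRTY) set means the
 * resident page is dirty.
 */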
2443
2444
2445 /*
2446 * ubc_range_op
2447 *
2448 * Manipulate page state for a range of memory for a vnode with an associated
2449 * ubc_info with an associated memory object control, when page level state is
2450 * not required to be returned from the call (i.e. there are no phys_entryp or
2451 * flagsp parameters to this call, and it takes a range which may contain
2452 * multiple pages, rather than an offset interior to a single page).
2453 *
2454 * Parameters: vp The vnode backing the page
2455 * f_offset_beg A file offset interior to the start page
2456 * f_offset_end A file offset interior to the end page
2457 * ops The operations to perform, as a bitmap
2458 * (see below for more information)
2459 * range The address of an int; may be NULL to
2460 * ignore
2461 *
2462 * Returns: KERN_SUCCESS Success
2463 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2464 * object associated
2465 * KERN_INVALID_OBJECT If the object is physically contiguous
2466 *
2467 * Implicit Returns:
2468 * *range (modified) If range is non-NULL, its contents will
2469 * be modified to contain the number of
2470 * bytes successfully operated upon.
2471 *
2472 * Notes: IMPORTANT: This function cannot be used on a range that
2473 * consists of physically contiguous pages.
2474 *
2475 * For object boundaries, it is considerably more efficient to
2476 * ensure that f_offset_beg and f_offset_end are in fact on page
2477 * boundaries, as this will avoid internal use of the hash table
2478 * to identify the page, and would therefore skip a number of
2479 * early optimizations. Since this is an operation on a set of
2480 * pages anyway, the caller should try to pass only page-aligned
2481 * offsets.
2482 *
2483 * *range will be modified only if this function succeeds.
2484 *
2485 * The flags field MUST contain a specific operation; allowable
2486 * values are:
2487 *
2488 * o UPL_ROP_ABSENT Returns the extent of the range
2489 * presented which is absent, starting
2490 * with the start address presented
2491 *
2492 * o UPL_ROP_PRESENT Returns the extent of the range
2493 * presented which is present (resident),
2494 * starting with the start address
2495 * presented
2496 * o UPL_ROP_DUMP Dump the pages which are found in the
2497 * target object for the target range.
2498 *
2499 * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2500 * multiple regions in the range, only the first matching region
2501 * is returned.
2502 */
2503 kern_return_t
2504 ubc_range_op(
2505 struct vnode *vp,
2506 off_t f_offset_beg,
2507 off_t f_offset_end,
2508 int ops,
2509 int *range)
2510 {
2511 memory_object_control_t control;
2512
2513 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2514 if (control == MEMORY_OBJECT_CONTROL_NULL) {
2515 return KERN_INVALID_ARGUMENT;
2516 }
2517
2518 return memory_object_range_op(control,
2519 (memory_object_offset_t)f_offset_beg,
2520 (memory_object_offset_t)f_offset_end,
2521 ops,
2522 range);
2523 }
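/*
 * Example (illustrative sketch): probe how much of the range
 * [f_offset, f_offset + len) is absent from the cache:
 *
 *	int range = 0;
 *	kern_return_t kr;
 *
 *	kr = ubc_range_op(vp, f_offset, f_offset + len, UPL_ROP_ABSENT, &range);
 *
 * On KERN_SUCCESS, range holds the byte count of the leading absent
 * extent starting at f_offset.
 */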
2524
2525
2526 /*
2527 * ubc_create_upl
2528 *
2529 * Given a vnode, cause the population of a portion of the vm_object; based on
2530 * the nature of the request, the pages returned may contain valid data, or
2531 * they may be uninitialized.
2532 *
2533 * Parameters: vp The vnode from which to create the upl
2534 * f_offset The start offset into the backing store
2535 * represented by the vnode
2536 * bufsize The size of the upl to create
2537 * uplp Pointer to the upl_t to receive the
2538 * created upl; MUST NOT be NULL
2539 * plp Pointer to receive the internal page
2540 * list for the created upl; MAY be NULL
2541 * to ignore
2542 *
2543 * Returns: KERN_SUCCESS The requested upl has been created
2544 * KERN_INVALID_ARGUMENT The bufsize argument is not an even
2545 * multiple of the page size
2546 * KERN_INVALID_ARGUMENT There is no ubc_info associated with
2547 * the vnode, or there is no memory object
2548 * control associated with the ubc_info
2549 * memory_object_upl_request:KERN_INVALID_VALUE
2550 * The supplied upl_flags argument is
2551 * invalid
2552 * Implicit Returns:
2553 * *uplp (modified)
2554 * *plp (modified) If non-NULL, the value of *plp will be
2555 * modified to point to the internal page
2556 * list; this modification may occur even
2557 * if this function is unsuccessful, in
2558 * which case the contents may be invalid
2559 *
2560 * Note: If successful, the returned *uplp MUST subsequently be freed
2561 * via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2562 * ubc_upl_abort(), or ubc_upl_abort_range().
2563 */
2564 kern_return_t
2565 ubc_create_upl_external(
2566 struct vnode *vp,
2567 off_t f_offset,
2568 int bufsize,
2569 upl_t *uplp,
2570 upl_page_info_t **plp,
2571 int uplflags)
2572 {
2573 return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2574 }
2575
2576 kern_return_t
2577 ubc_create_upl_kernel(
2578 struct vnode *vp,
2579 off_t f_offset,
2580 int bufsize,
2581 upl_t *uplp,
2582 upl_page_info_t **plp,
2583 int uplflags,
2584 vm_tag_t tag)
2585 {
2586 memory_object_control_t control;
2587 kern_return_t kr;
2588
2589 if (plp != NULL) {
2590 *plp = NULL;
2591 }
2592 *uplp = NULL;
2593
2594 if (bufsize & 0xfff) {
2595 return KERN_INVALID_ARGUMENT;
2596 }
2597
2598 if (bufsize > MAX_UPL_SIZE_BYTES) {
2599 return KERN_INVALID_ARGUMENT;
2600 }
2601
2602 if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
2603 if (uplflags & UPL_UBC_MSYNC) {
2604 uplflags &= UPL_RET_ONLY_DIRTY;
2605
2606 uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
2607 UPL_SET_INTERNAL | UPL_SET_LITE;
2608 } else if (uplflags & UPL_UBC_PAGEOUT) {
2609 uplflags &= UPL_RET_ONLY_DIRTY;
2610
2611 if (uplflags & UPL_RET_ONLY_DIRTY) {
2612 uplflags |= UPL_NOBLOCK;
2613 }
2614
2615 uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
2616 UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
2617 } else {
2618 uplflags |= UPL_RET_ONLY_ABSENT |
2619 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2620 UPL_SET_INTERNAL | UPL_SET_LITE;
2621
2622 /*
2623 * if the requested size == PAGE_SIZE, we don't want to set
2624 * the UPL_NOBLOCK since we may be trying to recover from a
2625 * previous partial pagein I/O that occurred because we were low
2626 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2627 * since we're only asking for a single page, we can block w/o fear
2628 * of tying up pages while waiting for more to become available
2629 */
2630 if (bufsize > PAGE_SIZE) {
2631 uplflags |= UPL_NOBLOCK;
2632 }
2633 }
2634 } else {
2635 uplflags &= ~UPL_FOR_PAGEOUT;
2636
2637 if (uplflags & UPL_WILL_BE_DUMPED) {
2638 uplflags &= ~UPL_WILL_BE_DUMPED;
2639 uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2640 } else {
2641 uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2642 }
2643 }
2644 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2645 if (control == MEMORY_OBJECT_CONTROL_NULL) {
2646 return KERN_INVALID_ARGUMENT;
2647 }
2648
2649 kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
2650 if (kr == KERN_SUCCESS && plp != NULL) {
2651 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
2652 }
2653 return kr;
2654 }
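/*
 * Example (illustrative sketch): create a single-page UPL for a
 * pagein-style request; a real caller would fill the absent page via
 * I/O before committing, or abort the UPL on failure:
 *
 *	upl_t upl = NULL;
 *	upl_page_info_t *pl = NULL;
 *	kern_return_t kr;
 *
 *	kr = ubc_create_upl_kernel(vp, trunc_page_64(f_offset), PAGE_SIZE,
 *	    &upl, &pl, UPL_UBC_PAGEIN, VM_KERN_MEMORY_FILE);
 *	if (kr == KERN_SUCCESS) {
 *		... perform the I/O, then ubc_upl_commit(upl) ...
 *	}
 */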
2655
2656
2657 /*
2658 * ubc_upl_maxbufsize
2659 *
2660 * Return the maximum bufsize ubc_create_upl( ) will take.
2661 *
2662 * Parameters: none
2663 *
2664 * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take.
2665 */
2666 upl_size_t
2667 ubc_upl_maxbufsize(
2668 void)
2669 {
2670 return MAX_UPL_SIZE_BYTES;
2671 }
2672
2673 /*
2674 * ubc_upl_map
2675 *
2676 * Map the page list associated with the supplied upl into the kernel virtual
2677 * address space at the virtual address indicated by the dst_addr argument;
2678 * the entire upl is mapped
2679 *
2680 * Parameters: upl The upl to map
2681 * dst_addr The address at which to map the upl
2682 *
2683 * Returns: KERN_SUCCESS The upl has been mapped
2684 * KERN_INVALID_ARGUMENT The upl is UPL_NULL
2685 * KERN_FAILURE The upl is already mapped
2686 * vm_map_enter:KERN_INVALID_ARGUMENT
2687 * A failure code from vm_map_enter() due
2688 * to an invalid argument
2689 */
2690 kern_return_t
2691 ubc_upl_map(
2692 upl_t upl,
2693 vm_offset_t *dst_addr)
2694 {
2695 return vm_upl_map(kernel_map, upl, dst_addr);
2696 }
2697
2698 /*
2699 * ubc_upl_map_range:- similar to ubc_upl_map but the focus is on a range
2700 * of the UPL. Takes an offset, size, and protection so that only a part
2701 * of the UPL can be mapped with the right protections.
2702 */
2703 kern_return_t
2704 ubc_upl_map_range(
2705 upl_t upl,
2706 vm_offset_t offset_to_map,
2707 vm_size_t size_to_map,
2708 vm_prot_t prot_to_map,
2709 vm_offset_t *dst_addr)
2710 {
2711 return vm_upl_map_range(kernel_map, upl, offset_to_map, size_to_map, prot_to_map, dst_addr);
2712 }
2713
2714
2715 /*
2716 * ubc_upl_unmap
2717 *
2718 * Unmap the page list associated with the supplied upl from the kernel virtual
2719 * address space; the entire upl is unmapped.
2720 *
2721 * Parameters: upl The upl to unmap
2722 *
2723 * Returns: KERN_SUCCESS The upl has been unmapped
2724 * KERN_FAILURE The upl is not currently mapped
2725 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL
2726 */
2727 kern_return_t
2728 ubc_upl_unmap(
2729 upl_t upl)
2730 {
2731 return vm_upl_unmap(kernel_map, upl);
2732 }
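/*
 * Example (illustrative sketch): map a UPL into the kernel address
 * space, access its contents, then unmap it; upl is assumed to have
 * been created as in the sketch after ubc_create_upl_kernel() above:
 *
 *	vm_offset_t kaddr = 0;
 *
 *	if (ubc_upl_map(upl, &kaddr) == KERN_SUCCESS) {
 *		... read or write the pages through kaddr ...
 *		(void) ubc_upl_unmap(upl);
 *	}
 */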
2733
2734 /*
2735 * ubc_upl_unmap_range:- similar to ubc_upl_unmap but the focus is
2736 * on part of the UPL that is mapped. The offset and size parameters
2737 * specify what part of the UPL needs to be unmapped.
2738 *
2739 * Note: Currently offset & size are unused as we always initiate the unmap from the
2740 * very beginning of the UPL's mapping and track the mapped size in the UPL. But we
2741 * might want to allow unmapping a UPL in the middle, for example, and we can use the
2742 * offset + size parameters for that purpose.
2743 */
2744 kern_return_t
2745 ubc_upl_unmap_range(
2746 upl_t upl,
2747 vm_offset_t offset_to_unmap,
2748 vm_size_t size_to_unmap)
2749 {
2750 return vm_upl_unmap_range(kernel_map, upl, offset_to_unmap, size_to_unmap);
2751 }
2752
2753
2754 /*
2755 * ubc_upl_commit
2756 *
2757 * Commit the contents of the upl to the backing store
2758 *
2759 * Parameters: upl The upl to commit
2760 *
2761 * Returns: KERN_SUCCESS The upl has been committed
2762 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2763 * KERN_FAILURE The supplied upl does not represent
2764 * device memory, and the offset plus the
2765 * size would exceed the actual size of
2766 * the upl
2767 *
2768 * Notes: In practice, the only return value for this function should be
2769 * KERN_SUCCESS, unless there has been data structure corruption;
2770 * since the upl is deallocated regardless of success or failure,
2771 * there's really nothing to do about this other than panic.
2772 *
2773 * IMPORTANT: Use of this function should not be mixed with use of
2774 * ubc_upl_commit_range(), due to the unconditional deallocation
2775 * by this function.
2776 */
2777 kern_return_t
2778 ubc_upl_commit(
2779 upl_t upl)
2780 {
2781 upl_page_info_t *pl;
2782 kern_return_t kr;
2783
2784 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2785 kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2786 upl_deallocate(upl);
2787 return kr;
2788 }
2789
2790
2791 /*
2792 * ubc_upl_commit_range
2793 *
2794 * Commit the contents of the specified range of the upl to the backing store
2795 *
2796 * Parameters: upl The upl to commit
2797 * offset The offset into the upl
2798 * size The size of the region to be committed,
2799 * starting at the specified offset
2800 * flags commit type (see below)
2801 *
2802 * Returns: KERN_SUCCESS The range has been committed
2803 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2804 * KERN_FAILURE The supplied upl does not represent
2805 * device memory, and the offset plus the
2806 * size would exceed the actual size of
2807 * the upl
2808 *
2809 * Notes: IMPORTANT: If the commit is successful, and the object is now
2810 * empty, the upl will be deallocated. Since the caller cannot
2811 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2812 * should generally only be used when the offset is 0 and the size
2813 * is equal to the upl size.
2814 *
2815 * The flags argument is a bitmap of flags on the range of pages in
2816 * the upl to be committed; allowable flags are:
2817 *
2818 * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is
2819 * both empty and has been
2820 * successfully committed
2821 * o UPL_COMMIT_CLEAR_DIRTY Clear each page's dirty
2822 * bit; will prevent a
2823 * later pageout
2824 * o UPL_COMMIT_SET_DIRTY Set each page's dirty
2825 * bit; will cause a later
2826 * pageout
2827 * o UPL_COMMIT_INACTIVATE Clear each page's
2828 * reference bit; the page
2829 * will not be accessed
2830 * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages
2831 * become busy when an
2832 * IOMemoryDescriptor is
2833 * mapped or redirected,
2834 * and we have to wait for
2835 * an IOKit driver
2836 *
2837 * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2838 * not be specified by the caller.
2839 *
2840 * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2841 * mutually exclusive, and should not be combined.
2842 */
2843 kern_return_t
2844 ubc_upl_commit_range(
2845 upl_t upl,
2846 upl_offset_t offset,
2847 upl_size_t size,
2848 int flags)
2849 {
2850 upl_page_info_t *pl;
2851 boolean_t empty;
2852 kern_return_t kr;
2853
2854 if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2855 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2856 }
2857
2858 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2859 return KERN_INVALID_ARGUMENT;
2860 }
2861
2862 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2863
2864 kr = upl_commit_range(upl, offset, size, flags,
2865 pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2866
2867 if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2868 upl_deallocate(upl);
2869 }
2870
2871 return kr;
2872 }
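/*
 * Example (illustrative sketch, with a hypothetical size): commit a
 * whole UPL of size bytes, clearing the dirty bits so the pages are not
 * pushed out again later, and free the UPL once it is empty:
 *
 *	(void) ubc_upl_commit_range(upl, 0, size,
 *	    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
 */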
2873
2874
2875 /*
2876 * ubc_upl_abort_range
2877 *
2878 * Abort the contents of the specified range of the specified upl
2879 *
2880 * Parameters: upl The upl to abort
2881 * offset The offset into the upl
2882 * size The size of the region to be aborted,
2883 * starting at the specified offset
2884 * abort_flags abort type (see below)
2885 *
2886 * Returns: KERN_SUCCESS The range has been aborted
2887 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2888 * KERN_FAILURE The supplied upl does not represent
2889 * device memory, and the offset plus the
2890 * size would exceed the actual size of
2891 * the upl
2892 *
2893 * Notes: IMPORTANT: If the abort is successful, and the object is now
2894 * empty, the upl will be deallocated. Since the caller cannot
2895 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2896 * should generally only be used when the offset is 0 and the size
2897 * is equal to the upl size.
2898 *
2899 * The abort_flags argument is a bitmap of flags on the range of
2900 * pages in the upl to be aborted; allowable flags are:
2901 *
2902 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2903 * empty and has been successfully
2904 * aborted
2905 * o UPL_ABORT_RESTART The operation must be restarted
2906 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2907 * o UPL_ABORT_ERROR An I/O error occurred
2908 * o UPL_ABORT_DUMP_PAGES Just free the pages
2909 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2910 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2911 *
2912 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2913 * not be specified by the caller. It is intended to fulfill the
2914 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2915 * ubc_upl_commit_range(), but is never referenced internally.
2916 *
2917 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2918 * referenced; do not use it.
2919 */
2920 kern_return_t
2921 ubc_upl_abort_range(
2922 upl_t upl,
2923 upl_offset_t offset,
2924 upl_size_t size,
2925 int abort_flags)
2926 {
2927 kern_return_t kr;
2928 boolean_t empty = FALSE;
2929
2930 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2931 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2932 }
2933
2934 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2935
2936 if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2937 upl_deallocate(upl);
2938 }
2939
2940 return kr;
2941 }
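/*
 * Example (illustrative sketch): on an I/O failure, abort the whole
 * UPL, dump its pages, and free it once empty:
 *
 *	(void) ubc_upl_abort_range(upl, 0, size,
 *	    UPL_ABORT_ERROR | UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
 */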
2942
2943
2944 /*
2945 * ubc_upl_abort
2946 *
2947 * Abort the contents of the specified upl
2948 *
2949 * Parameters: upl The upl to abort
2950 * abort_type abort type (see below)
2951 *
2952 * Returns: KERN_SUCCESS The range has been aborted
2953 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2954 * KERN_FAILURE The supplied upl does not represent
2955 * device memory, and the offset plus the
2956 * size would exceed the actual size of
2957 * the upl
2958 *
2959 * Notes: IMPORTANT: If the abort is successful, and the object is now
2960 * empty, the upl will be deallocated. Since the caller cannot
2961 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2962 * should generally only be used when the offset is 0 and the size
2963 * is equal to the upl size.
2964 *
2965 * The abort_type is a bitmap of flags on the range of
2966 * pages in the upl to be aborted; allowable flags are:
2967 *
2968 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2969 * empty and has been successfully
2970 * aborted
2971 * o UPL_ABORT_RESTART The operation must be restarted
2972 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2973 * o UPL_ABORT_ERROR An I/O error occurred
2974 * o UPL_ABORT_DUMP_PAGES Just free the pages
2975 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2976 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2977 *
2978 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2979 * not be specified by the caller. It is intended to fulfill the
2980 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2981 * ubc_upl_commit_range(), but is never referenced internally.
2982 *
2983 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2984 * referenced; do not use it.
2985 */
2986 kern_return_t
2987 ubc_upl_abort(
2988 upl_t upl,
2989 int abort_type)
2990 {
2991 kern_return_t kr;
2992
2993 kr = upl_abort(upl, abort_type);
2994 upl_deallocate(upl);
2995 return kr;
2996 }
2997
2998
2999 /*
3000 * ubc_upl_pageinfo
3001 *
3002 * Retrieve the internal page list for the specified upl
3003 *
3004 * Parameters: upl The upl to obtain the page list from
3005 *
3006 * Returns: !NULL The (upl_page_info_t *) for the page
3007 * list internal to the upl
3008 * NULL Error/no page list associated
3009 *
3010 * Notes: IMPORTANT: The function is only valid on internal objects
3011 * where the list request was made with the UPL_INTERNAL flag.
3012 *
3013 * This function is a utility helper function, since some callers
3014 * may not have direct access to the header defining the macro,
3015 * due to abstraction layering constraints.
3016 */
3017 upl_page_info_t *
3018 ubc_upl_pageinfo(
3019 upl_t upl)
3020 {
3021 return UPL_GET_INTERNAL_PAGE_LIST(upl);
3022 }
3023
3024
3025 int
3026 UBCINFOEXISTS(const struct vnode * vp)
3027 {
3028 return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
3029 }
3030
3031
3032 void
3033 ubc_upl_range_needed(
3034 upl_t upl,
3035 int index,
3036 int count)
3037 {
3038 upl_range_needed(upl, index, count);
3039 }
3040
3041 boolean_t
3042 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
3043 {
3044 if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
3045 return FALSE;
3046 }
3047 if (writable) {
3048 *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
3049 }
3050 return TRUE;
3051 }
3052
3053 boolean_t
3054 ubc_is_mapped_writable(const struct vnode *vp)
3055 {
3056 boolean_t writable;
3057 return ubc_is_mapped(vp, &writable) && writable;
3058 }
3059
3060 boolean_t
3061 ubc_was_mapped(const struct vnode *vp, boolean_t *writable)
3062 {
3063 if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPED)) {
3064 return FALSE;
3065 }
3066 if (writable) {
3067 *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPEDWRITE);
3068 }
3069 return TRUE;
3070 }
3071
3072 boolean_t
3073 ubc_was_mapped_writable(const struct vnode *vp)
3074 {
3075 boolean_t writable;
3076 return ubc_was_mapped(vp, &writable) && writable;
3077 }
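/*
 * Example (illustrative sketch): a filesystem deciding whether cached
 * file data may have been modified through a memory mapping can combine
 * these helpers:
 *
 *	if (ubc_is_mapped_writable(vp) || ubc_was_mapped_writable(vp)) {
 *		... treat the cached contents as potentially modified ...
 *	}
 */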
3078
3079
3080 /*
3081 * CODE SIGNING
3082 */
3083 static atomic_size_t cs_blob_size = 0;
3084 static atomic_uint_fast32_t cs_blob_count = 0;
3085 static atomic_size_t cs_blob_size_peak = 0;
3086 static atomic_size_t cs_blob_size_max = 0;
3087 static atomic_uint_fast32_t cs_blob_count_peak = 0;
3088
3089 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
3090 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
3091 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
3092 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
3093 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
3094
3095 /*
3096 * Function: csblob_parse_teamid
3097 *
3098 * Description: This function returns a pointer to the team id
3099 * stored within the codedirectory of the csblob.
3100 * If the codedirectory predates team-ids, it returns
3101 * NULL.
3102 * This does not copy the name but returns a pointer to
3103 * it within the CD. Consequently, the CD must remain
3104 * valid for as long as the returned pointer is in use.
3105 */
3106
3107 static const char *
3108 csblob_parse_teamid(struct cs_blob *csblob)
3109 {
3110 const CS_CodeDirectory *cd;
3111
3112 cd = csblob->csb_cd;
3113
3114 if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3115 return NULL;
3116 }
3117
3118 if (cd->teamOffset == 0) {
3119 return NULL;
3120 }
3121
3122 const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
3123 if (cs_debug > 1) {
3124 printf("found team-id %s in cdblob\n", name);
3125 }
3126
3127 return name;
3128 }
3129
3130 kern_return_t
3131 ubc_cs_blob_allocate(
3132 vm_offset_t *blob_addr_p,
3133 vm_size_t *blob_size_p)
3134 {
3135 kern_return_t kr = KERN_FAILURE;
3136 vm_size_t allocation_size = 0;
3137
3138 if (!blob_addr_p || !blob_size_p) {
3139 return KERN_INVALID_ARGUMENT;
3140 }
3141 allocation_size = *blob_size_p;
3142
3143 if (ubc_cs_blob_pagewise_allocate(allocation_size) == true) {
3144 /* Round up to page size */
3145 allocation_size = round_page(allocation_size);
3146
3147 /* Allocate page-wise */
3148 kr = kmem_alloc(
3149 kernel_map,
3150 blob_addr_p,
3151 allocation_size,
3152 KMA_KOBJECT | KMA_DATA | KMA_ZERO,
3153 VM_KERN_MEMORY_SECURITY);
3154 } else {
3155 *blob_addr_p = (vm_offset_t)kalloc_data_tag(
3156 allocation_size,
3157 Z_WAITOK | Z_ZERO,
3158 VM_KERN_MEMORY_SECURITY);
3159
3160 assert(*blob_addr_p != 0);
3161 kr = KERN_SUCCESS;
3162 }
3163
3164 if (kr == KERN_SUCCESS) {
3165 *blob_size_p = allocation_size;
3166 }
3167
3168 return kr;
3169 }
3170
3171 void
3172 ubc_cs_blob_deallocate(
3173 vm_offset_t blob_addr,
3174 vm_size_t blob_size)
3175 {
3176 if (ubc_cs_blob_pagewise_allocate(blob_size) == true) {
3177 kmem_free(kernel_map, blob_addr, blob_size);
3178 } else {
3179 kfree_data(blob_addr, blob_size);
3180 }
3181 }
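/*
 * Example (illustrative sketch): ubc_cs_blob_allocate() may round the
 * requested size up, so the updated size must be saved and passed back
 * to ubc_cs_blob_deallocate():
 *
 *	vm_offset_t addr = 0;
 *	vm_size_t size = length;	(requested length; placeholder)
 *
 *	if (ubc_cs_blob_allocate(&addr, &size) == KERN_SUCCESS) {
 *		... use the buffer ...
 *		ubc_cs_blob_deallocate(addr, size);
 *	}
 */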
3182
3183 /*
3184 * Some codesigned files use a lowest common denominator page size of
3185 * 4KiB, but can be used on systems that have a runtime page size of
3186 * 16KiB. Since faults will only occur on 16KiB ranges in
3187 * cs_validate_range(), we can convert the original Code Directory to
3188 * a multi-level scheme where groups of 4 hashes are combined to form
3189 * a new hash, which represents 16KiB in the on-disk file. This can
3190 * reduce the wired memory requirement for the Code Directory by
3191 * 75%.
3192 */
3193 static boolean_t
3194 ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
3195 {
3196 const CS_CodeDirectory *cd;
3197
3198 #if CODE_SIGNING_MONITOR
3199 // TODO: <rdar://problem/30954826>
3200 if (csm_enabled() == true) {
3201 return FALSE;
3202 }
3203 #endif
3204
3205 /*
3206 * Only applies to binaries that ship as part of the OS,
3207 * primarily the shared cache.
3208 */
3209 if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3210 return FALSE;
3211 }
3212
3213 /*
3214 * If the runtime page size matches the code signing page
3215 * size, there is no work to do.
3216 */
3217 if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3218 return FALSE;
3219 }
3220
3221 cd = blob->csb_cd;
3222
3223 /*
3224 * There must be a valid integral multiple of hashes
3225 */
3226 if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3227 return FALSE;
3228 }
3229
3230 /*
3231 * Scatter lists must also have ranges that have an integral number of hashes
3232 */
3233 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3234 const SC_Scatter *scatter = (const SC_Scatter*)
3235 ((const char*)cd + ntohl(cd->scatterOffset));
3236 /* iterate all scatter structs to make sure they are all aligned */
3237 do {
3238 uint32_t sbase = ntohl(scatter->base);
3239 uint32_t scount = ntohl(scatter->count);
3240
3241 /* last scatter? */
3242 if (scount == 0) {
3243 break;
3244 }
3245
3246 if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3247 return FALSE;
3248 }
3249
3250 if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3251 return FALSE;
3252 }
3253
3254 scatter++;
3255 } while (1);
3256 }
3257
3258 /* Covered range must be a multiple of the new page size */
3259 if (ntohl(cd->codeLimit) & PAGE_MASK) {
3260 return FALSE;
3261 }
3262
3263 /* All checks pass */
3264 return TRUE;
3265 }
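/*
 * Worked example of the alignment checks above (illustrative): with a
 * 16 KiB runtime page (PAGE_SHIFT == 14) and 4 KiB code signing pages
 * (csb_hash_pageshift == 12), PAGE_MASK >> csb_hash_pageshift == 3, so
 * nCodeSlots, scatter bases and scatter counts must all be multiples of
 * 4: exactly the 4-to-1 grouping the multilevel conversion performs.
 */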
3266
3267 /*
3268 * Reconstruct a cs_blob with the code signature fields. This helper function
3269 * is useful because a lot of things often change the base address of the code
3270 * signature blob, which requires reconstructing some of the other pointers
3271 * within.
3272 */
3273 static errno_t
3274 ubc_cs_blob_reconstruct(
3275 struct cs_blob *cs_blob,
3276 const vm_address_t signature_addr,
3277 const vm_address_t signature_size,
3278 const vm_offset_t code_directory_offset)
3279 {
3280 const CS_CodeDirectory *code_directory = NULL;
3281
3282 /* Setup the signature blob address */
3283 cs_blob->csb_mem_kaddr = (void*)signature_addr;
3284 cs_blob->csb_mem_size = signature_size;
3285
3286 /* Setup the code directory in the blob */
3287 code_directory = (const CS_CodeDirectory*)(signature_addr + code_directory_offset);
3288 cs_blob->csb_cd = code_directory;
3289
3290 /* Setup the XML entitlements */
3291 cs_blob->csb_entitlements_blob = csblob_find_blob_bytes(
3292 (uint8_t*)signature_addr,
3293 signature_size,
3294 CSSLOT_ENTITLEMENTS,
3295 CSMAGIC_EMBEDDED_ENTITLEMENTS);
3296
3297 /* Setup the DER entitlements */
3298 cs_blob->csb_der_entitlements_blob = csblob_find_blob_bytes(
3299 (uint8_t*)signature_addr,
3300 signature_size,
3301 CSSLOT_DER_ENTITLEMENTS,
3302 CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3303
3304 return 0;
3305 }
3306
3307 /*
3308 * Given a validated cs_blob, we reformat the structure to only include
3309 * the blobs which are required by the kernel for our current platform.
3310 * This saves significant memory with agile signatures.
3311 *
3312 * To support rewriting the code directory, potentially through
3313 * multilevel hashes, we provide a mechanism to allocate a code directory
3314 * of a specified size and zero it out --> caller can fill it in.
3315 *
3316 * We don't need to perform a lot of overflow checks as the assumption
3317 * here is that the cs_blob has already been validated.
3318 */
3319 static errno_t
3320 ubc_cs_reconstitute_code_signature(
3321 const struct cs_blob * const blob,
3322 vm_address_t * const ret_mem_kaddr,
3323 vm_size_t * const ret_mem_size,
3324 vm_size_t code_directory_size,
3325 CS_CodeDirectory ** const code_directory
3326 )
3327 {
3328 vm_address_t new_blob_addr = 0;
3329 vm_size_t new_blob_size = 0;
3330 vm_size_t new_code_directory_size = 0;
3331 const CS_GenericBlob *best_code_directory = NULL;
3332 const CS_GenericBlob *first_code_directory = NULL;
3333 const CS_GenericBlob *der_entitlements_blob = NULL;
3334 const CS_GenericBlob *entitlements_blob = NULL;
3335 const CS_GenericBlob *cms_blob = NULL;
3336 const CS_GenericBlob *launch_constraint_self = NULL;
3337 const CS_GenericBlob *launch_constraint_parent = NULL;
3338 const CS_GenericBlob *launch_constraint_responsible = NULL;
3339 const CS_GenericBlob *library_constraint = NULL;
3340 CS_SuperBlob *superblob = NULL;
3341 uint32_t num_blobs = 0;
3342 uint32_t blob_index = 0;
3343 uint32_t blob_offset = 0;
3344 kern_return_t ret;
3345 int err;
3346
3347 if (!blob) {
3348 if (cs_debug > 1) {
3349 printf("CODE SIGNING: CS Blob passed in is NULL\n");
3350 }
3351 return EINVAL;
3352 }
3353
3354 best_code_directory = (const CS_GenericBlob*)blob->csb_cd;
3355 if (!best_code_directory) {
3356 /* This case can never happen, and it is a sign of bad things */
3357 panic("CODE SIGNING: Validated CS Blob has no code directory");
3358 }
3359
3360 new_code_directory_size = code_directory_size;
3361 if (new_code_directory_size == 0) {
3362 new_code_directory_size = ntohl(best_code_directory->length);
3363 }
3364
3365 /*
3366 * A code signature can contain multiple code directories, each of which contains hashes
3367 * of pages based on a hashing algorithm. The kernel selects which hashing algorithm is
3368 * the strongest, and consequently, marks one of these code directories as the best
3369 * matched one. More often than not, the best matched one is _not_ the first one.
3370 *
3371 * However, the CMS blob which cryptographically verifies the code signature is only
3372 * signed against the first code directory. Therefore, if the CMS blob is present, we also
3373 * need the first code directory to be able to verify it. Given this, we organize the
3374 * new cs_blob as following order:
3375 *
3376 * 1. best code directory
3377 * 2. DER encoded entitlements blob (if present)
3378 * 3. launch constraint self (if present)
3379 * 4. launch constraint parent (if present)
3380 * 5. launch constraint responsible (if present)
3381 * 6. library constraint (if present)
3382 * 7. entitlements blob (if present)
3383 * 8. cms blob (if present)
3384 * 9. first code directory (if not already the best match, and if cms blob is present)
3385 *
3386 * This order is chosen deliberately, as later on, we expect to get rid of the CMS blob
3387 * and the first code directory once their verification is complete.
3388 */
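/*
 * Worked sizing example (illustrative, with made-up lengths): a signature
 * carrying a best code directory of 0x1000 bytes, a CMS blob of 0x200
 * bytes and a distinct first code directory of 0x800 bytes yields
 * sizeof(CS_SuperBlob) + 3 * sizeof(CS_BlobIndex) + 0x1000 + 0x200 + 0x800
 * bytes before any page rounding by ubc_cs_blob_allocate().
 */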
3389
3390 /* Storage for the super blob header */
3391 new_blob_size += sizeof(CS_SuperBlob);
3392
3393 /* Guaranteed storage for the best code directory */
3394 new_blob_size += sizeof(CS_BlobIndex);
3395 new_blob_size += new_code_directory_size;
3396 num_blobs += 1;
3397
3398 /* Conditional storage for the DER entitlements blob */
3399 der_entitlements_blob = blob->csb_der_entitlements_blob;
3400 if (der_entitlements_blob) {
3401 new_blob_size += sizeof(CS_BlobIndex);
3402 new_blob_size += ntohl(der_entitlements_blob->length);
3403 num_blobs += 1;
3404 }
3405
3406 /* Conditional storage for the launch constraints self blob */
3407 launch_constraint_self = csblob_find_blob_bytes(
3408 (const uint8_t *)blob->csb_mem_kaddr,
3409 blob->csb_mem_size,
3410 CSSLOT_LAUNCH_CONSTRAINT_SELF,
3411 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3412 if (launch_constraint_self) {
3413 new_blob_size += sizeof(CS_BlobIndex);
3414 new_blob_size += ntohl(launch_constraint_self->length);
3415 num_blobs += 1;
3416 }
3417
3418 /* Conditional storage for the launch constraints parent blob */
3419 launch_constraint_parent = csblob_find_blob_bytes(
3420 (const uint8_t *)blob->csb_mem_kaddr,
3421 blob->csb_mem_size,
3422 CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3423 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3424 if (launch_constraint_parent) {
3425 new_blob_size += sizeof(CS_BlobIndex);
3426 new_blob_size += ntohl(launch_constraint_parent->length);
3427 num_blobs += 1;
3428 }
3429
3430 /* Conditional storage for the launch constraints responsible blob */
3431 launch_constraint_responsible = csblob_find_blob_bytes(
3432 (const uint8_t *)blob->csb_mem_kaddr,
3433 blob->csb_mem_size,
3434 CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3435 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3436 if (launch_constraint_responsible) {
3437 new_blob_size += sizeof(CS_BlobIndex);
3438 new_blob_size += ntohl(launch_constraint_responsible->length);
3439 num_blobs += 1;
3440 }
3441
3442 /* Conditional storage for the library constraints blob */
3443 library_constraint = csblob_find_blob_bytes(
3444 (const uint8_t *)blob->csb_mem_kaddr,
3445 blob->csb_mem_size,
3446 CSSLOT_LIBRARY_CONSTRAINT,
3447 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3448 if (library_constraint) {
3449 new_blob_size += sizeof(CS_BlobIndex);
3450 new_blob_size += ntohl(library_constraint->length);
3451 num_blobs += 1;
3452 }
3453
3454 /* Conditional storage for the entitlements blob */
3455 entitlements_blob = blob->csb_entitlements_blob;
3456 if (entitlements_blob) {
3457 new_blob_size += sizeof(CS_BlobIndex);
3458 new_blob_size += ntohl(entitlements_blob->length);
3459 num_blobs += 1;
3460 }
3461
3462 /* Conditional storage for the CMS blob */
3463 cms_blob = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
3464 if (cms_blob) {
3465 new_blob_size += sizeof(CS_BlobIndex);
3466 new_blob_size += ntohl(cms_blob->length);
3467 num_blobs += 1;
3468 }
3469
3470 /*
3471 * Conditional storage for the first code directory.
3472 * This is only needed if a CMS blob exists and the best code directory isn't already
3473 * the first one. It is an error if we find a CMS blob but do not find a first code directory.
3474 */
3475 if (cms_blob) {
3476 first_code_directory = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);
3477 if (first_code_directory == best_code_directory) {
3478 /* We don't need the first code directory anymore, since the best one is already it */
3479 first_code_directory = NULL;
3480 } else if (first_code_directory) {
3481 new_blob_size += sizeof(CS_BlobIndex);
3482 new_blob_size += ntohl(first_code_directory->length);
3483 num_blobs += 1;
3484 } else {
3485 printf("CODE SIGNING: Invalid CS Blob: found CMS blob but not a first code directory\n");
3486 return EINVAL;
3487 }
3488 }
3489
3490 /*
3491 * The blob size could be rounded up to the page size here, so we keep a copy
3492 * of the actual superblob length as well.
3493 */
3494 vm_size_t new_blob_allocation_size = new_blob_size;
3495 ret = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_allocation_size);
3496 if (ret != KERN_SUCCESS) {
3497 printf("CODE SIGNING: Failed to allocate memory for new code signing blob: %d\n", ret);
3498 return ENOMEM;
3499 }
3500
3501 /*
3502 * Fill out the superblob header and then all the blobs in the order listed
3503 * above.
3504 */
3505 superblob = (CS_SuperBlob*)new_blob_addr;
3506 superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3507 superblob->length = htonl((uint32_t)new_blob_size);
3508 superblob->count = htonl(num_blobs);
3509
3510 blob_index = 0;
3511 blob_offset = sizeof(CS_SuperBlob) + (num_blobs * sizeof(CS_BlobIndex));
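/*
 * Editor's worked example of the offset math above, assuming the usual layout
 * of these structures: CS_SuperBlob is 12 bytes (magic, length, count) and
 * each CS_BlobIndex is 8 bytes (type, offset), so a superblob carrying 4 blobs
 * places the first blob body at 12 + 4 * 8 = 44 bytes into the allocation.
 */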
3512
3513 /* Best code directory */
3514 superblob->index[blob_index].offset = htonl(blob_offset);
3515 if (first_code_directory) {
3516 superblob->index[blob_index].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES);
3517 } else {
3518 superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3519 }
3520
3521 if (code_directory_size > 0) {
3522 /* We zero out the code directory, as we expect the caller to fill it in */
3523 memset((void*)(new_blob_addr + blob_offset), 0, new_code_directory_size);
3524 } else {
3525 memcpy((void*)(new_blob_addr + blob_offset), best_code_directory, new_code_directory_size);
3526 }
3527
3528 if (code_directory) {
3529 *code_directory = (CS_CodeDirectory*)(new_blob_addr + blob_offset);
3530 }
3531 blob_offset += new_code_directory_size;
3532
3533 /* DER entitlements blob */
3534 if (der_entitlements_blob) {
3535 blob_index += 1;
3536 superblob->index[blob_index].offset = htonl(blob_offset);
3537 superblob->index[blob_index].type = htonl(CSSLOT_DER_ENTITLEMENTS);
3538
3539 memcpy((void*)(new_blob_addr + blob_offset), der_entitlements_blob, ntohl(der_entitlements_blob->length));
3540 blob_offset += ntohl(der_entitlements_blob->length);
3541 }
3542
3543 /* Launch constraints self blob */
3544 if (launch_constraint_self) {
3545 blob_index += 1;
3546 superblob->index[blob_index].offset = htonl(blob_offset);
3547 superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_SELF);
3548
3549 memcpy(
3550 (void*)(new_blob_addr + blob_offset),
3551 launch_constraint_self,
3552 ntohl(launch_constraint_self->length));
3553
3554 blob_offset += ntohl(launch_constraint_self->length);
3555 }
3556
3557 /* Launch constraints parent blob */
3558 if (launch_constraint_parent) {
3559 blob_index += 1;
3560 superblob->index[blob_index].offset = htonl(blob_offset);
3561 superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_PARENT);
3562
3563 memcpy(
3564 (void*)(new_blob_addr + blob_offset),
3565 launch_constraint_parent,
3566 ntohl(launch_constraint_parent->length));
3567
3568 blob_offset += ntohl(launch_constraint_parent->length);
3569 }
3570
3571 /* Launch constraints responsible blob */
3572 if (launch_constraint_responsible) {
3573 blob_index += 1;
3574 superblob->index[blob_index].offset = htonl(blob_offset);
3575 superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE);
3576
3577 memcpy(
3578 (void*)(new_blob_addr + blob_offset),
3579 launch_constraint_responsible,
3580 ntohl(launch_constraint_responsible->length));
3581
3582 blob_offset += ntohl(launch_constraint_responsible->length);
3583 }
3584
3585 /* library constraints blob */
3586 if (library_constraint) {
3587 blob_index += 1;
3588 superblob->index[blob_index].offset = htonl(blob_offset);
3589 superblob->index[blob_index].type = htonl(CSSLOT_LIBRARY_CONSTRAINT);
3590
3591 memcpy(
3592 (void*)(new_blob_addr + blob_offset),
3593 library_constraint,
3594 ntohl(library_constraint->length));
3595
3596 blob_offset += ntohl(library_constraint->length);
3597 }
3598
3599 /* Entitlements blob */
3600 if (entitlements_blob) {
3601 blob_index += 1;
3602 superblob->index[blob_index].offset = htonl(blob_offset);
3603 superblob->index[blob_index].type = htonl(CSSLOT_ENTITLEMENTS);
3604
3605 memcpy((void*)(new_blob_addr + blob_offset), entitlements_blob, ntohl(entitlements_blob->length));
3606 blob_offset += ntohl(entitlements_blob->length);
3607 }
3608
3609 /* CMS blob */
3610 if (cms_blob) {
3611 blob_index += 1;
3612 superblob->index[blob_index].offset = htonl(blob_offset);
3613 superblob->index[blob_index].type = htonl(CSSLOT_SIGNATURESLOT);
3614 memcpy((void*)(new_blob_addr + blob_offset), cms_blob, ntohl(cms_blob->length));
3615 blob_offset += ntohl(cms_blob->length);
3616 }
3617
3618 /* First code directory */
3619 if (first_code_directory) {
3620 blob_index += 1;
3621 superblob->index[blob_index].offset = htonl(blob_offset);
3622 superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3623 memcpy((void*)(new_blob_addr + blob_offset), first_code_directory, ntohl(first_code_directory->length));
3624 blob_offset += ntohl(first_code_directory->length);
3625 }
3626
3627 /*
3628 * We only validate the blob if we copied in the best code directory.
3629 * If the code directory size we were passed in wasn't 0, we memset the best
3630 * code directory to 0 and expect the caller to fill it in. In the same spirit, we
3631 * expect the caller to validate the code signature after they fill in the code
3632 * directory.
3633 */
3634 if (code_directory_size == 0) {
3635 const CS_CodeDirectory *validated_code_directory = NULL;
3636 const CS_GenericBlob *validated_entitlements_blob = NULL;
3637 const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3638
3639 ret = cs_validate_csblob(
3640 (const uint8_t *)superblob,
3641 new_blob_size,
3642 &validated_code_directory,
3643 &validated_entitlements_blob,
3644 &validated_der_entitlements_blob);
3645
3646 if (ret) {
3647 printf("unable to validate reconstituted cs_blob: %d\n", ret);
3648 err = EINVAL;
3649 goto fail;
3650 }
3651 }
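/*
 * Editor's note: ubc_cs_convert_to_multilevel_hash() below is an example of
 * the caller-side half of this contract. It passes a non-zero code directory
 * size, fills in the zeroed directory itself, and only then runs
 * cs_validate_csblob() on the rebuilt superblob.
 */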
3652
3653 if (ret_mem_kaddr) {
3654 *ret_mem_kaddr = new_blob_addr;
3655 }
3656 if (ret_mem_size) {
3657 *ret_mem_size = new_blob_allocation_size;
3658 }
3659
3660 return 0;
3661
3662 fail:
3663 ubc_cs_blob_deallocate(new_blob_addr, new_blob_allocation_size);
3664 return err;
3665 }
3666
3667 /*
3668 * We use this function to clear out parts of the code signature blob which are
3669 * no longer needed, and give that memory back to the kernel. This is needed
3670 * since reconstitution includes extra data which is required only for
3671 * verification and serves no purpose afterwards.
3672 *
3673 * This results in significant memory reduction, especially for 3rd party apps
3674 * since we also get rid of the CMS blob.
3675 */
3676 static errno_t
3677 ubc_cs_reconstitute_code_signature_2nd_stage(
3678 struct cs_blob *blob
3679 )
3680 {
3681 kern_return_t ret = KERN_FAILURE;
3682 const CS_GenericBlob *launch_constraint_self = NULL;
3683 const CS_GenericBlob *launch_constraint_parent = NULL;
3684 const CS_GenericBlob *launch_constraint_responsible = NULL;
3685 const CS_GenericBlob *library_constraint = NULL;
3686 CS_SuperBlob *superblob = NULL;
3687 uint32_t num_blobs = 0;
3688 vm_size_t last_needed_blob_offset = 0;
3689 vm_offset_t code_directory_offset = 0;
3690
3691 /*
3692 * Ordering of blobs we need to keep:
3693 * 1. Code directory
3694 * 2. DER encoded entitlements (if present)
3695 * 3. Launch constraints self (if present)
3696 * 4. Launch constraints parent (if present)
3697 * 5. Launch constraints responsible (if present)
3698 * 6. Library constraints (if present)
3699 *
3700 * We need to clear out the remaining page after these blobs end, and fix up
3701 * the superblob for the changes. Things get a little more complicated for
3702 * blobs which may not have been kmem_allocated. For those, we simply
3703 * allocate the new required space and copy into it.
3704 */
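/*
 * Editor's before/after sketch (hypothetical blob set): a signature that
 * arrived from 1st stage reconstitution as
 *
 *     [hdr][CD][DER entitlements][launch constraints self][CMS][first CD]
 *
 * is trimmed here to
 *
 *     [hdr][CD][DER entitlements][launch constraints self]
 *
 * with superblob->count and superblob->length fixed up to match, and the
 * memory past last_needed_blob_offset returned to the kernel.
 */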
3705
3706 if (blob == NULL) {
3707 printf("NULL blob passed in for 2nd stage reconstitution\n");
3708 return EINVAL;
3709 }
3710 assert(blob->csb_reconstituted == true);
3711
3712 /* Ensure we're not page-wise allocated when in this function */
3713 assert(ubc_cs_blob_pagewise_allocate(blob->csb_mem_size) == false);
3714
3715 if (!blob->csb_cd) {
3716 /* This case should never happen; a validated cs_blob always has a code directory */
3717 panic("validated cs_blob has no code directory");
3718 }
3719 superblob = (CS_SuperBlob*)blob->csb_mem_kaddr;
3720
3721 num_blobs = 1;
3722 last_needed_blob_offset = ntohl(superblob->index[0].offset) + ntohl(blob->csb_cd->length);
3723
3724 /* Check for DER entitlements */
3725 if (blob->csb_der_entitlements_blob) {
3726 num_blobs += 1;
3727 last_needed_blob_offset += ntohl(blob->csb_der_entitlements_blob->length);
3728 }
3729
3730 /* Check for launch constraints self */
3731 launch_constraint_self = csblob_find_blob_bytes(
3732 (const uint8_t *)blob->csb_mem_kaddr,
3733 blob->csb_mem_size,
3734 CSSLOT_LAUNCH_CONSTRAINT_SELF,
3735 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3736 if (launch_constraint_self) {
3737 num_blobs += 1;
3738 last_needed_blob_offset += ntohl(launch_constraint_self->length);
3739 }
3740
3741 /* Check for launch constraints parent */
3742 launch_constraint_parent = csblob_find_blob_bytes(
3743 (const uint8_t *)blob->csb_mem_kaddr,
3744 blob->csb_mem_size,
3745 CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3746 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3747 if (launch_constraint_parent) {
3748 num_blobs += 1;
3749 last_needed_blob_offset += ntohl(launch_constraint_parent->length);
3750 }
3751
3752 /* Check for launch constraints responsible */
3753 launch_constraint_responsible = csblob_find_blob_bytes(
3754 (const uint8_t *)blob->csb_mem_kaddr,
3755 blob->csb_mem_size,
3756 CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3757 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3758 if (launch_constraint_responsible) {
3759 num_blobs += 1;
3760 last_needed_blob_offset += ntohl(launch_constraint_responsible->length);
3761 }
3762
3763 /* Check for library constraint */
3764 library_constraint = csblob_find_blob_bytes(
3765 (const uint8_t *)blob->csb_mem_kaddr,
3766 blob->csb_mem_size,
3767 CSSLOT_LIBRARY_CONSTRAINT,
3768 CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3769 if (library_constraint) {
3770 num_blobs += 1;
3771 last_needed_blob_offset += ntohl(library_constraint->length);
3772 }
3773
3774 superblob->count = htonl(num_blobs);
3775 superblob->length = htonl((uint32_t)last_needed_blob_offset);
3776
3777 /*
3778 * There is a chance that the code directory is marked within the superblob as an
3779 * alternate code directory. This happens when the first code directory isn't the
3780 * best one chosen by the kernel, so to be able to access both the first and the best,
3781 * we save the best one as an alternate one. Since we're getting rid of the first one
3782 * here, we mark the best one as the first one.
3783 */
3784 superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3785
3786 vm_address_t new_superblob = 0;
3787 vm_size_t new_superblob_size = last_needed_blob_offset;
3788
3789 ret = ubc_cs_blob_allocate(&new_superblob, &new_superblob_size);
3790 if (ret != KERN_SUCCESS) {
3791 printf("unable to allocate memory for 2nd stage reconstitution: %d\n", ret);
3792 return ENOMEM;
3793 }
3794 assert(new_superblob_size == last_needed_blob_offset);
3795
3796 /* Calculate the code directory offset */
3797 code_directory_offset = (vm_offset_t)blob->csb_cd - (vm_offset_t)blob->csb_mem_kaddr;
3798
3799 /* Copy in the updated superblob into the new memory */
3800 memcpy((void*)new_superblob, superblob, new_superblob_size);
3801
3802 /* Free the old code signature and old memory */
3803 ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3804
3805 /* Reconstruct critical fields in the blob object */
3806 ubc_cs_blob_reconstruct(
3807 blob,
3808 new_superblob,
3809 new_superblob_size,
3810 code_directory_offset);
3811
3812 /* XML entitlements should've been removed */
3813 assert(blob->csb_entitlements_blob == NULL);
3814
3815 const CS_CodeDirectory *validated_code_directory = NULL;
3816 const CS_GenericBlob *validated_entitlements_blob = NULL;
3817 const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3818
3819 ret = cs_validate_csblob(
3820 (const uint8_t*)blob->csb_mem_kaddr,
3821 blob->csb_mem_size,
3822 &validated_code_directory,
3823 &validated_entitlements_blob,
3824 &validated_der_entitlements_blob);
3825 if (ret) {
3826 printf("unable to validate code signature after 2nd stage reconstitution: %d\n", ret);
3827 return EINVAL;
3828 }
3829
3830 return 0;
3831 }
3832
3833 static int
3834 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3835 {
3836 const CS_CodeDirectory *old_cd, *cd;
3837 CS_CodeDirectory *new_cd;
3838 const CS_GenericBlob *entitlements;
3839 const CS_GenericBlob *der_entitlements;
3840 vm_offset_t new_blob_addr;
3841 vm_size_t new_blob_size;
3842 vm_size_t new_cdsize;
3843 int error;
3844
3845 uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3846
3847 if (cs_debug > 1) {
3848 printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3849 (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3850 }
3851
3852 old_cd = blob->csb_cd;
3853
3854 /* Up to the hashes, we can copy all data */
3855 new_cdsize = ntohl(old_cd->hashOffset);
3856 new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3857
3858 error = ubc_cs_reconstitute_code_signature(blob, &new_blob_addr, &new_blob_size, new_cdsize, &new_cd);
3859 if (error != 0) {
3860 printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3861 return error;
3862 }
3863 entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3864 der_entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3865
3866 memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3867
3868 /* Update fields in the Code Directory structure */
3869 new_cd->length = htonl((uint32_t)new_cdsize);
3870
3871 uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3872 nCodeSlots >>= hashes_per_new_hash_shift;
3873 new_cd->nCodeSlots = htonl(nCodeSlots);
3874
3875 new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
3876
3877 if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3878 SC_Scatter *scatter = (SC_Scatter*)
3879 ((char *)new_cd + ntohl(new_cd->scatterOffset));
3880 /* iterate all scatter structs to scale their counts */
3881 do {
3882 uint32_t scount = ntohl(scatter->count);
3883 uint32_t sbase = ntohl(scatter->base);
3884
3885 /* last scatter? */
3886 if (scount == 0) {
3887 break;
3888 }
3889
3890 scount >>= hashes_per_new_hash_shift;
3891 scatter->count = htonl(scount);
3892
3893 sbase >>= hashes_per_new_hash_shift;
3894 scatter->base = htonl(sbase);
3895
3896 scatter++;
3897 } while (1);
3898 }
3899
3900 /* For each group of hashes, hash them together */
3901 const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3902 unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3903
3904 uint32_t hash_index;
3905 for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3906 union cs_hash_union mdctx;
3907
3908 uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3909 const unsigned char *src = src_base + hash_index * source_hash_len;
3910 unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3911
3912 blob->csb_hashtype->cs_init(&mdctx);
3913 blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3914 blob->csb_hashtype->cs_final(dst, &mdctx);
3915 }
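/*
 * Editor's worked example of the loop above: with 4K source pages
 * (csb_hash_pageshift == 12) and 16K kernel pages (PAGE_SHIFT == 14),
 * hashes_per_new_hash_shift == 2, so source_hash_len covers 1 << 2 == 4
 * consecutive source hashes and each new slot hash is effectively
 *
 *     new_hash[i] = H(old_hash[4i] || old_hash[4i+1] ||
 *                     old_hash[4i+2] || old_hash[4i+3])
 */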
3916
3917 error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements, &der_entitlements);
3918 if (error != 0) {
3919 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3920 error);
3921
3922 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3923 return error;
3924 }
3925
3926 /* New Code Directory is ready for use, swap it out in the blob structure */
3927 ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3928
3929 blob->csb_mem_size = new_blob_size;
3930 blob->csb_mem_kaddr = (void *)new_blob_addr;
3931 blob->csb_cd = cd;
3932 blob->csb_entitlements_blob = NULL;
3933
3934 blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
3935 blob->csb_reconstituted = true;
3936
3937 /* The blob has some cached attributes of the Code Directory, so update those */
3938
3939 blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
3940
3941 blob->csb_hash_pageshift = PAGE_SHIFT;
3942 blob->csb_end_offset = ntohl(cd->codeLimit);
3943 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3944 const SC_Scatter *scatter = (const SC_Scatter*)
3945 ((const char*)cd + ntohl(cd->scatterOffset));
3946 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3947 } else {
3948 blob->csb_start_offset = 0;
3949 }
3950
3951 return 0;
3952 }
3953
3954 static void
3955 cs_blob_cleanup(struct cs_blob *blob)
3956 {
3957 if (blob->csb_entitlements != NULL) {
3958 amfi->OSEntitlements_invalidate(blob->csb_entitlements);
3959 osobject_release(blob->csb_entitlements);
3960 blob->csb_entitlements = NULL;
3961 }
3962
3963 #if CODE_SIGNING_MONITOR
3964 if (blob->csb_csm_obj != NULL) {
3965 /* Unconditionally remove any profiles we may have associated */
3966 csm_disassociate_provisioning_profile(blob->csb_csm_obj);
3967
3968 kern_return_t kr = csm_unregister_code_signature(blob->csb_csm_obj);
3969 if (kr == KERN_SUCCESS) {
3970 /*
3971 * If the code signature was monitor managed, the monitor will have freed it
3972 * itself in the unregistration call. This means we do not need to free the data
3973 * here.
3974 */
3975 if (blob->csb_csm_managed) {
3976 blob->csb_mem_kaddr = NULL;
3977 blob->csb_mem_size = 0;
3978 }
3979 } else if (kr == KERN_ABORTED) {
3980 /*
3981 * The code-signing-monitor refused to unregister the code signature. This means
3982 * whatever memory was backing the code signature may not have been released, and
3983 * attempting to free it down below will not be successful. As a result, all we
3984 * can do is prevent the kernel from touching the data.
3985 */
3986 blob->csb_mem_kaddr = NULL;
3987 blob->csb_mem_size = 0;
3988 }
3989 }
3990
3991 /* Unconditionally remove references to the monitor */
3992 blob->csb_csm_obj = NULL;
3993 blob->csb_csm_managed = false;
3994 #endif
3995
3996 if (blob->csb_mem_kaddr) {
3997 ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3998 }
3999 blob->csb_mem_kaddr = NULL;
4000 blob->csb_mem_size = 0;
4001 }
4002
4003 static void
4004 cs_blob_ro_free(struct cs_blob *blob)
4005 {
4006 struct cs_blob tmp;
4007
4008 if (blob != NULL) {
4009 /*
4010 * cs_blob_cleanup clears fields, so we need to pass it a
4011 * mutable copy.
4012 */
4013 tmp = *blob;
4014 cs_blob_cleanup(&tmp);
4015
4016 zfree_ro(ZONE_ID_CS_BLOB, blob);
4017 }
4018 }
4019
4020 /*
4021 * Free a cs_blob previously created by cs_blob_create_validated.
4022 */
4023 void
4024 cs_blob_free(
4025 struct cs_blob *blob)
4026 {
4027 cs_blob_ro_free(blob);
4028 }
4029
4030 static int
4031 cs_blob_init_validated(
4032 vm_address_t * const addr,
4033 vm_size_t size,
4034 struct cs_blob *blob,
4035 CS_CodeDirectory const ** const ret_cd)
4036 {
4037 int error = EINVAL;
4038 const CS_CodeDirectory *cd = NULL;
4039 const CS_GenericBlob *entitlements = NULL;
4040 const CS_GenericBlob *der_entitlements = NULL;
4041 union cs_hash_union mdctx;
4042 size_t length;
4043
4044 bzero(blob, sizeof(*blob));
4045
4046 /* fill in the new blob */
4047 blob->csb_mem_size = size;
4048 blob->csb_mem_offset = 0;
4049 blob->csb_mem_kaddr = (void *)*addr;
4050 blob->csb_flags = 0;
4051 blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
4052 blob->csb_platform_binary = 0;
4053 blob->csb_platform_path = 0;
4054 blob->csb_teamid = NULL;
4055 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4056 blob->csb_supplement_teamid = NULL;
4057 #endif
4058 blob->csb_entitlements_blob = NULL;
4059 blob->csb_der_entitlements_blob = NULL;
4060 blob->csb_entitlements = NULL;
4061 #if CODE_SIGNING_MONITOR
4062 blob->csb_csm_obj = NULL;
4063 blob->csb_csm_managed = false;
4064 #endif
4065 blob->csb_reconstituted = false;
4066 blob->csb_validation_category = CS_VALIDATION_CATEGORY_INVALID;
4067
4068 /* Transfer ownership. Even on error, this function will deallocate */
4069 *addr = 0;
4070
4071 /*
4072 * Validate the blob's contents
4073 */
4074 length = (size_t) size;
4075 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
4076 length, &cd, &entitlements, &der_entitlements);
4077 if (error) {
4078 if (cs_debug) {
4079 printf("CODESIGNING: csblob invalid: %d\n", error);
4080 }
4081 /*
4082 * The vnode checker can't make the rest of this function
4083 * succeed if csblob validation failed, so bail */
4084 goto out;
4085 } else {
4086 const unsigned char *md_base;
4087 uint8_t hash[CS_HASH_MAX_SIZE];
4088 int md_size;
4089 vm_offset_t hash_pagemask;
4090
4091 blob->csb_cd = cd;
4092 blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
4093 blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
4094 blob->csb_hashtype = cs_find_md(cd->hashType);
4095 if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
4096 panic("validated CodeDirectory but unsupported type");
4097 }
4098
4099 blob->csb_hash_pageshift = cd->pageSize;
4100 hash_pagemask = (1U << cd->pageSize) - 1;
4101 blob->csb_hash_firstlevel_pageshift = 0;
4102 blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
4103 blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
4104 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
4105 const SC_Scatter *scatter = (const SC_Scatter*)
4106 ((const char*)cd + ntohl(cd->scatterOffset));
4107 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
4108 } else {
4109 blob->csb_start_offset = 0;
4110 }
4111 /* compute the blob's cdhash */
4112 md_base = (const unsigned char *) cd;
4113 md_size = ntohl(cd->length);
4114
4115 blob->csb_hashtype->cs_init(&mdctx);
4116 blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
4117 blob->csb_hashtype->cs_final(hash, &mdctx);
4118
4119 memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
4120
4121 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4122 blob->csb_linkage_hashtype = NULL;
4123 if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
4124 ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
4125 blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
4126
4127 if (blob->csb_linkage_hashtype != NULL) {
4128 memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
4129 CS_CDHASH_LEN);
4130 }
4131 }
4132 #endif
4133 }
4134
4135 error = 0;
4136
4137 out:
4138 if (error != 0) {
4139 cs_blob_cleanup(blob);
4140 blob = NULL;
4141 cd = NULL;
4142 }
4143
4144 if (ret_cd != NULL) {
4145 *ret_cd = cd;
4146 }
4147
4148 return error;
4149 }
4150
4151 /*
4152 * Validate the code signature blob, create a struct cs_blob wrapper
4153 * and return it together with a pointer to the chosen code directory
4154 * and entitlements blob.
4155 *
4156 * Note that this takes ownership of the memory at addr, mainly because
4157 * this function can actually replace the passed in blob with another
4158 * one, e.g. when performing multilevel hashing optimization.
4159 */
4160 int
4161 cs_blob_create_validated(
4162 vm_address_t * const addr,
4163 vm_size_t size,
4164 struct cs_blob ** const ret_blob,
4165 CS_CodeDirectory const ** const ret_cd)
4166 {
4167 struct cs_blob blob = {};
4168 struct cs_blob *ro_blob;
4169 int error;
4170
4171 if (ret_blob) {
4172 *ret_blob = NULL;
4173 }
4174
4175 if ((error = cs_blob_init_validated(addr, size, &blob, ret_cd)) != 0) {
4176 return error;
4177 }
4178
4179 if (ret_blob != NULL) {
4180 ro_blob = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4181 zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, &blob);
4182 *ret_blob = ro_blob;
4183 }
4184
4185 return error;
4186 }
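/*
 * Editor's usage sketch for cs_blob_create_validated() (hypothetical caller;
 * in this file the add paths use cs_blob_init_validated() directly):
 *
 *     vm_address_t addr = ...;  // kernel allocation holding the raw signature
 *     struct cs_blob *blob = NULL;
 *     const CS_CodeDirectory *cd = NULL;
 *
 *     int err = cs_blob_create_validated(&addr, size, &blob, &cd);
 *     // addr is zeroed on return: ownership moved into the blob, and the
 *     // memory is deallocated on error.
 *     if (err == 0) {
 *         // ... use blob->csb_cdhash, cd ...
 *         cs_blob_free(blob);
 *     }
 */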
4187
4188 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4189 static void
4190 cs_blob_supplement_free(struct cs_blob * const blob)
4191 {
4192 void *teamid;
4193
4194 if (blob != NULL) {
4195 if (blob->csb_supplement_teamid != NULL) {
4196 teamid = blob->csb_supplement_teamid;
4197 vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
4198 kfree_data(teamid, teamid_size);
4199 }
4200 cs_blob_ro_free(blob);
4201 }
4202 }
4203 #endif
4204
4205 static void
4206 ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
4207 {
4208 /* Note that the atomic ops are not enough to guarantee
4209 * correctness: If a blob with an intermediate size is inserted
4210 * concurrently, we can lose a peak value assignment. But these
4211 * statistics are only advisory anyway, so we're not going to
4212 * employ full locking here. (Consequently, we are also okay with
4213 * relaxed ordering of those accesses.)
4214 */
4215
4216 unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
4217 if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
4218 os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
4219 }
4220
4221 size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
4222
4223 if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
4224 os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
4225 }
4226 if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
4227 os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
4228 }
4229 }
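/*
 * Editor's worked example of the advisory race noted above: the count moves
 * 10 -> 11 (thread A) -> 12 (thread B); B stores cs_blob_count_peak = 12,
 * then A, which had loaded the stale peak of 10, stores 11 over it. The true
 * peak of 12 is lost, which is acceptable for purely advisory statistics.
 */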
4230
4231 static void
4232 cs_blob_set_cpu_type(struct cs_blob *blob, cpu_type_t cputype)
4233 {
4234 zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_type, &cputype);
4235 }
4236
4237 __abortlike
4238 static void
4239 panic_cs_blob_backref_mismatch(struct cs_blob *blob, struct vnode *vp)
4240 {
4241 panic("cs_blob vnode backref mismatch: blob=%p, vp=%p, "
4242 "blob->csb_vnode=%p", blob, vp, blob->csb_vnode);
4243 }
4244
4245 void
4246 cs_blob_require(struct cs_blob *blob, vnode_t vp)
4247 {
4248 zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), blob);
4249
4250 if (vp != NULL && __improbable(blob->csb_vnode != vp)) {
4251 panic_cs_blob_backref_mismatch(blob, vp);
4252 }
4253 }
4254
4255 #if CODE_SIGNING_MONITOR
4256
4257 /**
4258 * Independently verify the authenticity of the code signature through the monitor
4259 * environment. This is required as otherwise the monitor won't allow associations
4260 * of the code signature with address spaces.
4261 *
4262 * Once we've verified the code signature, we no longer need to keep around any
4263 * provisioning profiles we may have registered with it. AMFI associates profiles
4264 * with the monitor during its validation (which happens before the monitor's).
4265 */
4266 static errno_t
4267 verify_code_signature_monitor(
4268 struct cs_blob *cs_blob)
4269 {
4270 kern_return_t ret = KERN_DENIED;
4271
4272 ret = csm_verify_code_signature(cs_blob->csb_csm_obj, &cs_blob->csb_csm_trust_level);
4273 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4274 printf("unable to verify code signature with monitor: %d\n", ret);
4275 return EPERM;
4276 }
4277
4278 ret = csm_disassociate_provisioning_profile(cs_blob->csb_csm_obj);
4279 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND) && (ret != KERN_NOT_SUPPORTED)) {
4280 printf("unable to disassociate profile from code signature: %d\n", ret);
4281 return EPERM;
4282 }
4283
4284 /* Associate the OSEntitlements kernel object with the monitor */
4285 ret = csm_associate_os_entitlements(cs_blob->csb_csm_obj, cs_blob->csb_entitlements);
4286 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4287 printf("unable to associate OSEntitlements with monitor: %d\n", ret);
4288 return EPERM;
4289 }
4290
4291 return 0;
4292 }
4293
4294 /**
4295 * Register the code signature with the code signing monitor environment. This
4296 * will effectively make the blob data immutable, either because the blob memory
4297 * will be allocated and managed directly by the monitor, or because the monitor
4298 * will lockdown the memory associated with the blob.
4299 */
4300 static errno_t
4301 register_code_signature_monitor(
4302 struct vnode *vnode,
4303 struct cs_blob *cs_blob,
4304 vm_offset_t code_directory_offset)
4305 {
4306 kern_return_t ret = KERN_DENIED;
4307 vm_address_t monitor_signature_addr = 0;
4308 void *monitor_sig_object = NULL;
4309 const char *vnode_path_ptr = NULL;
4310
4311 /*
4312 * Attempt to resolve the path for this vnode and pass it in to the code
4313 * signing monitor during registration.
4314 */
4315 int vnode_path_len = MAXPATHLEN;
4316 char *vnode_path = kalloc_data(vnode_path_len, Z_WAITOK);
4317
4318 /*
4319 * Taking a reference on the vnode recursively can sometimes lead to a
4320 * deadlock on the system. Since we already have a vnode pointer, it means
4321 * the caller performed a vnode lookup, which implicitly takes a reference
4322 * on the vnode. However, there is more than just having a reference on a
4323 * vnode which is important. vnode's also have an iocount, and we must only
4324 * access a vnode which has an iocount of greater than 0. Thankfully, all
4325 * the conditions which lead to calling this function ensure that this
4326 * vnode is safe to access here.
4327 *
4328 * For more details: rdar://105819068.
4329 */
4330 errno_t error = vn_getpath(vnode, vnode_path, &vnode_path_len);
4331 if (error == 0) {
4332 vnode_path_ptr = vnode_path;
4333 }
4334
4335 ret = csm_register_code_signature(
4336 (vm_address_t)cs_blob->csb_mem_kaddr,
4337 cs_blob->csb_mem_size,
4338 code_directory_offset,
4339 vnode_path_ptr,
4340 &monitor_sig_object,
4341 &monitor_signature_addr);
4342
4343 kfree_data(vnode_path, MAXPATHLEN);
4344 vnode_path_ptr = NULL;
4345
4346 if (ret == KERN_SUCCESS) {
4347 /* Reconstruct the cs_blob if the monitor used its own allocation */
4348 if (monitor_signature_addr != (vm_address_t)cs_blob->csb_mem_kaddr) {
4349 vm_size_t monitor_signature_size = cs_blob->csb_mem_size;
4350
4351 /* Free the old memory for the blob */
4352 ubc_cs_blob_deallocate(
4353 (vm_address_t)cs_blob->csb_mem_kaddr,
4354 cs_blob->csb_mem_size);
4355
4356 /* Reconstruct critical fields in the blob object */
4357 ubc_cs_blob_reconstruct(
4358 cs_blob,
4359 monitor_signature_addr,
4360 monitor_signature_size,
4361 code_directory_offset);
4362
4363 /* Mark the signature as monitor managed */
4364 cs_blob->csb_csm_managed = true;
4365 }
4366 } else if (ret != KERN_NOT_SUPPORTED) {
4367 printf("unable to register code signature with monitor: %d\n", ret);
4368 return EPERM;
4369 }
4370
4371 /* Save the monitor handle for the signature object -- may be NULL */
4372 cs_blob->csb_csm_obj = monitor_sig_object;
4373
4374 return 0;
4375 }
4376
4377 #endif /* CODE_SIGNING_MONITOR */
4378
4379 static errno_t
4380 validate_main_binary_check(
4381 struct cs_blob *csblob,
4382 cs_blob_add_flags_t csblob_add_flags)
4383 {
4384 #if XNU_TARGET_OS_OSX
4385 (void)csblob;
4386 (void)csblob_add_flags;
4387 return 0;
4388 #else
4389 const CS_CodeDirectory *first_cd = NULL;
4390 const CS_CodeDirectory *alt_cd = NULL;
4391 uint64_t exec_seg_flags = 0;
4392 uint32_t slot = CSSLOT_CODEDIRECTORY;
4393
4394 /* Nothing to enforce if we're allowing main binaries */
4395 if ((csblob_add_flags & CS_BLOB_ADD_ALLOW_MAIN_BINARY) != 0) {
4396 return 0;
4397 }
4398
4399 first_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4400 if ((first_cd != NULL) && (ntohl(first_cd->version) >= CS_SUPPORTSEXECSEG)) {
4401 exec_seg_flags |= ntohll(first_cd->execSegFlags);
4402 }
4403
4404 for (uint32_t i = 0; i < CSSLOT_ALTERNATE_CODEDIRECTORY_MAX; i++) {
4405 slot = CSSLOT_ALTERNATE_CODEDIRECTORIES + i;
4406 alt_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4407 if ((alt_cd == NULL) || (ntohl(alt_cd->version) < CS_SUPPORTSEXECSEG)) {
4408 continue;
4409 }
4410 exec_seg_flags |= ntohll(alt_cd->execSegFlags);
4411 }
4412
4413 if ((exec_seg_flags & CS_EXECSEG_MAIN_BINARY) != 0) {
4414 return EBADEXEC;
4415 }
4416 return 0;
4417 #endif /* XNU_TARGET_OS_OSX */
4418 }
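/*
 * Editor's note on the check above: any code directory (first or alternate)
 * carrying CS_EXECSEG_MAIN_BINARY in its execSegFlags causes EBADEXEC here,
 * unless the caller passed CS_BLOB_ADD_ALLOW_MAIN_BINARY, which is presumably
 * only set on paths that are expected to load a main executable's signature.
 */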
4419
4420 /**
4421 * Accelerate entitlements for a code signature object. When we have a code
4422 * signing monitor, this acceleration is done within the monitor which then
4423 * passes back a CoreEntitlements query context the kernel can use. When we
4424 * don't have a code signing monitor, we accelerate the queries within the
4425 * kernel memory itself.
4426 *
4427 * This function must be called when the storage for the code signature can
4428 * no longer change.
4429 */
4430 static errno_t
4431 accelerate_entitlement_queries(
4432 struct cs_blob *cs_blob)
4433 {
4434 kern_return_t ret = KERN_NOT_SUPPORTED;
4435
4436 #if CODE_SIGNING_MONITOR
4437 CEQueryContext_t ce_ctx = NULL;
4438 const char *signing_id = NULL;
4439
4440 ret = csm_accelerate_entitlements(cs_blob->csb_csm_obj, &ce_ctx);
4441 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4442 printf("unable to accelerate entitlements through the monitor: %d\n", ret);
4443 return EPERM;
4444 }
4445
4446 if (ret == KERN_SUCCESS) {
4447 /* Call cannot fail at this stage */
4448 ret = csm_acquire_signing_identifier(cs_blob->csb_csm_obj, &signing_id);
4449 assert(ret == KERN_SUCCESS);
4450
4451 /* Adjust the OSEntitlements context with AMFI */
4452 ret = amfi->OSEntitlements.adjustContextWithMonitor(
4453 cs_blob->csb_entitlements,
4454 ce_ctx,
4455 cs_blob->csb_csm_obj,
4456 signing_id,
4457 cs_blob->csb_flags);
4458 if (ret != KERN_SUCCESS) {
4459 printf("unable to adjust OSEntitlements context with monitor: %d\n", ret);
4460 return EPERM;
4461 }
4462
4463 return 0;
4464 }
4465 #endif
4466
4467 /*
4468 * If we reach here, then either we don't have a code signing monitor, or
4469 * the code signing monitor isn't enabled for code signing, in which case,
4470 * AMFI is going to accelerate the entitlements context and adjust its
4471 * context on its own.
4472 */
4473 assert(ret == KERN_NOT_SUPPORTED);
4474
4475 ret = amfi->OSEntitlements.adjustContextWithoutMonitor(
4476 cs_blob->csb_entitlements,
4477 cs_blob);
4478
4479 if (ret != KERN_SUCCESS) {
4480 printf("unable to adjust OSEntitlements context without monitor: %d\n", ret);
4481 return EPERM;
4482 }
4483
4484 return 0;
4485 }
4486
4487 /**
4488 * Ensure and validate that some security critical code signing blobs haven't
4489 * been stripped off from the code signature. This can happen if an attacker
4490 * chose to load a code signature sans these critical blobs, or if there is a
4491 * bug in reconstitution logic which removes these blobs from the code signature.
4492 */
4493 static errno_t
4494 validate_auxiliary_signed_blobs(
4495 struct cs_blob *cs_blob)
4496 {
4497 struct cs_blob_identifier {
4498 uint32_t cs_slot;
4499 uint32_t cs_magic;
4500 };
4501
4502 const struct cs_blob_identifier identifiers[] = {
4503 {CSSLOT_LAUNCH_CONSTRAINT_SELF, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4504 {CSSLOT_LAUNCH_CONSTRAINT_PARENT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4505 {CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4506 {CSSLOT_LIBRARY_CONSTRAINT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT}
4507 };
4508 const uint32_t num_identifiers = sizeof(identifiers) / sizeof(identifiers[0]);
4509
4510 for (uint32_t i = 0; i < num_identifiers; i++) {
4511 errno_t err = csblob_find_special_slot_blob(
4512 cs_blob,
4513 identifiers[i].cs_slot,
4514 identifiers[i].cs_magic,
4515 NULL,
4516 NULL);
4517
4518 if (err != 0) {
4519 printf("unable to validate security-critical blob: %d [%u|%u]\n",
4520 err, identifiers[i].cs_slot, identifiers[i].cs_magic);
4521
4522 return EPERM;
4523 }
4524 }
4525
4526 return 0;
4527 }
4528
4529 /**
4530 * Setup multi-level hashing for the code signature. This isn't supported on most
4531 * shipping devices, but on ones where it is, it can result in significant savings
4532 * of memory from the code signature standpoint.
4533 *
4534 * Multi-level hashing is used to condense the code directory hashes in order to
4535 * improve memory consumption. We take four 4K page hashes, and condense them into
4536 * a single 16K hash, hence reducing the space consumed by the code directory by
4537 * roughly 75%.
4538 */
4539 static errno_t
4540 setup_multilevel_hashing(
4541 struct cs_blob *cs_blob)
4542 {
4543 code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
4544 errno_t err = -1;
4545
4546 /*
4547 * When we have a code signing monitor, we do not support multi-level hashing
4548 * since the code signature data is expected to be locked within memory which
4549 * cannot be written to by the kernel.
4550 *
4551 * Even when the code signing monitor isn't explicitly enabled, there are other
4552 * reasons for not performing multi-level hashing. For instance, Rosetta creates
4553 * issues with multi-level hashing on Apple Silicon Macs.
4554 */
4555 code_signing_configuration(&monitor_type, NULL);
4556 if (monitor_type != CS_MONITOR_TYPE_NONE) {
4557 return 0;
4558 }
4559
4560 /* We need to check if multi-level hashing is supported for this blob */
4561 if (ubc_cs_supports_multilevel_hash(cs_blob) == false) {
4562 return 0;
4563 }
4564
4565 err = ubc_cs_convert_to_multilevel_hash(cs_blob);
4566 if (err != 0) {
4567 printf("unable to setup multi-level hashing: %d\n", err);
4568 return err;
4569 }
4570
4571 assert(cs_blob->csb_reconstituted == true);
4572 return 0;
4573 }
4574
4575 /**
4576 * Once code signature validation is complete, we can remove even more blobs from the
4577 * code signature as they are no longer needed. This goes on to conserve even more
4578 * system memory.
4579 */
4580 static errno_t
4581 reconstitute_code_signature_2nd_stage(
4582 struct cs_blob *cs_blob)
4583 {
4584 kern_return_t ret = KERN_NOT_SUPPORTED;
4585 errno_t err = EPERM;
4586
4587 /* If we never reconstituted before, we won't be reconstituting again */
4588 if (cs_blob->csb_reconstituted == false) {
4589 return 0;
4590 }
4591
4592 #if CODE_SIGNING_MONITOR
4593 /*
4594 * When we have a code signing monitor, the code signature is immutable until the
4595 * monitor decides to unlock parts of it. Therefore, 2nd stage reconstitution takes
4596 * place in the monitor when we have a monitor available.
4597 *
4598 * If the monitor isn't enforcing code signing (in which case the code signature is
4599 * NOT immutable), then we perform 2nd stage reconstitution within the kernel itself.
4600 */
4601 vm_address_t unneeded_addr = 0;
4602 vm_size_t unneeded_size = 0;
4603
4604 ret = csm_reconstitute_code_signature(
4605 cs_blob->csb_csm_obj,
4606 &unneeded_addr,
4607 &unneeded_size);
4608
4609 if ((ret == KERN_SUCCESS) && unneeded_addr && unneeded_size) {
4610 /* Free the unneeded part of the blob */
4611 kmem_free(kernel_map, unneeded_addr, unneeded_size);
4612
4613 /* Adjust the size in the blob object */
4614 cs_blob->csb_mem_size -= unneeded_size;
4615 }
4616 #endif
4617
4618 if (ret == KERN_SUCCESS) {
4619 goto success;
4620 } else if (ret != KERN_NOT_SUPPORTED) {
4621 /*
4622 * A monitor environment is available, and it failed in performing 2nd stage
4623 * reconstitution. This is a fatal issue for code signing validation.
4624 */
4625 printf("unable to reconstitute code signature through monitor: %d\n", ret);
4626 return EPERM;
4627 }
4628
4629 /* No monitor available if we reached here */
4630 err = ubc_cs_reconstitute_code_signature_2nd_stage(cs_blob);
4631 if (err != 0) {
4632 return err;
4633 }
4634
4635 success:
4636 /*
4637 * Regardless of whether we are performing 2nd stage reconstitution in the monitor
4638 * or in the kernel, we remove references to XML entitlements from the blob here.
4639 * None of the 2nd stage reconstitution code ever keeps these around, and they have
4640 * been explicitly deprecated and disallowed.
4641 */
4642 cs_blob->csb_entitlements_blob = NULL;
4643
4644 return 0;
4645 }
4646
4647 /**
4648 * A code signature blob often contains blobs which aren't needed in the kernel. Since
4649 * the code signature is wired into kernel memory for the time it is used, it behooves
4650 * us to remove any blobs we have no need for in order to conserve memory.
4651 *
4652 * Some platforms support copying the entire SuperBlob stored in kernel memory into
4653 * userspace memory through the "csops" system call. There is an expectation that when
4654 * this happens, all the blobs which were a part of the code signature are copied in
4655 * to userspace memory. As a result, these platforms cannot reconstitute the code
4656 * signature since, or rather, these platforms cannot remove blobs from the signature,
4657 * thereby making reconstitution useless.
4658 */
4659 static errno_t
4660 reconstitute_code_signature(
4661 struct cs_blob *cs_blob)
4662 {
4663 CS_CodeDirectory *code_directory = NULL;
4664 vm_address_t signature_addr = 0;
4665 vm_size_t signature_size = 0;
4666 vm_offset_t code_directory_offset = 0;
4667 bool platform_supports_reconstitution = false;
4668
4669 #if CONFIG_CODE_SIGNATURE_RECONSTITUTION
4670 platform_supports_reconstitution = true;
4671 #endif
4672
4673 /*
4674 * We can skip reconstitution if the code signing monitor isn't available or not
4675 * enabled. But if we do have a monitor, then reconstitution becomes required, as
4676 * there is an expectation of performing 2nd stage reconstitution through the
4677 * monitor itself.
4678 */
4679 if (platform_supports_reconstitution == false) {
4680 #if CODE_SIGNING_MONITOR
4681 if (csm_enabled() == true) {
4682 printf("reconstitution required when code signing monitor is enabled\n");
4683 return EPERM;
4684 }
4685 #endif
4686 return 0;
4687 }
4688
4689 errno_t err = ubc_cs_reconstitute_code_signature(
4690 cs_blob,
4691 &signature_addr,
4692 &signature_size,
4693 0,
4694 &code_directory);
4695
4696 if (err != 0) {
4697 printf("unable to reconstitute code signature: %d\n", err);
4698 return err;
4699 }
4700
4701 /* Calculate the code directory offset */
4702 code_directory_offset = (vm_offset_t)code_directory - signature_addr;
4703
4704 /* Reconstitution allocates new memory -- free the old one */
4705 ubc_cs_blob_deallocate((vm_address_t)cs_blob->csb_mem_kaddr, cs_blob->csb_mem_size);
4706
4707 /* Reconstruct critical fields in the blob object */
4708 ubc_cs_blob_reconstruct(
4709 cs_blob,
4710 signature_addr,
4711 signature_size,
4712 code_directory_offset);
4713
4714 /* Mark the object as reconstituted */
4715 cs_blob->csb_reconstituted = true;
4716
4717 return 0;
4718 }
4719
4720 int
4721 ubc_cs_blob_add(
4722 struct vnode *vp,
4723 uint32_t platform,
4724 cpu_type_t cputype,
4725 cpu_subtype_t cpusubtype,
4726 off_t base_offset,
4727 vm_address_t *addr,
4728 vm_size_t size,
4729 struct image_params *imgp,
4730 __unused int flags,
4731 struct cs_blob **ret_blob,
4732 cs_blob_add_flags_t csblob_add_flags)
4733 {
4734 ptrauth_generic_signature_t cs_blob_sig = {0};
4735 struct ubc_info *uip = NULL;
4736 struct cs_blob tmp_blob = {0};
4737 struct cs_blob *blob_ro = NULL;
4738 struct cs_blob *oblob = NULL;
4739 CS_CodeDirectory const *cd = NULL;
4740 off_t blob_start_offset = 0;
4741 off_t blob_end_offset = 0;
4742 boolean_t record_mtime = false;
4743 kern_return_t kr = KERN_DENIED;
4744 errno_t error = -1;
4745
4746 #if HAS_APPLE_PAC
4747 void *signed_entitlements = NULL;
4748 #if CODE_SIGNING_MONITOR
4749 void *signed_monitor_obj = NULL;
4750 #endif
4751 #endif
4752
4753 if (ret_blob) {
4754 *ret_blob = NULL;
4755 }
4756
4757 /*
4758 * Create the struct cs_blob abstract data type which will get attached to
4759 * the vnode object. This function also validates the structural integrity
4760 * of the code signature blob being passed in.
4761 *
4762 * We initialize a temporary blob whose contents are then copied into an RO
4763 * blob which we allocate from the read-only allocator.
4764 */
4765 error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
4766 if (error != 0) {
4767 printf("unable to create a validated cs_blob object: %d\n", error);
4768 return error;
4769 }
4770
4771 tmp_blob.csb_cpu_type = cputype;
4772 tmp_blob.csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
4773 tmp_blob.csb_base_offset = base_offset;
4774
4775 /* Perform 1st stage reconstitution */
4776 error = reconstitute_code_signature(&tmp_blob);
4777 if (error != 0) {
4778 goto out;
4779 }
4780
4781 /*
4782 * There is a strong design pattern we have to follow carefully within this
4783 * function. Since we're storing the struct cs_blob within RO-allocated
4784 * memory, it is immutable to modifications from within the kernel itself.
4785 *
4786 * However, before the contents of the blob are transferred to the immutable
4787 * cs_blob, they are kept on the stack. In order to protect against a kernel
4788 * R/W attacker, we must protect this stack variable. Most importantly, any
4789 * code paths which can block for a while must compute a PAC signature over
4790 * the stack variable, then perform the blocking operation, and then ensure
4791 * that the PAC signature over the stack variable is still valid to ensure
4792 * that an attacker did not overwrite contents of the blob by introducing a
4793 * maliciously long blocking operation, giving them the time required to go
4794 * and overwrite the contents of the blob.
4795 *
4796 * The most important fields to protect here are the OSEntitlements and the
4797 * code signing monitor object references. For these ones, we keep around
4798 * extra signed pointers diversified against the read-only blobs' memory
4799 * and then update the stack variable with these before updating the full
4800 * read-only blob.
4801 */
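/*
 * Editor's sketch of the signing pattern described above, using the same
 * helpers this function invokes around its own blocking section:
 *
 *     sig = ptrauth_utils_sign_blob_generic(&tmp_blob, sizeof(tmp_blob),
 *         OS_PTRAUTH_DISCRIMINATOR("..."), PTRAUTH_ADDR_DIVERSIFY);
 *     ... blocking work (vnode_lock, list traversal, ...) ...
 *     ptrauth_utils_auth_blob_generic(&tmp_blob, sizeof(tmp_blob),
 *         OS_PTRAUTH_DISCRIMINATOR("..."), PTRAUTH_ADDR_DIVERSIFY, sig);
 *
 * The auth step traps if tmp_blob changed while this thread was blocked.
 */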
4802
4803 blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4804 assert(blob_ro != NULL);
4805
4806 tmp_blob.csb_ro_addr = blob_ro;
4807 tmp_blob.csb_vnode = vp;
4808
4809 /* AMFI needs to see the current blob state at the RO address */
4810 zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
4811
4812 #if CODE_SIGNING_MONITOR
4813 error = register_code_signature_monitor(
4814 vp,
4815 &tmp_blob,
4816 (vm_offset_t)tmp_blob.csb_cd - (vm_offset_t)tmp_blob.csb_mem_kaddr);
4817
4818 if (error != 0) {
4819 goto out;
4820 }
4821
4822 #if HAS_APPLE_PAC
4823 signed_monitor_obj = ptrauth_sign_unauthenticated(
4824 tmp_blob.csb_csm_obj,
4825 ptrauth_key_process_independent_data,
4826 ptrauth_blend_discriminator(&blob_ro->csb_csm_obj,
4827 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_csm_obj")));
4828 #endif /* HAS_APPLE_PAC */
4829
4830 #endif /* CODE_SIGNING_MONITOR */
4831
4832 /*
4833 * Ensure that we're honoring the main binary policy check on platforms which
4834 * require it. We perform this check at this stage to ensure the blob we're
4835 * looking at has been locked down by a code signing monitor if the system
4836 * has one.
4837 */
4838 error = validate_main_binary_check(&tmp_blob, csblob_add_flags);
4839 if (error != 0) {
4840 printf("failed to verify main binary policy: %d\n", error);
4841 goto out;
4842 }
4843
4844 #if CONFIG_MACF
4845 unsigned int cs_flags = tmp_blob.csb_flags;
4846 unsigned int signer_type = tmp_blob.csb_signer_type;
4847
4848 error = mac_vnode_check_signature(
4849 vp,
4850 &tmp_blob,
4851 imgp,
4852 &cs_flags,
4853 &signer_type,
4854 flags,
4855 platform);
4856
4857 if (error != 0) {
4858 printf("validation of code signature failed through MACF policy: %d\n", error);
4859 goto out;
4860 }
4861
4862 #if HAS_APPLE_PAC
4863 signed_entitlements = ptrauth_sign_unauthenticated(
4864 tmp_blob.csb_entitlements,
4865 ptrauth_key_process_independent_data,
4866 ptrauth_blend_discriminator(&blob_ro->csb_entitlements,
4867 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements")));
4868 #endif
4869
4870 tmp_blob.csb_flags = cs_flags;
4871 tmp_blob.csb_signer_type = signer_type;
4872
4873 if (tmp_blob.csb_flags & CS_PLATFORM_BINARY) {
4874 tmp_blob.csb_platform_binary = 1;
4875 tmp_blob.csb_platform_path = !!(tmp_blob.csb_flags & CS_PLATFORM_PATH);
4876 tmp_blob.csb_teamid = NULL;
4877 } else {
4878 tmp_blob.csb_platform_binary = 0;
4879 tmp_blob.csb_platform_path = 0;
4880 }
4881
4882 if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !tmp_blob.csb_platform_binary) {
4883 printf("dyld simulator runtime is not apple signed: proc: %d\n",
4884 proc_getpid(current_proc()));
4885
4886 error = EPERM;
4887 goto out;
4888 }
4889 #endif /* CONFIG_MACF */
4890
4891 #if CODE_SIGNING_MONITOR
4892 error = verify_code_signature_monitor(&tmp_blob);
4893 if (error != 0) {
4894 goto out;
4895 }
4896 #endif
4897
4898 /* Perform 2nd stage reconstitution */
4899 error = reconstitute_code_signature_2nd_stage(&tmp_blob);
4900 if (error != 0) {
4901 goto out;
4902 }
4903
4904 /* Setup any multi-level hashing for the code signature */
4905 error = setup_multilevel_hashing(&tmp_blob);
4906 if (error != 0) {
4907 goto out;
4908 }
4909
4910 /* Ensure security critical auxiliary blobs still exist */
4911 error = validate_auxiliary_signed_blobs(&tmp_blob);
4912 if (error != 0) {
4913 goto out;
4914 }
4915
4916 /*
4917 * Accelerate the entitlement queries for this code signature. This must
4918 * be done only after we know that the code signature pointers within the
4919 * struct cs_blob aren't going to be shifted around anymore, which is why
4920 * this acceleration is done after setting up multilevel hashing, since
4921 * that is the last part of signature validation which can shift the code
4922 * signature around.
4923 */
4924 error = accelerate_entitlement_queries(&tmp_blob);
4925 if (error != 0) {
4926 goto out;
4927 }
4928
4929 /*
4930 * Parse and set the Team ID for this code signature. This only needs to
4931 * happen when the signature isn't marked as platform. Like above, this
4932 * has to happen after we know the pointers within struct cs_blob aren't
4933 * going to be shifted anymore.
4934 */
4935 if ((tmp_blob.csb_flags & CS_PLATFORM_BINARY) == 0) {
4936 tmp_blob.csb_teamid = csblob_parse_teamid(&tmp_blob);
4937 }
4938
4939 /*
4940 * Validate the code signing blob's coverage. Ideally, we could just do this
4941 * at the beginning, right after structural validation; however, multilevel
4942 * hashing can change some offsets.
4943 */
4944 blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
4945 blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
4946 if (blob_start_offset >= blob_end_offset) {
4947 error = EINVAL;
4948 goto out;
4949 } else if (blob_start_offset < 0 || blob_end_offset <= 0) {
4950 error = EINVAL;
4951 goto out;
4952 }
4953
4954 /*
4955 * The vnode_lock, linked list traversal, and marking of the memory object as
4956 * signed can all be blocking operations. Compute a PAC over the tmp_blob.
4957 */
4958 cs_blob_sig = ptrauth_utils_sign_blob_generic(
4959 &tmp_blob,
4960 sizeof(tmp_blob),
4961 OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
4962 PTRAUTH_ADDR_DIVERSIFY);
4963
4964 vnode_lock(vp);
4965 if (!UBCINFOEXISTS(vp)) {
4966 vnode_unlock(vp);
4967 error = ENOENT;
4968 goto out;
4969 }
4970 uip = vp->v_ubcinfo;
4971
4972 /* check if this new blob overlaps with an existing blob */
4973 for (oblob = ubc_get_cs_blobs(vp);
4974 oblob != NULL;
4975 oblob = oblob->csb_next) {
4976 off_t oblob_start_offset, oblob_end_offset;
4977
4978 if (tmp_blob.csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
4979 vnode_unlock(vp);
4980 error = EALREADY;
4981 goto out;
4982 } else if (tmp_blob.csb_platform_binary) { //platform binary needs to be the same for app slices
4983 if (!oblob->csb_platform_binary) {
4984 vnode_unlock(vp);
4985 error = EALREADY;
4986 goto out;
4987 }
4988 } else if (tmp_blob.csb_teamid) { //teamid binary needs to be the same for app slices
4989 if (oblob->csb_platform_binary ||
4990 oblob->csb_teamid == NULL ||
4991 strcmp(oblob->csb_teamid, tmp_blob.csb_teamid) != 0) {
4992 vnode_unlock(vp);
4993 error = EALREADY;
4994 goto out;
4995 }
4996 } else { // non teamid binary needs to be the same for app slices
4997 if (oblob->csb_platform_binary ||
4998 oblob->csb_teamid != NULL) {
4999 vnode_unlock(vp);
5000 error = EALREADY;
5001 goto out;
5002 }
5003 }
5004
5005 oblob_start_offset = (oblob->csb_base_offset +
5006 oblob->csb_start_offset);
5007 oblob_end_offset = (oblob->csb_base_offset +
5008 oblob->csb_end_offset);
5009 if (blob_start_offset >= oblob_end_offset ||
5010 blob_end_offset <= oblob_start_offset) {
5011 /* no conflict with this existing blob */
5012 } else {
5013 /* conflict ! */
5014 if (blob_start_offset == oblob_start_offset &&
5015 blob_end_offset == oblob_end_offset &&
5016 tmp_blob.csb_mem_size == oblob->csb_mem_size &&
5017 tmp_blob.csb_flags == oblob->csb_flags &&
5018 (tmp_blob.csb_cpu_type == CPU_TYPE_ANY ||
5019 oblob->csb_cpu_type == CPU_TYPE_ANY ||
5020 tmp_blob.csb_cpu_type == oblob->csb_cpu_type) &&
5021 !bcmp(tmp_blob.csb_cdhash,
5022 oblob->csb_cdhash,
5023 CS_CDHASH_LEN)) {
5024 /*
5025 * We already have this blob:
5026 * we'll return success but
5027 * throw away the new blob.
5028 */
5029 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
5030 /*
5031 * The old blob matches this one
5032 * but doesn't have any CPU type.
5033 * Update it with whatever the caller
5034 * provided this time.
5035 */
5036 cs_blob_set_cpu_type(oblob, cputype);
5037 }
5038
5039 /* The signature is still accepted, so update the
5040 * generation count. */
5041 uip->cs_add_gen = cs_blob_generation_count;
5042
5043 vnode_unlock(vp);
5044 if (ret_blob) {
5045 *ret_blob = oblob;
5046 }
5047 error = EAGAIN;
5048 goto out;
5049 } else {
5050 /* different blob: reject the new one */
5051 vnode_unlock(vp);
5052 error = EALREADY;
5053 goto out;
5054 }
5055 }
5056 }
5057
5058 /* mark this vnode's VM object as having "signed pages" */
5059 kr = memory_object_signed(uip->ui_control, TRUE);
5060 if (kr != KERN_SUCCESS) {
5061 vnode_unlock(vp);
5062 error = ENOENT;
5063 goto out;
5064 }
5065
5066 if (uip->cs_blobs == NULL) {
5067 /* loading 1st blob: record the file's current "modify time" */
5068 record_mtime = TRUE;
5069 }
5070
5071 /* set the generation count for cs_blobs */
5072 uip->cs_add_gen = cs_blob_generation_count;
5073
5074 /* Authenticate the PAC signature after blocking operation */
5075 ptrauth_utils_auth_blob_generic(
5076 &tmp_blob,
5077 sizeof(tmp_blob),
5078 OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
5079 PTRAUTH_ADDR_DIVERSIFY,
5080 cs_blob_sig);
5081
5082 /* Update the system statistics for code signatures blobs */
5083 ubc_cs_blob_adjust_statistics(&tmp_blob);
5084
5085 /* Update the list pointer to reference other blobs for this vnode */
5086 tmp_blob.csb_next = uip->cs_blobs;
5087
5088 #if HAS_APPLE_PAC
5089 /*
5090 * Update all the critical pointers in the blob with the RO diversified
5091 * values before updating the read-only blob with the full contents of
5092 * the struct cs_blob. We need to use memcpy here as otherwise a simple
5093 * assignment will cause the compiler to re-sign using the stack variable
5094 * as the address diversifier.
5095 */
5096 memcpy((void*)&tmp_blob.csb_entitlements, &signed_entitlements, sizeof(void*));
5097 #if CODE_SIGNING_MONITOR
5098 memcpy((void*)&tmp_blob.csb_csm_obj, &signed_monitor_obj, sizeof(void*));
5099 #endif
5100 #endif
5101 zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5102
5103 /* Add a fence to ensure writes to the blob are visible on all threads */
5104 os_atomic_thread_fence(seq_cst);
5105
5106 /*
5107 * Add the cs_blob to the front of the list of blobs for this vnode. We
5108 * add to the front of the list, and we never remove a blob from the list
5109 * which means ubc_cs_get_blobs can return whatever the top of the list
5110	 * is, while still keeping the list valid. This is useful if we validate a
5111	 * page while a new blob is being added for this vnode.
5112 */
5113 uip->cs_blobs = blob_ro;
5114
5115	/* Reload the pointer from uip to double-check the published value */
5116 if (uip->cs_blobs->csb_next) {
5117 zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), uip->cs_blobs->csb_next);
5118 }
5119
5120 if (cs_debug > 1) {
5121 proc_t p;
5122 const char *name = vnode_getname_printable(vp);
5123 p = current_proc();
5124 printf("CODE SIGNING: proc %d(%s) "
5125 "loaded %s signatures for file (%s) "
5126 "range 0x%llx:0x%llx flags 0x%x\n",
5127 proc_getpid(p), p->p_comm,
5128 blob_ro->csb_cpu_type == -1 ? "detached" : "embedded",
5129 name,
5130 blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5131 blob_ro->csb_base_offset + blob_ro->csb_end_offset,
5132 blob_ro->csb_flags);
5133 vnode_putname_printable(name);
5134 }
5135
5136 vnode_unlock(vp);
5137
5138 if (record_mtime) {
5139 vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
5140 }
5141
5142 if (ret_blob) {
5143 *ret_blob = blob_ro;
5144 }
5145
5146 error = 0; /* success ! */
5147
5148 out:
5149 if (error) {
5150 if (error != EAGAIN) {
5151 printf("check_signature[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5152 }
5153
5154 cs_blob_cleanup(&tmp_blob);
5155 if (blob_ro) {
5156 zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5157 }
5158 }
5159
5160 if (error == EAGAIN) {
5161 /*
5162 * See above: error is EAGAIN if we were asked
5163 * to add an existing blob again. We cleaned the new
5164 * blob and we want to return success.
5165 */
5166 error = 0;
5167 }
5168
5169 return error;
5170 }
5171
5172 #if CONFIG_SUPPLEMENTAL_SIGNATURES
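/*
 * Attach a supplemental code signature to vp. The supplement must carry a
 * linkage hash matching a valid cdhash already attached to orig_vp, and
 * only a single supplement per vnode is ever supported.
 *
 * Illustrative call pattern (a sketch, not taken from a real caller):
 *
 *     struct cs_blob *supp = NULL;
 *     error = ubc_cs_blob_add_supplement(vp, orig_vp, base_offset,
 *         &addr, size, &supp);
 *     if (error == 0 && supp != NULL) {
 *         // supplement is now published at vp->v_ubcinfo->cs_blob_supplement
 *     }
 */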
5173 int
5174 ubc_cs_blob_add_supplement(
5175 struct vnode *vp,
5176 struct vnode *orig_vp,
5177 off_t base_offset,
5178 vm_address_t *addr,
5179 vm_size_t size,
5180 struct cs_blob **ret_blob)
5181 {
5182 kern_return_t kr;
5183 struct ubc_info *uip, *orig_uip;
5184 int error;
5185 struct cs_blob tmp_blob;
5186 struct cs_blob *orig_blob;
5187 struct cs_blob *blob_ro = NULL;
5188 CS_CodeDirectory const *cd;
5189 off_t blob_start_offset, blob_end_offset;
5190
5191 if (ret_blob) {
5192 *ret_blob = NULL;
5193 }
5194
5195 /* Create the struct cs_blob wrapper that will be attached to the vnode.
5196	 * Validates the passed-in blob in the process. */
5197 error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
5198
5199 if (error != 0) {
5200 printf("malformed code signature supplement blob: %d\n", error);
5201 return error;
5202 }
5203
5204 tmp_blob.csb_cpu_type = -1;
5205 tmp_blob.csb_base_offset = base_offset;
5206
5207 tmp_blob.csb_reconstituted = false;
5208
5209 vnode_lock(orig_vp);
5210 if (!UBCINFOEXISTS(orig_vp)) {
5211 vnode_unlock(orig_vp);
5212 error = ENOENT;
5213 goto out;
5214 }
5215
5216 orig_uip = orig_vp->v_ubcinfo;
5217
5218 /* check that the supplement's linked cdhash matches a cdhash of
5219 * the target image.
5220 */
5221
5222 if (tmp_blob.csb_linkage_hashtype == NULL) {
5223 proc_t p;
5224 const char *iname = vnode_getname_printable(vp);
5225 p = current_proc();
5226
5227 printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5228 "is not a supplemental.\n",
5229 proc_getpid(p), p->p_comm, iname);
5230
5231 error = EINVAL;
5232
5233 vnode_putname_printable(iname);
5234 vnode_unlock(orig_vp);
5235 goto out;
5236 }
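/* Walk the original vnode's blobs looking for one whose cdhash matches
 * the supplement's linkage hash; remember whether a match exists but
 * has since been invalidated. */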
5237 bool found_but_not_valid = false;
5238 for (orig_blob = ubc_get_cs_blobs(orig_vp); orig_blob != NULL;
5239 orig_blob = orig_blob->csb_next) {
5240 if (orig_blob->csb_hashtype == tmp_blob.csb_linkage_hashtype &&
5241 memcmp(orig_blob->csb_cdhash, tmp_blob.csb_linkage, CS_CDHASH_LEN) == 0) {
5242 // Found match!
5243 found_but_not_valid = ((orig_blob->csb_flags & CS_VALID) != CS_VALID);
5244 break;
5245 }
5246 }
5247
5248 if (orig_blob == NULL || found_but_not_valid) {
5249		// Not found, or found but no longer valid.
5250
5251 proc_t p;
5252 const char *iname = vnode_getname_printable(vp);
5253 p = current_proc();
5254
5255 error = (orig_blob == NULL) ? ESRCH : EPERM;
5256
5257 printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5258 "does not match any attached cdhash (error: %d).\n",
5259 proc_getpid(p), p->p_comm, iname, error);
5260
5261 vnode_putname_printable(iname);
5262 vnode_unlock(orig_vp);
5263 goto out;
5264 }
5265
5266 vnode_unlock(orig_vp);
5267
5268 blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
5269 tmp_blob.csb_ro_addr = blob_ro;
5270 tmp_blob.csb_vnode = vp;
5271
5272 /* AMFI needs to see the current blob state at the RO address. */
5273 zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5274
5275 // validate the signature against policy!
5276 #if CONFIG_MACF
5277 unsigned int signer_type = tmp_blob.csb_signer_type;
5278 error = mac_vnode_check_supplemental_signature(vp, &tmp_blob, orig_vp, orig_blob, &signer_type);
5279
5280 tmp_blob.csb_signer_type = signer_type;
5281
5282 if (error) {
5283 if (cs_debug) {
5284 printf("check_supplemental_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5285 }
5286 goto out;
5287 }
5288 #endif
5289
5290	// We allowed the supplemental signature blob, so copy the platform bit
5291	// or team-id from the linked signature, along with whether the original is developer code.
5292 tmp_blob.csb_platform_binary = 0;
5293 tmp_blob.csb_platform_path = 0;
5294 if (orig_blob->csb_platform_binary == 1) {
5295 tmp_blob.csb_platform_binary = orig_blob->csb_platform_binary;
5296 tmp_blob.csb_platform_path = orig_blob->csb_platform_path;
5297 } else if (orig_blob->csb_teamid != NULL) {
5298 vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
5299 tmp_blob.csb_supplement_teamid = kalloc_data(teamid_size, Z_WAITOK);
5300 if (tmp_blob.csb_supplement_teamid == NULL) {
5301 error = ENOMEM;
5302 goto out;
5303 }
5304 strlcpy(tmp_blob.csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
5305 }
5306 tmp_blob.csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);
5307
5308 // Validate the blob's coverage
5309 blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
5310 blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
5311
5312 if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
5313 /* reject empty or backwards blob */
5314 error = EINVAL;
5315 goto out;
5316 }
5317
5318 vnode_lock(vp);
5319 if (!UBCINFOEXISTS(vp)) {
5320 vnode_unlock(vp);
5321 error = ENOENT;
5322 goto out;
5323 }
5324 uip = vp->v_ubcinfo;
5325
5326 struct cs_blob *existing = uip->cs_blob_supplement;
5327 if (existing != NULL) {
5328 if (tmp_blob.csb_hashtype == existing->csb_hashtype &&
5329 memcmp(tmp_blob.csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
5330 error = EAGAIN; // non-fatal
5331 } else {
5332 error = EALREADY; // fatal
5333 }
5334
5335 vnode_unlock(vp);
5336 goto out;
5337 }
5338
5339 /* mark this vnode's VM object as having "signed pages" */
5340 kr = memory_object_signed(uip->ui_control, TRUE);
5341 if (kr != KERN_SUCCESS) {
5342 vnode_unlock(vp);
5343 error = ENOENT;
5344 goto out;
5345 }
5346
5347
5348 /* We still adjust statistics even for supplemental blobs, as they
5349 * consume memory just the same. */
5350 ubc_cs_blob_adjust_statistics(&tmp_blob);
5351 /* Unlike regular cs_blobs, we only ever support one supplement. */
5352 tmp_blob.csb_next = NULL;
5353 zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5354
5355 os_atomic_thread_fence(seq_cst); // Fence to prevent reordering here
5356 uip->cs_blob_supplement = blob_ro;
5357
5358	/* Reload the pointer from uip to double-check the published blob */
5359 if (__improbable(uip->cs_blob_supplement->csb_next)) {
5360 panic("csb_next does not match expected NULL value");
5361 }
5362
5363 vnode_unlock(vp);
5364
5365
5366 if (cs_debug > 1) {
5367 proc_t p;
5368 const char *name = vnode_getname_printable(vp);
5369 p = current_proc();
5370 printf("CODE SIGNING: proc %d(%s) "
5371 "loaded supplemental signature for file (%s) "
5372 "range 0x%llx:0x%llx\n",
5373 proc_getpid(p), p->p_comm,
5374 name,
5375 blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5376 blob_ro->csb_base_offset + blob_ro->csb_end_offset);
5377 vnode_putname_printable(name);
5378 }
5379
5380 if (ret_blob) {
5381 *ret_blob = blob_ro;
5382 }
5383
5384 error = 0; // Success!
5385 out:
5386 if (error) {
5387 if (cs_debug) {
5388 printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5389 }
5390
5391 cs_blob_cleanup(&tmp_blob);
5392 if (blob_ro) {
5393 zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5394 }
5395 }
5396
5397 if (error == EAGAIN) {
5398 /* We were asked to add an existing blob.
5399 * We cleaned up and ignore the attempt. */
5400 error = 0;
5401 }
5402
5403 return error;
5404 }
5405 #endif
5406
5407
5408
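/* Debugging helper: print the vnode's name plus the range, flags,
 * platform bits and team ID of every code-signing blob attached to it. */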
5409 void
5410 csvnode_print_debug(struct vnode *vp)
5411 {
5412 const char *name = NULL;
5413 struct ubc_info *uip;
5414 struct cs_blob *blob;
5415
5416 name = vnode_getname_printable(vp);
5417 if (name) {
5418 printf("csvnode: name: %s\n", name);
5419 vnode_putname_printable(name);
5420 }
5421
5422 vnode_lock_spin(vp);
5423
5424 if (!UBCINFOEXISTS(vp)) {
5425 blob = NULL;
5426 goto out;
5427 }
5428
5429 uip = vp->v_ubcinfo;
5430 for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
5431 printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
5432 (unsigned long)blob->csb_start_offset,
5433 (unsigned long)blob->csb_end_offset,
5434 blob->csb_flags,
5435 blob->csb_platform_binary ? "yes" : "no",
5436 blob->csb_platform_path ? "yes" : "no",
5437 blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
5438 }
5439
5440 out:
5441 vnode_unlock(vp);
5442 }
5443
5444 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5445 struct cs_blob *
5446 ubc_cs_blob_get_supplement(
5447 struct vnode *vp,
5448 off_t offset)
5449 {
5450 struct cs_blob *blob;
5451 off_t offset_in_blob;
5452
5453 vnode_lock_spin(vp);
5454
5455 if (!UBCINFOEXISTS(vp)) {
5456 blob = NULL;
5457 goto out;
5458 }
5459
5460 blob = vp->v_ubcinfo->cs_blob_supplement;
5461
5462 if (blob == NULL) {
5463 // no supplemental blob
5464 goto out;
5465 }
5466
5467
5468 if (offset != -1) {
5469 offset_in_blob = offset - blob->csb_base_offset;
5470 if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
5471 // not actually covered by this blob
5472 blob = NULL;
5473 }
5474 }
5475
5476 out:
5477 vnode_unlock(vp);
5478
5479 return blob;
5480 }
5481 #endif
5482
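/*
 * Find the cs_blob attached to vp that either matches the given CPU
 * type/subtype or covers the given file offset; pass -1 for the fields
 * that should not participate in the match. The caller must hold a
 * reference on the vnode. For example (a sketch), to find the blob
 * covering offset 0 regardless of architecture:
 *
 *     struct cs_blob *blob = ubc_cs_blob_get(vp, -1, -1, 0);
 */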
5483 struct cs_blob *
5484 ubc_cs_blob_get(
5485 struct vnode *vp,
5486 cpu_type_t cputype,
5487 cpu_subtype_t cpusubtype,
5488 off_t offset)
5489 {
5490 struct cs_blob *blob;
5491 off_t offset_in_blob;
5492
5493 vnode_lock_spin(vp);
5494
5495 if (!UBCINFOEXISTS(vp)) {
5496 blob = NULL;
5497 goto out;
5498 }
5499
5500 for (blob = ubc_get_cs_blobs(vp);
5501 blob != NULL;
5502 blob = blob->csb_next) {
5503 if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
5504 break;
5505 }
5506 if (offset != -1) {
5507 offset_in_blob = offset - blob->csb_base_offset;
5508 if (offset_in_blob >= blob->csb_start_offset &&
5509 offset_in_blob < blob->csb_end_offset) {
5510 /* our offset is covered by this blob */
5511 break;
5512 }
5513 }
5514 }
5515
5516 out:
5517 vnode_unlock(vp);
5518
5519 return blob;
5520 }
5521
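/*
 * Free all cs_blobs (and any supplement) of a vnode whose signatures
 * were invalidated, then unlock it. If UI_CSBLOBINVALID is not set this
 * reduces to vnode_unlock(). The blob list is detached while the vnode
 * lock is held and the blobs are freed only after dropping it.
 */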
5522 void
5523 ubc_cs_free_and_vnode_unlock(
5524 vnode_t vp)
5525 {
5526 struct ubc_info *uip = vp->v_ubcinfo;
5527 struct cs_blob *cs_blobs, *blob, *next_blob;
5528
5529 if (!(uip->ui_flags & UI_CSBLOBINVALID)) {
5530 vnode_unlock(vp);
5531 return;
5532 }
5533
5534 uip->ui_flags &= ~UI_CSBLOBINVALID;
5535
5536 cs_blobs = uip->cs_blobs;
5537 uip->cs_blobs = NULL;
5538
5539 #if CHECK_CS_VALIDATION_BITMAP
5540 ubc_cs_validation_bitmap_deallocate( uip );
5541 #endif
5542
5543 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5544 struct cs_blob *cs_blob_supplement = uip->cs_blob_supplement;
5545 uip->cs_blob_supplement = NULL;
5546 #endif
5547
5548 vnode_unlock(vp);
5549
5550 for (blob = cs_blobs;
5551 blob != NULL;
5552 blob = next_blob) {
5553 next_blob = blob->csb_next;
5554 os_atomic_add(&cs_blob_count, -1, relaxed);
5555 os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5556 cs_blob_ro_free(blob);
5557 }
5558
5559 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5560 if (cs_blob_supplement != NULL) {
5561 os_atomic_add(&cs_blob_count, -1, relaxed);
5562 os_atomic_add(&cs_blob_size, -cs_blob_supplement->csb_mem_size, relaxed);
5563 cs_blob_supplement_free(cs_blob_supplement);
5564 }
5565 #endif
5566 }
5567
5568 static void
5569 ubc_cs_free(
5570 struct ubc_info *uip)
5571 {
5572 struct cs_blob *blob, *next_blob;
5573
5574 for (blob = uip->cs_blobs;
5575 blob != NULL;
5576 blob = next_blob) {
5577 next_blob = blob->csb_next;
5578 os_atomic_add(&cs_blob_count, -1, relaxed);
5579 os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5580 cs_blob_ro_free(blob);
5581 }
5582 #if CHECK_CS_VALIDATION_BITMAP
5583 ubc_cs_validation_bitmap_deallocate( uip );
5584 #endif
5585 uip->cs_blobs = NULL;
5586 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5587 if (uip->cs_blob_supplement != NULL) {
5588 blob = uip->cs_blob_supplement;
5589 os_atomic_add(&cs_blob_count, -1, relaxed);
5590 os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5591 cs_blob_supplement_free(uip->cs_blob_supplement);
5592 uip->cs_blob_supplement = NULL;
5593 }
5594 #endif
5595 }
5596
5597 /* check cs blob generation on vnode
5598 * returns:
5599 * 0 : Success, the cs_blob attached is current
5600 * ENEEDAUTH : Generation count mismatch. Needs authentication again.
5601 */
5602 int
5603 ubc_cs_generation_check(
5604 struct vnode *vp)
5605 {
5606 int retval = ENEEDAUTH;
5607
5608 vnode_lock_spin(vp);
5609
5610 if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
5611 retval = 0;
5612 }
5613
5614 vnode_unlock(vp);
5615 return retval;
5616 }
5617
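/*
 * Re-run policy checks on an already attached cs_blob, e.g. after
 * cs_blob_reset_cache() bumped the generation count. Returns 0 on
 * success, EPERM for a previously invalidated blob, and EAGAIN for a
 * reconstituted signature, which must be re-read and re-added instead
 * of being revalidated in place.
 */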
5618 int
5619 ubc_cs_blob_revalidate(
5620 struct vnode *vp,
5621 struct cs_blob *blob,
5622 struct image_params *imgp,
5623 int flags,
5624 uint32_t platform
5625 )
5626 {
5627 int error = 0;
5628 const CS_CodeDirectory *cd = NULL;
5629 const CS_GenericBlob *entitlements = NULL;
5630 const CS_GenericBlob *der_entitlements = NULL;
5631 size_t size;
5632 assert(vp != NULL);
5633 assert(blob != NULL);
5634
5635 if ((blob->csb_flags & CS_VALID) == 0) {
5636 // If the blob attached to the vnode was invalidated, don't try to revalidate it
5637 // Blob invalidation only occurs when the file that the blob is attached to is
5638 // opened for writing, giving us a signal that the file is modified.
5639 printf("CODESIGNING: can not re-validate a previously invalidated blob, reboot or create a new file.\n");
5640 error = EPERM;
5641 goto out;
5642 }
5643
5644 size = blob->csb_mem_size;
5645 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
5646 size, &cd, &entitlements, &der_entitlements);
5647 if (error) {
5648 if (cs_debug) {
5649 printf("CODESIGNING: csblob invalid: %d\n", error);
5650 }
5651 goto out;
5652 }
5653
5654 unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
5655 unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
5656
5657 if (blob->csb_reconstituted) {
5658 /*
5659 * Code signatures that have been modified after validation
5660 * cannot be revalidated inline from their in-memory blob.
5661 *
5662 * That's okay, though, because the only path left that relies
5663 * on revalidation of existing in-memory blobs is the legacy
5664 * detached signature database path, which only exists on macOS,
5665 * which does not do reconstitution of any kind.
5666 */
5667 if (cs_debug) {
5668 printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
5669 }
5670
5671 /*
5672 * EAGAIN tells the caller that they may reread the code
5673 * signature and try attaching it again, which is the same
5674 * thing they would do if there was no cs_blob yet in the
5675 * first place.
5676 *
5677 * Conveniently, after ubc_cs_blob_add did a successful
5678 * validation, it will detect that a matching cs_blob (cdhash,
5679 * offset, arch etc.) already exists, and return success
5680 * without re-adding a cs_blob to the vnode.
5681 */
5682 return EAGAIN;
5683 }
5684
5685 /* callout to mac_vnode_check_signature */
5686 #if CONFIG_MACF
5687 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
5688 if (cs_debug && error) {
5689 printf("revalidate: check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5690 }
5691 #else
5692 (void)flags;
5693 (void)signer_type;
5694 #endif
5695
5696 /* update generation number if success */
5697 vnode_lock_spin(vp);
5698 struct cs_signer_info signer_info = {
5699 .csb_flags = cs_flags,
5700 .csb_signer_type = signer_type
5701 };
5702 zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_signer_info, &signer_info);
5703 if (UBCINFOEXISTS(vp)) {
5704 if (error == 0) {
5705 vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
5706 } else {
5707 vp->v_ubcinfo->cs_add_gen = 0;
5708 }
5709 }
5710
5711 vnode_unlock(vp);
5712
5713 out:
5714 return error;
5715 }
5716
5717 void
5718 cs_blob_reset_cache(void)
5719 {
5720	/* Incrementing an odd number by 2 ensures '0' is never reached. */
5721	OSAddAtomic(+2, &cs_blob_generation_count);
5722	printf("Resetting cs_blob cache for all vnodes.\n");
5723 }
5724
5725 struct cs_blob *
5726 ubc_get_cs_blobs(
5727 struct vnode *vp)
5728 {
5729 struct ubc_info *uip;
5730 struct cs_blob *blobs;
5731
5732 /*
5733 * No need to take the vnode lock here. The caller must be holding
5734 * a reference on the vnode (via a VM mapping or open file descriptor),
5735 * so the vnode will not go away. The ubc_info stays until the vnode
5736 * goes away. And we only modify "blobs" by adding to the head of the
5737 * list.
5738 * The ubc_info could go away entirely if the vnode gets reclaimed as
5739 * part of a forced unmount. In the case of a code-signature validation
5740 * during a page fault, the "paging_in_progress" reference on the VM
5741	 * object guarantees that the vnode pager (and the ubc_info) won't go
5742 * away during the fault.
5743 * Other callers need to protect against vnode reclaim by holding the
5744 * vnode lock, for example.
5745 */
5746
5747 if (!UBCINFOEXISTS(vp)) {
5748 blobs = NULL;
5749 goto out;
5750 }
5751
5752 uip = vp->v_ubcinfo;
5753 blobs = uip->cs_blobs;
5754 if (blobs != NULL) {
5755 cs_blob_require(blobs, vp);
5756 }
5757
5758 out:
5759 return blobs;
5760 }
5761
5762 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5763 struct cs_blob *
5764 ubc_get_cs_supplement(
5765 struct vnode *vp)
5766 {
5767 struct ubc_info *uip;
5768 struct cs_blob *blob;
5769
5770 /*
5771 * No need to take the vnode lock here. The caller must be holding
5772 * a reference on the vnode (via a VM mapping or open file descriptor),
5773 * so the vnode will not go away. The ubc_info stays until the vnode
5774 * goes away.
5775 * The ubc_info could go away entirely if the vnode gets reclaimed as
5776 * part of a forced unmount. In the case of a code-signature validation
5777 * during a page fault, the "paging_in_progress" reference on the VM
5778	 * object guarantees that the vnode pager (and the ubc_info) won't go
5779 * away during the fault.
5780 * Other callers need to protect against vnode reclaim by holding the
5781 * vnode lock, for example.
5782 */
5783
5784 if (!UBCINFOEXISTS(vp)) {
5785 blob = NULL;
5786 goto out;
5787 }
5788
5789 uip = vp->v_ubcinfo;
5790 blob = uip->cs_blob_supplement;
5791 if (blob != NULL) {
5792 cs_blob_require(blob, vp);
5793 }
5794
5795 out:
5796 return blob;
5797 }
5798 #endif
5799
5800
5801 void
5802 ubc_get_cs_mtime(
5803 struct vnode *vp,
5804 struct timespec *cs_mtime)
5805 {
5806 struct ubc_info *uip;
5807
5808 if (!UBCINFOEXISTS(vp)) {
5809 cs_mtime->tv_sec = 0;
5810 cs_mtime->tv_nsec = 0;
5811 return;
5812 }
5813
5814 uip = vp->v_ubcinfo;
5815 cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
5816 cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
5817 }
5818
5819 unsigned long cs_validate_page_no_hash = 0;
5820 unsigned long cs_validate_page_bad_hash = 0;
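/*
 * Validate one code-signing "page" of data against the blob list: find
 * the blob covering page_offset, look up the expected hash in its code
 * directory, hash the supplied data (using the two-level scheme when
 * csb_hash_firstlevel_pageshift is set) and compare. Sets *tainted with
 * CS_VALIDATE_TAINTED / CS_VALIDATE_NX flags, and sets *bytes_processed
 * to the validated chunk size when a covering hash is found.
 */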
5821 static boolean_t
5822 cs_validate_hash(
5823 struct cs_blob *blobs,
5824 memory_object_t pager,
5825 memory_object_offset_t page_offset,
5826 const void *data,
5827 vm_size_t *bytes_processed,
5828 unsigned *tainted)
5829 {
5830 union cs_hash_union mdctx;
5831 struct cs_hash const *hashtype = NULL;
5832 unsigned char actual_hash[CS_HASH_MAX_SIZE];
5833 unsigned char expected_hash[CS_HASH_MAX_SIZE];
5834 boolean_t found_hash;
5835 struct cs_blob *blob;
5836 const CS_CodeDirectory *cd;
5837 const unsigned char *hash;
5838 boolean_t validated;
5839 off_t offset; /* page offset in the file */
5840 size_t size;
5841 off_t codeLimit = 0;
5842 const char *lower_bound, *upper_bound;
5843 vm_offset_t kaddr, blob_addr;
5844
5845 /* retrieve the expected hash */
5846 found_hash = FALSE;
5847
5848 for (blob = blobs;
5849 blob != NULL;
5850 blob = blob->csb_next) {
5851 offset = page_offset - blob->csb_base_offset;
5852 if (offset < blob->csb_start_offset ||
5853 offset >= blob->csb_end_offset) {
5854 /* our page is not covered by this blob */
5855 continue;
5856 }
5857
5858 /* blob data has been released */
5859 kaddr = (vm_offset_t)blob->csb_mem_kaddr;
5860 if (kaddr == 0) {
5861 continue;
5862 }
5863
5864 blob_addr = kaddr + blob->csb_mem_offset;
5865 lower_bound = CAST_DOWN(char *, blob_addr);
5866 upper_bound = lower_bound + blob->csb_mem_size;
5867
5868 cd = blob->csb_cd;
5869 if (cd != NULL) {
5870			/* all CDs that have been injected are already validated */
5871
5872 hashtype = blob->csb_hashtype;
5873 if (hashtype == NULL) {
5874 panic("unknown hash type ?");
5875 }
5876 if (hashtype->cs_digest_size > sizeof(actual_hash)) {
5877 panic("hash size too large");
5878 }
5879 if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
5880 panic("offset not aligned to cshash boundary");
5881 }
5882
5883 codeLimit = ntohl(cd->codeLimit);
5884
5885 hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
5886 hashtype->cs_size,
5887 lower_bound, upper_bound);
5888 if (hash != NULL) {
5889 bcopy(hash, expected_hash, hashtype->cs_size);
5890 found_hash = TRUE;
5891 }
5892
5893 break;
5894 }
5895 }
5896
5897 if (found_hash == FALSE) {
5898 /*
5899 * We can't verify this page because there is no signature
5900 * for it (yet). It's possible that this part of the object
5901 * is not signed, or that signatures for that part have not
5902 * been loaded yet.
5903 * Report that the page has not been validated and let the
5904 * caller decide if it wants to accept it or not.
5905 */
5906 cs_validate_page_no_hash++;
5907 if (cs_debug > 1) {
5908 printf("CODE SIGNING: cs_validate_page: "
5909 "mobj %p off 0x%llx: no hash to validate !?\n",
5910 pager, page_offset);
5911 }
5912 validated = FALSE;
5913 *tainted = 0;
5914 } else {
5915 *tainted = 0;
5916
5917 size = (1U << blob->csb_hash_pageshift);
5918 *bytes_processed = size;
5919
5920 const uint32_t *asha1, *esha1;
5921 if ((off_t)(offset + size) > codeLimit) {
5922 /* partial page at end of segment */
5923 assert(offset < codeLimit);
5924 size = (size_t) (codeLimit & (size - 1));
5925 *tainted |= CS_VALIDATE_NX;
5926 }
5927
5928 hashtype->cs_init(&mdctx);
5929
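/*
 * Multi-level hashing: the page is split into first-level chunks, each
 * chunk is hashed on its own, and the chunk digests are then fed into
 * the cumulative hash that is compared against the code directory entry.
 */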
5930 if (blob->csb_hash_firstlevel_pageshift) {
5931 const unsigned char *partial_data = (const unsigned char *)data;
5932 size_t i;
5933 for (i = 0; i < size;) {
5934 union cs_hash_union partialctx;
5935 unsigned char partial_digest[CS_HASH_MAX_SIZE];
5936 size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));
5937
5938 hashtype->cs_init(&partialctx);
5939 hashtype->cs_update(&partialctx, partial_data, partial_size);
5940 hashtype->cs_final(partial_digest, &partialctx);
5941
5942 /* Update cumulative multi-level hash */
5943 hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
5944 partial_data = partial_data + partial_size;
5945 i += partial_size;
5946 }
5947 } else {
5948 hashtype->cs_update(&mdctx, data, size);
5949 }
5950 hashtype->cs_final(actual_hash, &mdctx);
5951
5952 asha1 = (const uint32_t *) actual_hash;
5953 esha1 = (const uint32_t *) expected_hash;
5954
5955 if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
5956 if (cs_debug) {
5957 printf("CODE SIGNING: cs_validate_page: "
5958 "mobj %p off 0x%llx size 0x%lx: "
5959 "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
5960 "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
5961 pager, page_offset, size,
5962 asha1[0], asha1[1], asha1[2],
5963 asha1[3], asha1[4],
5964 esha1[0], esha1[1], esha1[2],
5965 esha1[3], esha1[4]);
5966 }
5967 cs_validate_page_bad_hash++;
5968 *tainted |= CS_VALIDATE_TAINTED;
5969 } else {
5970 if (cs_debug > 10) {
5971 printf("CODE SIGNING: cs_validate_page: "
5972 "mobj %p off 0x%llx size 0x%lx: "
5973 "SHA1 OK\n",
5974 pager, page_offset, size);
5975 }
5976 }
5977 validated = TRUE;
5978 }
5979
5980 return validated;
5981 }
5982
5983 boolean_t
5984 cs_validate_range(
5985 struct vnode *vp,
5986 memory_object_t pager,
5987 memory_object_offset_t page_offset,
5988 const void *data,
5989 vm_size_t dsize,
5990 unsigned *tainted)
5991 {
5992 vm_size_t offset_in_range;
5993 boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
5994
5995 struct cs_blob *blobs = ubc_get_cs_blobs(vp);
5996
5997 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5998 if (blobs == NULL && proc_is_translated(current_proc())) {
5999 struct cs_blob *supp = ubc_get_cs_supplement(vp);
6000
6001 if (supp != NULL) {
6002 blobs = supp;
6003 } else {
6004 return FALSE;
6005 }
6006 }
6007 #endif
6008
6009 #if DEVELOPMENT || DEBUG
6010 code_signing_config_t cs_config = 0;
6011
6012 /*
6013 * This exemption is specifically useful for systems which want to avoid paying
6014 * the cost of verifying the integrity of pages, since that is done by computing
6015 * hashes, which can take some time.
6016 */
6017 code_signing_configuration(NULL, &cs_config);
6018 if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
6019 *tainted = 0;
6020
6021 /* Return early to avoid paying the cost of hashing */
6022 return true;
6023 }
6024 #endif
6025
6026 *tainted = 0;
6027
6028 for (offset_in_range = 0;
6029 offset_in_range < dsize;
6030 /* offset_in_range updated based on bytes processed */) {
6031 unsigned subrange_tainted = 0;
6032 boolean_t subrange_validated;
6033 vm_size_t bytes_processed = 0;
6034
6035 subrange_validated = cs_validate_hash(blobs,
6036 pager,
6037 page_offset + offset_in_range,
6038 (const void *)((const char *)data + offset_in_range),
6039 &bytes_processed,
6040 &subrange_tainted);
6041
6042 *tainted |= subrange_tainted;
6043
6044 if (bytes_processed == 0) {
6045			/* Cannot make forward progress, so return an error */
6046 all_subranges_validated = FALSE;
6047 break;
6048 } else if (subrange_validated == FALSE) {
6049 all_subranges_validated = FALSE;
6050 /* Keep going to detect other types of failures in subranges */
6051 }
6052
6053 offset_in_range += bytes_processed;
6054 }
6055
6056 return all_subranges_validated;
6057 }
6058
6059 void
6060 cs_validate_page(
6061 struct vnode *vp,
6062 memory_object_t pager,
6063 memory_object_offset_t page_offset,
6064 const void *data,
6065 int *validated_p,
6066 int *tainted_p,
6067 int *nx_p)
6068 {
6069 vm_size_t offset_in_page;
6070 struct cs_blob *blobs;
6071
6072 blobs = ubc_get_cs_blobs(vp);
6073
6074 #if CONFIG_SUPPLEMENTAL_SIGNATURES
6075 if (blobs == NULL && proc_is_translated(current_proc())) {
6076 struct cs_blob *supp = ubc_get_cs_supplement(vp);
6077
6078 if (supp != NULL) {
6079 blobs = supp;
6080 }
6081 }
6082 #endif
6083
6084 #if DEVELOPMENT || DEBUG
6085 code_signing_config_t cs_config = 0;
6086
6087 /*
6088 * This exemption is specifically useful for systems which want to avoid paying
6089 * the cost of verifying the integrity of pages, since that is done by computing
6090 * hashes, which can take some time.
6091 */
6092 code_signing_configuration(NULL, &cs_config);
6093 if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
6094 *validated_p = VMP_CS_ALL_TRUE;
6095 *tainted_p = VMP_CS_ALL_FALSE;
6096 *nx_p = VMP_CS_ALL_FALSE;
6097
6098 /* Return early to avoid paying the cost of hashing */
6099 return;
6100 }
6101 #endif
6102
6103 *validated_p = VMP_CS_ALL_FALSE;
6104 *tainted_p = VMP_CS_ALL_FALSE;
6105 *nx_p = VMP_CS_ALL_FALSE;
6106
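/*
 * A VM page (PAGE_SIZE) may be larger than the code-signing page size
 * (4K). Validate each 4K chunk separately and report the results as
 * bitmasks in *validated_p / *tainted_p / *nx_p, one bit per chunk; if
 * a single hash covers the whole page, collapse to the ALL_TRUE /
 * ALL_FALSE values instead.
 */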
6107 for (offset_in_page = 0;
6108 offset_in_page < PAGE_SIZE;
6109 /* offset_in_page updated based on bytes processed */) {
6110 unsigned subrange_tainted = 0;
6111 boolean_t subrange_validated;
6112 vm_size_t bytes_processed = 0;
6113 int sub_bit;
6114
6115 subrange_validated = cs_validate_hash(blobs,
6116 pager,
6117 page_offset + offset_in_page,
6118 (const void *)((const char *)data + offset_in_page),
6119 &bytes_processed,
6120 &subrange_tainted);
6121
6122 if (bytes_processed == 0) {
6123 /* 4k chunk not code-signed: try next one */
6124 offset_in_page += FOURK_PAGE_SIZE;
6125 continue;
6126 }
6127 if (offset_in_page == 0 &&
6128 bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
6129 /* all processed: no 4k granularity */
6130 if (subrange_validated) {
6131 *validated_p = VMP_CS_ALL_TRUE;
6132 }
6133 if (subrange_tainted & CS_VALIDATE_TAINTED) {
6134 *tainted_p = VMP_CS_ALL_TRUE;
6135 }
6136 if (subrange_tainted & CS_VALIDATE_NX) {
6137 *nx_p = VMP_CS_ALL_TRUE;
6138 }
6139 break;
6140 }
6141 /* we only handle 4k or 16k code-signing granularity... */
6142 assertf(bytes_processed <= FOURK_PAGE_SIZE,
6143 "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
6144 vp, blobs, (uint64_t)page_offset,
6145 (uint64_t)offset_in_page, (uint64_t)bytes_processed);
6146 sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
6147 if (subrange_validated) {
6148 *validated_p |= sub_bit;
6149 }
6150 if (subrange_tainted & CS_VALIDATE_TAINTED) {
6151 *tainted_p |= sub_bit;
6152 }
6153 if (subrange_tainted & CS_VALIDATE_NX) {
6154 *nx_p |= sub_bit;
6155 }
6156 /* go to next 4k chunk */
6157 offset_in_page += FOURK_PAGE_SIZE;
6158 }
6159
6160 return;
6161 }
6162
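/*
 * Copy out the cdhash (and optionally the hash type) of the blob
 * covering "offset" in vp. A minimal usage sketch:
 *
 *     unsigned char cdhash[CS_CDHASH_LEN];
 *     uint8_t hash_type;
 *     if (ubc_cs_getcdhash(vp, 0, cdhash, &hash_type) == 0) {
 *         // cdhash[] now holds the code directory hash
 *     }
 */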
6163 int
6164 ubc_cs_getcdhash(
6165 vnode_t vp,
6166 off_t offset,
6167 unsigned char *cdhash,
6168 uint8_t *type)
6169 {
6170 struct cs_blob *blobs, *blob;
6171 off_t rel_offset;
6172 int ret;
6173
6174 vnode_lock(vp);
6175
6176 blobs = ubc_get_cs_blobs(vp);
6177 for (blob = blobs;
6178 blob != NULL;
6179 blob = blob->csb_next) {
6180 /* compute offset relative to this blob */
6181 rel_offset = offset - blob->csb_base_offset;
6182 if (rel_offset >= blob->csb_start_offset &&
6183 rel_offset < blob->csb_end_offset) {
6184 /* this blob does cover our "offset" ! */
6185 break;
6186 }
6187 }
6188
6189 if (blob == NULL) {
6190 /* we didn't find a blob covering "offset" */
6191 ret = EBADEXEC; /* XXX any better error ? */
6192 } else {
6193 /* get the CDHash of that blob */
6194 bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
6195
6196 /* get the type of the CDHash */
6197 if (type != NULL) {
6198 *type = blob->csb_cd->hashType;
6199 }
6200
6201 ret = 0;
6202 }
6203
6204 vnode_unlock(vp);
6205
6206 return ret;
6207 }
6208
6209 boolean_t
6210 ubc_cs_is_range_codesigned(
6211 vnode_t vp,
6212 mach_vm_offset_t start,
6213 mach_vm_size_t size)
6214 {
6215 struct cs_blob *csblob;
6216 mach_vm_offset_t blob_start;
6217 mach_vm_offset_t blob_end;
6218
6219 if (vp == NULL) {
6220 /* no file: no code signature */
6221 return FALSE;
6222 }
6223 if (size == 0) {
6224 /* no range: no code signature */
6225 return FALSE;
6226 }
6227 if (start + size < start) {
6228 /* overflow */
6229 return FALSE;
6230 }
6231
6232 csblob = ubc_cs_blob_get(vp, -1, -1, start);
6233 if (csblob == NULL) {
6234 return FALSE;
6235 }
6236
6237 /*
6238 * We currently check if the range is covered by a single blob,
6239 * which should always be the case for the dyld shared cache.
6240 * If we ever want to make this routine handle other cases, we
6241 * would have to iterate if the blob does not cover the full range.
6242 */
6243 blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
6244 csblob->csb_start_offset);
6245 blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
6246 csblob->csb_end_offset);
6247 if (blob_start > start || blob_end < (start + size)) {
6248 /* range not fully covered by this code-signing blob */
6249 return FALSE;
6250 }
6251
6252 return TRUE;
6253 }
6254
6255 #if CHECK_CS_VALIDATION_BITMAP
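/* stob: "size to bitmap bytes" -- one bit per page of the file, rounded
 * up to a whole number of bytes (07 is octal 7, i.e. +7 before >> 3). */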
6256 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
6257 extern boolean_t root_fs_upgrade_try;
6258
6259 /*
6260 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
6261 * Depends:
6262 * a) Is the target vnode on the root filesystem?
6263 * b) Has someone tried to mount the root filesystem read-write?
6264 * If answers are (a) yes AND (b) no, then we can use the bitmap.
6265 */
6266 #define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
6267 kern_return_t
6268 ubc_cs_validation_bitmap_allocate(
6269 vnode_t vp)
6270 {
6271 kern_return_t kr = KERN_SUCCESS;
6272 struct ubc_info *uip;
6273 char *target_bitmap;
6274 vm_object_size_t bitmap_size;
6275
6276 if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
6277 kr = KERN_INVALID_ARGUMENT;
6278 } else {
6279 uip = vp->v_ubcinfo;
6280
6281 if (uip->cs_valid_bitmap == NULL) {
6282 bitmap_size = stob(uip->ui_size);
6283 target_bitmap = (char*) kalloc_data((vm_size_t)bitmap_size, Z_WAITOK | Z_ZERO);
6284 if (target_bitmap == 0) {
6285 kr = KERN_NO_SPACE;
6286 } else {
6287 kr = KERN_SUCCESS;
6288 }
6289 if (kr == KERN_SUCCESS) {
6290 uip->cs_valid_bitmap = (void*)target_bitmap;
6291 uip->cs_valid_bitmap_size = bitmap_size;
6292 }
6293 }
6294 }
6295 return kr;
6296 }
6297
6298 kern_return_t
6299 ubc_cs_check_validation_bitmap(
6300 vnode_t vp,
6301 memory_object_offset_t offset,
6302 int optype)
6303 {
6304 kern_return_t kr = KERN_SUCCESS;
6305
6306 if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
6307 kr = KERN_INVALID_ARGUMENT;
6308 } else {
6309 struct ubc_info *uip = vp->v_ubcinfo;
6310 char *target_bitmap = uip->cs_valid_bitmap;
6311
6312 if (target_bitmap == NULL) {
6313 kr = KERN_INVALID_ARGUMENT;
6314 } else {
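/* Map the file offset to a bitmap position: bit index = page number
 * of the offset, byte index = bit / 8. */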
6315 uint64_t bit, byte;
6316 bit = atop_64( offset );
6317 byte = bit >> 3;
6318
6319 if (byte > uip->cs_valid_bitmap_size) {
6320 kr = KERN_INVALID_ARGUMENT;
6321 } else {
6322 if (optype == CS_BITMAP_SET) {
6323 target_bitmap[byte] |= (1 << (bit & 07));
6324 kr = KERN_SUCCESS;
6325 } else if (optype == CS_BITMAP_CLEAR) {
6326 target_bitmap[byte] &= ~(1 << (bit & 07));
6327 kr = KERN_SUCCESS;
6328 } else if (optype == CS_BITMAP_CHECK) {
6329 if (target_bitmap[byte] & (1 << (bit & 07))) {
6330 kr = KERN_SUCCESS;
6331 } else {
6332 kr = KERN_FAILURE;
6333 }
6334 }
6335 }
6336 }
6337 }
6338 return kr;
6339 }
6340
6341 void
6342 ubc_cs_validation_bitmap_deallocate(
6343 struct ubc_info *uip)
6344 {
6345 if (uip->cs_valid_bitmap != NULL) {
6346 kfree_data(uip->cs_valid_bitmap, (vm_size_t)uip->cs_valid_bitmap_size);
6347 uip->cs_valid_bitmap = NULL;
6348 }
6349 }
6350 #else
6351 kern_return_t
6352 ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
6353 {
6354 return KERN_INVALID_ARGUMENT;
6355 }
6356
6357 kern_return_t
6358 ubc_cs_check_validation_bitmap(
6359 __unused struct vnode *vp,
6360 __unused memory_object_offset_t offset,
6361 __unused int optype)
6362 {
6363 return KERN_INVALID_ARGUMENT;
6364 }
6365
6366 void
6367 ubc_cs_validation_bitmap_deallocate(__unused struct ubc_info *uip)
6368 {
6369 return;
6370 }
6371 #endif /* CHECK_CS_VALIDATION_BITMAP */
6372
6373 #if CODE_SIGNING_MONITOR
6374
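/*
 * Find the cs_blob that fully covers [offset, offset + size) and still
 * has both its in-kernel data and a code-signing-monitor object, then
 * hand that association to the monitor for the given pmap mapping.
 * Returns KERN_CODESIGN_ERROR if no suitable blob is found.
 */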
6375 kern_return_t
6376 cs_associate_blob_with_mapping(
6377 void *pmap,
6378 vm_map_offset_t start,
6379 vm_map_size_t size,
6380 vm_object_offset_t offset,
6381 void *blobs_p)
6382 {
6383 off_t blob_start_offset, blob_end_offset;
6384 kern_return_t kr;
6385 struct cs_blob *blobs, *blob;
6386 vm_offset_t kaddr;
6387 void *monitor_sig_obj = NULL;
6388
6389 if (csm_enabled() == false) {
6390 return KERN_NOT_SUPPORTED;
6391 }
6392
6393 blobs = (struct cs_blob *)blobs_p;
6394
6395 for (blob = blobs;
6396 blob != NULL;
6397 blob = blob->csb_next) {
6398 blob_start_offset = (blob->csb_base_offset +
6399 blob->csb_start_offset);
6400 blob_end_offset = (blob->csb_base_offset +
6401 blob->csb_end_offset);
6402 if ((off_t) offset < blob_start_offset ||
6403 (off_t) offset >= blob_end_offset ||
6404 (off_t) (offset + size) <= blob_start_offset ||
6405 (off_t) (offset + size) > blob_end_offset) {
6406 continue;
6407 }
6408
6409 kaddr = (vm_offset_t)blob->csb_mem_kaddr;
6410 if (kaddr == 0) {
6411 /* blob data has been released */
6412 continue;
6413 }
6414
6415 monitor_sig_obj = blob->csb_csm_obj;
6416 if (monitor_sig_obj == NULL) {
6417 continue;
6418 }
6419
6420 break;
6421 }
6422
6423 if (monitor_sig_obj != NULL) {
6424 vm_offset_t segment_offset = offset - blob_start_offset;
6425 kr = csm_associate_code_signature(pmap, monitor_sig_obj, start, size, segment_offset);
6426 } else {
6427 kr = KERN_CODESIGN_ERROR;
6428 }
6429
6430 return kr;
6431 }
6432
6433 #endif /* CODE_SIGNING_MONITOR */
6434