1 /*
2 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49 #include <os/refcnt.h>
50
51 #include <ipc/ipc_port.h>
52 #include <ipc/ipc_space.h>
53
54 #include <vm/vm_fault.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_pageout.h>
57 #include <vm/memory_object.h>
58 #include <vm/vm_pageout.h>
59 #include <vm/vm_protos.h>
60 #include <vm/vm_kern.h>
61
62 /*
63 * APPLE PROTECT MEMORY PAGER
64 *
65 * This external memory manager (EMM) handles memory from the encrypted
66 * sections of some executables protected by the DSMOS kernel extension.
67 *
68 * It mostly handles page-in requests (from memory_object_data_request()) by
69 * getting the encrypted data from its backing VM object, itself backed by
70 * the encrypted file, decrypting it and providing it to VM.
71 *
72 * The decrypted pages will never be dirtied, so the memory manager doesn't
73 * need to handle page-out requests (from memory_object_data_return()). The
74 * pages need to be mapped copy-on-write, so that the originals stay clean.
75 *
76 * We don't expect to have to handle a large number of apple-protected
77 * binaries, so the data structures are very simple (simple linked list)
78 * for now.
79 */
80
81 /* forward declarations */
82 void apple_protect_pager_reference(memory_object_t mem_obj);
83 void apple_protect_pager_deallocate(memory_object_t mem_obj);
84 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
85 memory_object_control_t control,
86 memory_object_cluster_size_t pg_size);
87 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
88 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
89 memory_object_offset_t offset,
90 memory_object_cluster_size_t length,
91 vm_prot_t protection_required,
92 memory_object_fault_info_t fault_info);
93 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
94 memory_object_offset_t offset,
95 memory_object_cluster_size_t data_cnt,
96 memory_object_offset_t *resid_offset,
97 int *io_error,
98 boolean_t dirty,
99 boolean_t kernel_copy,
100 int upl_flags);
101 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
102 memory_object_offset_t offset,
103 memory_object_cluster_size_t data_cnt);
104 kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
105 memory_object_offset_t offset,
106 memory_object_size_t size,
107 vm_prot_t desired_access);
108 kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
109 memory_object_offset_t offset,
110 memory_object_size_t length,
111 vm_sync_t sync_flags);
112 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
113 vm_prot_t prot);
114 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
115 boolean_t apple_protect_pager_backing_object(
116 memory_object_t mem_obj,
117 memory_object_offset_t mem_obj_offset,
118 vm_object_t *backing_object,
119 vm_object_offset_t *backing_offset);
120
121 #define CRYPT_INFO_DEBUG 0
122 void crypt_info_reference(struct pager_crypt_info *crypt_info);
123 void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
124
125 /*
126 * Vector of VM operations for this EMM.
127 * These routines are invoked by VM via the memory_object_*() interfaces.
128 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	.memory_object_reference = apple_protect_pager_reference,
	.memory_object_deallocate = apple_protect_pager_deallocate,
	.memory_object_init = apple_protect_pager_init,
	.memory_object_terminate = apple_protect_pager_terminate,
	.memory_object_data_request = apple_protect_pager_data_request,
	.memory_object_data_return = apple_protect_pager_data_return,
	.memory_object_data_initialize = apple_protect_pager_data_initialize,
	.memory_object_data_unlock = apple_protect_pager_data_unlock,
	.memory_object_synchronize = apple_protect_pager_synchronize,
	.memory_object_map = apple_protect_pager_map,
	.memory_object_last_unmap = apple_protect_pager_last_unmap,
	.memory_object_data_reclaim = NULL, /* not implemented by this pager */
	.memory_object_backing_object = apple_protect_pager_backing_object,
	.memory_object_pager_name = "apple_protect"
};
145
146 /*
147 * The "apple_protect_pager" describes a memory object backed by
148 * the "apple protect" EMM.
149 */
typedef struct apple_protect_pager {
	/* mandatory generic header */
	struct memory_object ap_pgr_hdr;

	/* pager-specific data */
	queue_chain_t pager_queue; /* next & prev pagers on apple_protect_pager_queue */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define ap_pgr_hdr_ref ap_pgr_hdr.mo_ref
#else
	os_ref_atomic_t ap_pgr_hdr_ref; /* reference count */
#endif
	bool is_ready; /* is this pager ready ? */
	bool is_mapped; /* is this mem_obj mapped ? */
	bool is_cached; /* is this pager cached ? */
	vm_object_t backing_object; /* VM obj w/ encrypted data */
	vm_object_offset_t backing_offset; /* start of this pager's data in backing_object */
	vm_object_offset_t crypto_backing_offset; /* for key... */
	vm_object_offset_t crypto_start; /* start of encrypted range (pager-relative offset) */
	vm_object_offset_t crypto_end; /* end of encrypted range (pager-relative offset) */
	struct pager_crypt_info *crypt_info; /* decryption ops; shared & refcounted (see crypt_info_deallocate) */
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
172
173 /*
174 * List of memory objects managed by this EMM.
175 * The list is protected by the "apple_protect_pager_lock" lock.
176 */
unsigned int apple_protect_pager_count = 0;        /* number of pagers */
unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers (see _map()/_last_unmap()) */
queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
unsigned int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
unsigned int apple_protect_pager_count_max = 0;          /* high-water mark of pager count -- TODO confirm; updated outside this view */
unsigned int apple_protect_pager_count_unmapped_max = 0; /* high-water mark of unmapped pagers (updated in _last_unmap()) */
unsigned int apple_protect_pager_num_trim_max = 0;       /* trim stats -- presumably maintained by apple_protect_pager_trim() */
unsigned int apple_protect_pager_num_trim_total = 0;     /* trim stats -- presumably maintained by apple_protect_pager_trim() */
195
196
197
198 /* internal prototypes */
199 apple_protect_pager_t apple_protect_pager_create(
200 vm_object_t backing_object,
201 vm_object_offset_t backing_offset,
202 vm_object_offset_t crypto_backing_offset,
203 struct pager_crypt_info *crypt_info,
204 vm_object_offset_t crypto_start,
205 vm_object_offset_t crypto_end,
206 boolean_t cache_pager);
207 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
208 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
209 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
210 boolean_t locked);
211 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
212 void apple_protect_pager_trim(void);
213
214
215 #if DEBUG
216 int apple_protect_pagerdebug = 0;
217 #define PAGER_ALL 0xffffffff
218 #define PAGER_INIT 0x00000001
219 #define PAGER_PAGEIN 0x00000002
220
221 #define PAGER_DEBUG(LEVEL, A) \
222 MACRO_BEGIN \
223 if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \
224 printf A; \
225 } \
226 MACRO_END
227 #else
228 #define PAGER_DEBUG(LEVEL, A)
229 #endif
230
231 /*
232 * apple_protect_pager_init()
233 *
234 * Initialize the memory object and makes it ready to be used and mapped.
235 */
kern_return_t
apple_protect_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t pager;
	kern_return_t kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = apple_protect_pager_lookup(mem_obj);

	/* hold a reference on the control port for the pager's lifetime */
	memory_object_control_reference(control);

	pager->ap_pgr_hdr.mo_control = control;

	/*
	 * COPY_DELAY + non-cacheable + temporary: the decrypted pages are
	 * mapped copy-on-write so the originals stay clean (see file header).
	 */
	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("apple_protect_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
		memory_object_mark_eligible_for_secluded(control, TRUE);
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}
287
288 /*
289 * apple_protect_data_return()
290 *
291 * Handles page-out requests from VM. This should never happen since
292 * the pages provided by this EMM are not supposed to be dirty or dirtied
293 * and VM should simply discard the contents and reclaim the pages if it
294 * needs to.
295 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	/* this pager's pages are never dirtied (mapped copy-on-write), so a page-out is a bug */
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}
310
/*
 * apple_protect_pager_data_initialize()
 *
 * Not expected to be called for this pager: treat any call as a bug.
 */
kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}
320
/*
 * apple_protect_pager_data_unlock()
 *
 * Access-unlock requests are not supported by this pager: always fail.
 */
kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	return KERN_FAILURE;
}
330
331 /*
332 * apple_protect_pager_data_request()
333 *
334 * Handles page-in requests from VM.
335 */
int apple_protect_pager_data_request_debug = 0; /* set non-zero for verbose per-chunk logging */
kern_return_t
apple_protect_pager_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t pager;
	memory_object_control_t mo_control;
	upl_t upl;
	int upl_flags;
	upl_size_t upl_size;
	upl_page_info_t *upl_pl;
	unsigned int pl_count;
	vm_object_t src_top_object, src_page_object, dst_object;
	kern_return_t kr, retval;
	vm_offset_t src_vaddr, dst_vaddr;
	vm_offset_t cur_offset;
	vm_offset_t offset_in_page;
	kern_return_t error_code;
	vm_prot_t prot;
	vm_page_t src_page, top_page;
	int interruptible;
	struct vm_object_fault_info fault_info;
	int ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_top_object = VM_OBJECT_NULL;
	src_page_object = VM_OBJECT_NULL;
	upl = NULL;
	upl_pl = NULL;
	/* take a private copy of the fault info so we can adjust it below */
	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/* translate the fault window into the backing object's offset space */
	fault_info.lo_offset += pager->backing_offset;
	fault_info.hi_offset += pager->backing_offset;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->ap_pgr_hdr.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_top_object = pager->backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    pager->backing_offset + offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE, /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("apple_protect_pager_data_request: "
			    "vm_fault_page() unexpected error 0x%x\n",
			    kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		/* keep the source page on the speculative queue */
		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();

			/* re-check under the queues lock */
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source
		 * and destination physical pages.
		 */
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
		    << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);

		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page, PAGE_SIZE, 0,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_nx);

		/*
		 * page_decrypt() might access a mapped file, so let's release
		 * the object lock for the source page to avoid a potential
		 * deadlock. The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 * Work in 4096-byte sub-chunks: PAGE_SIZE may be larger
		 * than 4096, and the crypto range check below is done per
		 * 4096-byte unit.
		 */
		for (offset_in_page = 0;
		    offset_in_page < PAGE_SIZE;
		    offset_in_page += 4096) {
			if (offset + cur_offset + offset_in_page <
			    pager->crypto_start ||
			    offset + cur_offset + offset_in_page >=
			    pager->crypto_end) {
				/* not encrypted: just copy */
				bcopy((const char *)(src_vaddr +
				    offset_in_page),
				    (char *)(dst_vaddr + offset_in_page),
				    4096);

				if (apple_protect_pager_data_request_debug) {
					printf("apple_protect_data_request"
					    "(%p,0x%llx+0x%llx+0x%04llx): "
					    "out of crypto range "
					    "[0x%llx:0x%llx]: "
					    "COPY [0x%016llx 0x%016llx] "
					    "code_signed=%d "
					    "cs_validated=%d "
					    "cs_tainted=%d "
					    "cs_nx=%d\n",
					    pager,
					    offset,
					    (uint64_t) cur_offset,
					    (uint64_t) offset_in_page,
					    pager->crypto_start,
					    pager->crypto_end,
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page),
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page + 8),
					    src_page_object->code_signed,
					    src_page->vmp_cs_validated,
					    src_page->vmp_cs_tainted,
					    src_page->vmp_cs_nx);
				}
				ret = 0;
				continue;
			}
			ret = pager->crypt_info->page_decrypt(
				(const void *)(src_vaddr + offset_in_page),
				(void *)(dst_vaddr + offset_in_page),
				((pager->crypto_backing_offset -
				pager->crypto_start) + /* XXX ? */
				offset +
				cur_offset +
				offset_in_page),
				pager->crypt_info->crypt_ops);

			if (apple_protect_pager_data_request_debug) {
				printf("apple_protect_data_request"
				    "(%p,0x%llx+0x%llx+0x%04llx): "
				    "in crypto range [0x%llx:0x%llx]: "
				    "DECRYPT offset 0x%llx="
				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
				    "[0x%016llx 0x%016llx] "
				    "code_signed=%d "
				    "cs_validated=%d "
				    "cs_tainted=%d "
				    "cs_nx=%d "
				    "ret=0x%x\n",
				    pager,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    pager->crypto_start, pager->crypto_end,
				    ((pager->crypto_backing_offset -
				    pager->crypto_start) +
				    offset +
				    cur_offset +
				    offset_in_page),
				    pager->crypto_backing_offset,
				    pager->crypto_start,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    *(uint64_t *)(dst_vaddr + offset_in_page),
				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
				    src_page_object->code_signed,
				    src_page->vmp_cs_validated,
				    src_page->vmp_cs_tainted,
				    src_page->vmp_cs_nx,
				    ret);
			}
			if (ret) {
				/* decryption error: stop this page */
				break;
			}
		}
		if (ret) {
			/*
			 * Decryption failed. Abort the fault.
			 */
			retval = KERN_ABORTED;
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000, /* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}
733
734 /*
735 * apple_protect_pager_reference()
736 *
737 * Get a reference on this memory object.
738 * For external usage only. Assumes that the initial reference count is not 0,
739 * i.e one should not "revive" a dead pager this way.
740 */
741 void
apple_protect_pager_reference(memory_object_t mem_obj)742 apple_protect_pager_reference(
743 memory_object_t mem_obj)
744 {
745 apple_protect_pager_t pager;
746
747 pager = apple_protect_pager_lookup(mem_obj);
748
749 lck_mtx_lock(&apple_protect_pager_lock);
750 os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
751 lck_mtx_unlock(&apple_protect_pager_lock);
752 }
753
754
755 /*
756 * apple_protect_pager_dequeue:
757 *
758 * Removes a pager from the list of pagers.
759 *
760 * The caller must hold "apple_protect_pager_lock".
761 */
762 void
apple_protect_pager_dequeue(apple_protect_pager_t pager)763 apple_protect_pager_dequeue(
764 apple_protect_pager_t pager)
765 {
766 assert(!pager->is_mapped);
767
768 queue_remove(&apple_protect_pager_queue,
769 pager,
770 apple_protect_pager_t,
771 pager_queue);
772 pager->pager_queue.next = NULL;
773 pager->pager_queue.prev = NULL;
774
775 apple_protect_pager_count--;
776 }
777
778 /*
779 * apple_protect_pager_terminate_internal:
780 *
781 * Trigger the asynchronous termination of the memory object associated
782 * with this pager.
783 * When the memory object is terminated, there will be one more call
784 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
785 * to finish the clean up.
786 *
787 * "apple_protect_pager_lock" should not be held by the caller.
788 * We don't need the lock because the pager has already been removed from
789 * the pagers' list and is now ours exclusively.
790 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	/* release our reference on the backing VM object */
	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
	    __FUNCTION__,
	    pager->crypt_info,
	    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
	crypt_info_deallocate(pager->crypt_info);
	pager->crypt_info = NULL;

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}
816
817 /*
818 * apple_protect_pager_deallocate_internal()
819 *
820 * Release a reference on this pager and free it when the last
821 * reference goes away.
822 * Can be called with apple_protect_pager_lock held or not but always returns
823 * with it unlocked.
824 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t pager,
	boolean_t locked)
{
	boolean_t needs_trimming;
	unsigned int count_unmapped;
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	/* decide (under the lock) whether the unmapped-pager cache needs trimming */
	count_unmapped = (apple_protect_pager_count -
	    apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated. Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct apple_protect_pager, pager);
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	/* trimming is done without the lock held */
	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}
883
884 /*
885 * apple_protect_pager_deallocate()
886 *
887 * Release a reference on this pager and free it when the last
888 * reference goes away.
889 */
890 void
apple_protect_pager_deallocate(memory_object_t mem_obj)891 apple_protect_pager_deallocate(
892 memory_object_t mem_obj)
893 {
894 apple_protect_pager_t pager;
895
896 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
897 pager = apple_protect_pager_lookup(mem_obj);
898 apple_protect_pager_deallocate_internal(pager, FALSE);
899 }
900
901 /*
902 *
903 */
/*
 * Nothing to do here: the real teardown is driven by the deallocate path
 * (see apple_protect_pager_terminate_internal()).
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}
915
916 /*
917 *
918 */
kern_return_t
apple_protect_pager_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t sync_flags)
{
	/* this interface is obsolete; any call indicates a bug in the caller */
	panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported");
	return KERN_FAILURE;
}
929
930 /*
931 * apple_protect_pager_map()
932 *
933 * This allows VM to let us, the EMM, know that this memory object
934 * is currently mapped one or more times. This is called by VM each time
935 * the memory object gets mapped and we take one extra reference on the
936 * memory object to account for all its mappings.
937 */
938 kern_return_t
apple_protect_pager_map(memory_object_t mem_obj,__unused vm_prot_t prot)939 apple_protect_pager_map(
940 memory_object_t mem_obj,
941 __unused vm_prot_t prot)
942 {
943 apple_protect_pager_t pager;
944
945 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
946
947 pager = apple_protect_pager_lookup(mem_obj);
948
949 lck_mtx_lock(&apple_protect_pager_lock);
950 assert(pager->is_ready);
951 assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
952 if (pager->is_mapped == FALSE) {
953 /*
954 * First mapping of this pager: take an extra reference
955 * that will remain until all the mappings of this pager
956 * are removed.
957 */
958 pager->is_mapped = TRUE;
959 os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
960 apple_protect_pager_count_mapped++;
961 }
962 lck_mtx_unlock(&apple_protect_pager_lock);
963
964 return KERN_SUCCESS;
965 }
966
967 /*
968 * apple_protect_pager_last_unmap()
969 *
970 * This is called by VM when this memory object is no longer mapped anywhere.
971 */
972 kern_return_t
apple_protect_pager_last_unmap(memory_object_t mem_obj)973 apple_protect_pager_last_unmap(
974 memory_object_t mem_obj)
975 {
976 apple_protect_pager_t pager;
977 unsigned int count_unmapped;
978
979 PAGER_DEBUG(PAGER_ALL,
980 ("apple_protect_pager_last_unmap: %p\n", mem_obj));
981
982 pager = apple_protect_pager_lookup(mem_obj);
983
984 lck_mtx_lock(&apple_protect_pager_lock);
985 if (pager->is_mapped) {
986 /*
987 * All the mappings are gone, so let go of the one extra
988 * reference that represents all the mappings of this pager.
989 */
990 apple_protect_pager_count_mapped--;
991 count_unmapped = (apple_protect_pager_count -
992 apple_protect_pager_count_mapped);
993 if (count_unmapped > apple_protect_pager_count_unmapped_max) {
994 apple_protect_pager_count_unmapped_max = count_unmapped;
995 }
996 pager->is_mapped = FALSE;
997 apple_protect_pager_deallocate_internal(pager, TRUE);
998 /* caution: deallocate_internal() released the lock ! */
999 } else {
1000 lck_mtx_unlock(&apple_protect_pager_lock);
1001 }
1002
1003 return KERN_SUCCESS;
1004 }
1005
1006 boolean_t
apple_protect_pager_backing_object(memory_object_t mem_obj,memory_object_offset_t offset,vm_object_t * backing_object,vm_object_offset_t * backing_offset)1007 apple_protect_pager_backing_object(
1008 memory_object_t mem_obj,
1009 memory_object_offset_t offset,
1010 vm_object_t *backing_object,
1011 vm_object_offset_t *backing_offset)
1012 {
1013 apple_protect_pager_t pager;
1014
1015 PAGER_DEBUG(PAGER_ALL,
1016 ("apple_protect_pager_backing_object: %p\n", mem_obj));
1017
1018 pager = apple_protect_pager_lookup(mem_obj);
1019
1020 *backing_object = pager->backing_object;
1021 *backing_offset = pager->backing_offset + offset;
1022
1023 return TRUE;
1024 }
1025
1026 /*
1027 *
1028 */
1029 apple_protect_pager_t
apple_protect_pager_lookup(memory_object_t mem_obj)1030 apple_protect_pager_lookup(
1031 memory_object_t mem_obj)
1032 {
1033 apple_protect_pager_t pager;
1034
1035 assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
1036 pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
1037 assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
1038 return pager;
1039 }
1040
/*
 * apple_protect_pager_create()
 *
 * Allocate and initialize a new "apple protect" pager for the given
 * backing VM object and crypto parameters, and enter it in the global
 * pager queue.  Returns the pager with one reference for the caller
 * (plus one extra reference held by the cache when "cache_pager" is
 * TRUE).  "crypt_info" was allocated by the caller; the pager keeps it
 * (or an equivalent pre-existing crypt_info found on another pager, in
 * which case the caller's copy is released before returning).
 *
 * If another thread raced us and already created an identical pager,
 * the new allocation is torn down and the winner's pager is returned
 * instead.
 */
apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t		backing_object,
	vm_object_offset_t	backing_offset,
	vm_object_offset_t	crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t	crypto_start,
	vm_object_offset_t	crypto_end,
	boolean_t		cache_pager)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;
	struct pager_crypt_info *old_crypt_info;

	/* Z_NOFAIL: allocation blocks until it succeeds, never NULL */
	pager = kalloc_type(struct apple_protect_pager, Z_WAITOK | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE;/* not ready until it has a "name" */
	/* one reference for the caller */
	os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
	pager->is_mapped = FALSE;
	if (cache_pager) {
		/* extra reference for the cache */
		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		pager->is_cached = true;
	} else {
		pager->is_cached = false;
	}
	pager->backing_object = backing_object;
	pager->backing_offset = backing_offset;
	pager->crypto_backing_offset = crypto_backing_offset;
	pager->crypto_start = crypto_start;
	pager->crypto_end = crypto_end;
	pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	/* keep the backing object alive for the life of this pager */
	vm_object_reference(backing_object);

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
	    pager2,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager2->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager2->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager2->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* crypt_info contents do not match: next pager */
			continue;
		}

		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager2->crypt_info);
		} else if (pager2->crypt_info != crypt_info) {
			/*
			 * ... switch to that pager's crypt_info, so that
			 * equivalent crypt_infos are shared rather than
			 * duplicated across pagers.
			 */
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: reference %p ref %d "
			    "(create match)\n",
			    __FUNCTION__,
			    pager2->crypt_info,
			    pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			old_crypt_info = pager2->crypt_info;
			crypt_info_reference(old_crypt_info);
			pager->crypt_info = old_crypt_info;
		}

		if (pager2->backing_object == backing_object &&
		    pager2->backing_offset == backing_offset &&
		    pager2->crypto_backing_offset == crypto_backing_offset &&
		    pager2->crypto_start == crypto_start &&
		    pager2->crypto_end == crypto_end) {
			/* full match: use that pager */
			break;
		}
	}
	/*
	 * If the iteration above stopped via "break", "pager2" is an
	 * identical pre-existing pager and our new one must be discarded.
	 */
	if (!queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager2)) {
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
		    __FUNCTION__,
		    pager->crypt_info,
		    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(pager->crypt_info);
		pager->crypt_info = NULL;
		kfree_type(struct apple_protect_pager, pager);
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* get a "name" (memory object control) for the new pager */
	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	if (old_crypt_info != NULL &&
	    old_crypt_info != crypt_info) {
		/* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: deallocate %p ref %d "
		    "(create used old)\n",
		    __FUNCTION__,
		    crypt_info,
		    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(crypt_info);
		crypt_info = NULL;
	}

	return pager;
}
1207
1208 /*
1209 * apple_protect_pager_setup()
1210 *
1211 * Provide the caller with a memory object backed by the provided
1212 * "backing_object" VM object. If such a memory object already exists,
1213 * re-use it, otherwise create a new memory object.
1214 */
1215 memory_object_t
apple_protect_pager_setup(vm_object_t backing_object,vm_object_offset_t backing_offset,vm_object_offset_t crypto_backing_offset,struct pager_crypt_info * crypt_info,vm_object_offset_t crypto_start,vm_object_offset_t crypto_end,boolean_t cache_pager)1216 apple_protect_pager_setup(
1217 vm_object_t backing_object,
1218 vm_object_offset_t backing_offset,
1219 vm_object_offset_t crypto_backing_offset,
1220 struct pager_crypt_info *crypt_info,
1221 vm_object_offset_t crypto_start,
1222 vm_object_offset_t crypto_end,
1223 boolean_t cache_pager)
1224 {
1225 apple_protect_pager_t pager;
1226 struct pager_crypt_info *old_crypt_info, *new_crypt_info;
1227
1228 #if CRYPT_INFO_DEBUG
1229 printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
1230 __FUNCTION__,
1231 crypt_info,
1232 crypt_info->page_decrypt,
1233 crypt_info->crypt_end,
1234 crypt_info->crypt_ops,
1235 crypt_info->crypt_refcnt);
1236 #endif /* CRYPT_INFO_DEBUG */
1237
1238 old_crypt_info = NULL;
1239
1240 lck_mtx_lock(&apple_protect_pager_lock);
1241
1242 queue_iterate(&apple_protect_pager_queue,
1243 pager,
1244 apple_protect_pager_t,
1245 pager_queue) {
1246 if ((pager->crypt_info->page_decrypt !=
1247 crypt_info->page_decrypt) ||
1248 (pager->crypt_info->crypt_end !=
1249 crypt_info->crypt_end) ||
1250 (pager->crypt_info->crypt_ops !=
1251 crypt_info->crypt_ops)) {
1252 /* no match for "crypt_info": next pager */
1253 continue;
1254 }
1255 /* found a match for crypt_info ... */
1256 if (old_crypt_info) {
1257 /* ... already switched to that crypt_info */
1258 assert(old_crypt_info == pager->crypt_info);
1259 } else {
1260 /* ... switch to that pager's crypt_info */
1261 old_crypt_info = pager->crypt_info;
1262 #if CRYPT_INFO_DEBUG
1263 printf("CRYPT_INFO %s: "
1264 "switching crypt_info from %p [%p,%p,%p,%d] "
1265 "to %p [%p,%p,%p,%d] from pager %p\n",
1266 __FUNCTION__,
1267 crypt_info,
1268 crypt_info->page_decrypt,
1269 crypt_info->crypt_end,
1270 crypt_info->crypt_ops,
1271 crypt_info->crypt_refcnt,
1272 old_crypt_info,
1273 old_crypt_info->page_decrypt,
1274 old_crypt_info->crypt_end,
1275 old_crypt_info->crypt_ops,
1276 old_crypt_info->crypt_refcnt,
1277 pager);
1278 printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
1279 __FUNCTION__,
1280 pager->crypt_info,
1281 pager->crypt_info->crypt_refcnt);
1282 #endif /* CRYPT_INFO_DEBUG */
1283 crypt_info_reference(pager->crypt_info);
1284 }
1285
1286 if (pager->backing_object == backing_object &&
1287 pager->backing_offset == backing_offset &&
1288 pager->crypto_backing_offset == crypto_backing_offset &&
1289 pager->crypto_start == crypto_start &&
1290 pager->crypto_end == crypto_end) {
1291 /* full match: use that pager! */
1292 assert(old_crypt_info == pager->crypt_info);
1293 assert(old_crypt_info->crypt_refcnt > 1);
1294 #if CRYPT_INFO_DEBUG
1295 printf("CRYPT_INFO %s: "
1296 "pager match with %p crypt_info %p\n",
1297 __FUNCTION__,
1298 pager,
1299 pager->crypt_info);
1300 printf("CRYPT_INFO %s: deallocate %p ref %d "
1301 "(pager match)\n",
1302 __FUNCTION__,
1303 old_crypt_info,
1304 old_crypt_info->crypt_refcnt);
1305 #endif /* CRYPT_INFO_DEBUG */
1306 /* release the extra ref on crypt_info we got above */
1307 crypt_info_deallocate(old_crypt_info);
1308 assert(old_crypt_info->crypt_refcnt > 0);
1309 /* give extra reference on pager to the caller */
1310 os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1311 break;
1312 }
1313 }
1314 if (queue_end(&apple_protect_pager_queue,
1315 (queue_entry_t) pager)) {
1316 lck_mtx_unlock(&apple_protect_pager_lock);
1317 /* no existing pager for this backing object */
1318 pager = APPLE_PROTECT_PAGER_NULL;
1319 if (old_crypt_info) {
1320 /* use this old crypt_info for new pager */
1321 new_crypt_info = old_crypt_info;
1322 #if CRYPT_INFO_DEBUG
1323 printf("CRYPT_INFO %s: "
1324 "will use old_crypt_info %p for new pager\n",
1325 __FUNCTION__,
1326 old_crypt_info);
1327 #endif /* CRYPT_INFO_DEBUG */
1328 } else {
1329 /* allocate a new crypt_info for new pager */
1330 new_crypt_info = kalloc_type(struct pager_crypt_info, Z_WAITOK);
1331 *new_crypt_info = *crypt_info;
1332 new_crypt_info->crypt_refcnt = 1;
1333 #if CRYPT_INFO_DEBUG
1334 printf("CRYPT_INFO %s: "
1335 "will use new_crypt_info %p for new pager\n",
1336 __FUNCTION__,
1337 new_crypt_info);
1338 #endif /* CRYPT_INFO_DEBUG */
1339 }
1340 if (new_crypt_info == NULL) {
1341 /* can't create new pager without a crypt_info */
1342 } else {
1343 /* create new pager */
1344 pager = apple_protect_pager_create(
1345 backing_object,
1346 backing_offset,
1347 crypto_backing_offset,
1348 new_crypt_info,
1349 crypto_start,
1350 crypto_end,
1351 cache_pager);
1352 }
1353 if (pager == APPLE_PROTECT_PAGER_NULL) {
1354 /* could not create a new pager */
1355 if (new_crypt_info == old_crypt_info) {
1356 /* release extra reference on old_crypt_info */
1357 #if CRYPT_INFO_DEBUG
1358 printf("CRYPT_INFO %s: deallocate %p ref %d "
1359 "(create fail old_crypt_info)\n",
1360 __FUNCTION__,
1361 old_crypt_info,
1362 old_crypt_info->crypt_refcnt);
1363 #endif /* CRYPT_INFO_DEBUG */
1364 crypt_info_deallocate(old_crypt_info);
1365 old_crypt_info = NULL;
1366 } else {
1367 /* release unused new_crypt_info */
1368 assert(new_crypt_info->crypt_refcnt == 1);
1369 #if CRYPT_INFO_DEBUG
1370 printf("CRYPT_INFO %s: deallocate %p ref %d "
1371 "(create fail new_crypt_info)\n",
1372 __FUNCTION__,
1373 new_crypt_info,
1374 new_crypt_info->crypt_refcnt);
1375 #endif /* CRYPT_INFO_DEBUG */
1376 crypt_info_deallocate(new_crypt_info);
1377 new_crypt_info = NULL;
1378 }
1379 return MEMORY_OBJECT_NULL;
1380 }
1381 lck_mtx_lock(&apple_protect_pager_lock);
1382 } else {
1383 assert(old_crypt_info == pager->crypt_info);
1384 }
1385
1386 while (!pager->is_ready) {
1387 lck_mtx_sleep(&apple_protect_pager_lock,
1388 LCK_SLEEP_DEFAULT,
1389 &pager->is_ready,
1390 THREAD_UNINT);
1391 }
1392 lck_mtx_unlock(&apple_protect_pager_lock);
1393
1394 return (memory_object_t) pager;
1395 }
1396
/*
 * apple_protect_pager_trim()
 *
 * Called when there are too many cached pagers: walk the global pager
 * queue from the oldest entry (tail) and terminate cached, ready,
 * unmapped pagers until the number of unmapped pagers drops back to
 * apple_protect_pager_cache_limit.
 */
void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	unsigned int		num_trim;
	unsigned int		count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
	    queue_last(&apple_protect_pager_queue);
	    !queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
		    queue_prev(&pager->pager_queue);

		/*
		 * Refcount == 2 and not mapped: presumably only the
		 * cache's extra reference and the initial one remain
		 * (see pager_create), i.e. no outside users.
		 */
		if (pager->is_cached &&
		    os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    apple_protect_pager_t,
			    pager_queue);

			count_unmapped = (apple_protect_pager_count -
			    apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	/* update trim statistics */
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers, outside the pager lock */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    apple_protect_pager_t,
		    pager_queue);
		assert(pager->is_cached);
		pager->is_cached = false;
		/* mark this pager as no longer on any queue */
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		os_ref_count_t __assert_only count;
		count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		assert(count == 1);
		apple_protect_pager_terminate_internal(pager);
	}
}
1473
1474
/*
 * crypt_info_reference()
 *
 * Take an extra reference on "crypt_info".  The count is updated
 * atomically so no lock is required, but the caller must already hold
 * a reference (the count must be non-zero on entry).
 */
void
crypt_info_reference(
	struct pager_crypt_info *crypt_info)
{
	assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->crypt_refcnt,
	    crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}
1489
1490 void
crypt_info_deallocate(struct pager_crypt_info * crypt_info)1491 crypt_info_deallocate(
1492 struct pager_crypt_info *crypt_info)
1493 {
1494 #if CRYPT_INFO_DEBUG
1495 printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1496 __FUNCTION__,
1497 crypt_info,
1498 crypt_info->crypt_refcnt,
1499 crypt_info->crypt_refcnt - 1);
1500 #endif /* CRYPT_INFO_DEBUG */
1501 OSAddAtomic(-1, &crypt_info->crypt_refcnt);
1502 if (crypt_info->crypt_refcnt == 0) {
1503 /* deallocate any crypt module data */
1504 if (crypt_info->crypt_end) {
1505 crypt_info->crypt_end(crypt_info->crypt_ops);
1506 crypt_info->crypt_end = NULL;
1507 }
1508 #if CRYPT_INFO_DEBUG
1509 printf("CRYPT_INFO %s: freeing %p\n",
1510 __FUNCTION__,
1511 crypt_info);
1512 #endif /* CRYPT_INFO_DEBUG */
1513 kfree_type(struct pager_crypt_info, crypt_info);
1514 }
1515 }
1516