xref: /xnu-8020.101.4/osfmk/vm/vm_apple_protect.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49 #include <os/refcnt.h>
50 
51 #include <ipc/ipc_port.h>
52 #include <ipc/ipc_space.h>
53 
54 #include <vm/vm_fault.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_pageout.h>
57 #include <vm/memory_object.h>
59 #include <vm/vm_protos.h>
60 #include <vm/vm_kern.h>
61 
62 /*
63  * APPLE PROTECT MEMORY PAGER
64  *
65  * This external memory manager (EMM) handles memory from the encrypted
66  * sections of some executables protected by the DSMOS kernel extension.
67  *
68  * It mostly handles page-in requests (from memory_object_data_request()) by
69  * getting the encrypted data from its backing VM object, itself backed by
70  * the encrypted file, decrypting it and providing it to VM.
71  *
72  * The decrypted pages will never be dirtied, so the memory manager doesn't
73  * need to handle page-out requests (from memory_object_data_return()).  The
74  * pages need to be mapped copy-on-write, so that the originals stay clean.
75  *
76  * We don't expect to have to handle a large number of apple-protected
77  * binaries, so the data structures are very simple (simple linked list)
78  * for now.
79  */
80 
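/*
 * Illustrative sketch (not a normative interface contract): a client of
 * this EMM typically obtains a pager with apple_protect_pager_setup()
 * and then maps the returned memory object copy-on-write over the
 * protected range, so that page-ins get served by
 * apple_protect_pager_data_request() below.  The local names in this
 * sketch are hypothetical:
 *
 *	memory_object_t mo;
 *
 *	mo = apple_protect_pager_setup(backing_object, backing_offset,
 *	    crypto_backing_offset, crypt_info,
 *	    crypto_start, crypto_end, FALSE);
 *	if (mo == MEMORY_OBJECT_NULL) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 */
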
81 /* forward declarations */
82 void apple_protect_pager_reference(memory_object_t mem_obj);
83 void apple_protect_pager_deallocate(memory_object_t mem_obj);
84 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
85     memory_object_control_t control,
86     memory_object_cluster_size_t pg_size);
87 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
88 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
89     memory_object_offset_t offset,
90     memory_object_cluster_size_t length,
91     vm_prot_t protection_required,
92     memory_object_fault_info_t fault_info);
93 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
94     memory_object_offset_t offset,
95     memory_object_cluster_size_t      data_cnt,
96     memory_object_offset_t *resid_offset,
97     int *io_error,
98     boolean_t dirty,
99     boolean_t kernel_copy,
100     int upl_flags);
101 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
102     memory_object_offset_t offset,
103     memory_object_cluster_size_t data_cnt);
104 kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
105     memory_object_offset_t offset,
106     memory_object_size_t size,
107     vm_prot_t desired_access);
108 kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
109     memory_object_offset_t offset,
110     memory_object_size_t length,
111     vm_sync_t sync_flags);
112 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
113     vm_prot_t prot);
114 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
115 boolean_t apple_protect_pager_backing_object(
116 	memory_object_t mem_obj,
117 	memory_object_offset_t mem_obj_offset,
118 	vm_object_t *backing_object,
119 	vm_object_offset_t *backing_offset);
120 
121 #define CRYPT_INFO_DEBUG 0
122 void crypt_info_reference(struct pager_crypt_info *crypt_info);
123 void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
124 
125 /*
126  * Vector of VM operations for this EMM.
127  * These routines are invoked by VM via the memory_object_*() interfaces.
128  */
129 const struct memory_object_pager_ops apple_protect_pager_ops = {
130 	.memory_object_reference = apple_protect_pager_reference,
131 	.memory_object_deallocate = apple_protect_pager_deallocate,
132 	.memory_object_init = apple_protect_pager_init,
133 	.memory_object_terminate = apple_protect_pager_terminate,
134 	.memory_object_data_request = apple_protect_pager_data_request,
135 	.memory_object_data_return = apple_protect_pager_data_return,
136 	.memory_object_data_initialize = apple_protect_pager_data_initialize,
137 	.memory_object_data_unlock = apple_protect_pager_data_unlock,
138 	.memory_object_synchronize = apple_protect_pager_synchronize,
139 	.memory_object_map = apple_protect_pager_map,
140 	.memory_object_last_unmap = apple_protect_pager_last_unmap,
141 	.memory_object_data_reclaim = NULL,
142 	.memory_object_backing_object = apple_protect_pager_backing_object,
143 	.memory_object_pager_name = "apple_protect"
144 };
145 
146 /*
147  * The "apple_protect_pager" describes a memory object backed by
148  * the "apple protect" EMM.
149  */
150 typedef struct apple_protect_pager {
151 	/* mandatory generic header */
152 	struct memory_object    ap_pgr_hdr;
153 
154 	/* pager-specific data */
155 	queue_chain_t           pager_queue;    /* next & prev pagers */
156 #if MEMORY_OBJECT_HAS_REFCOUNT
157 #define ap_pgr_hdr_ref          ap_pgr_hdr.mo_ref
158 #else
159 	os_ref_atomic_t         ap_pgr_hdr_ref;      /* reference count */
160 #endif
161 	bool                    is_ready;       /* is this pager ready ? */
162 	bool                    is_mapped;      /* is this mem_obj mapped ? */
163 	bool                    is_cached;      /* is this pager cached ? */
164 	vm_object_t             backing_object; /* VM obj w/ encrypted data */
165 	vm_object_offset_t      backing_offset;
166 	vm_object_offset_t      crypto_backing_offset; /* for key... */
167 	vm_object_offset_t      crypto_start;
168 	vm_object_offset_t      crypto_end;
169 	struct pager_crypt_info *crypt_info;
170 } *apple_protect_pager_t;
171 #define APPLE_PROTECT_PAGER_NULL        ((apple_protect_pager_t) NULL)
172 
173 /*
174  * List of memory objects managed by this EMM.
175  * The list is protected by the "apple_protect_pager_lock" lock.
176  */
177 unsigned int apple_protect_pager_count = 0;        /* number of pagers */
178 unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
179 queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
180 LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
181 LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);
182 
183 /*
184  * Maximum number of unmapped pagers we're willing to keep around.
185  */
186 unsigned int apple_protect_pager_cache_limit = 20;
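
/*
 * Example: with the default limit of 20, releasing a pager reference
 * while a 21st unmapped pager exists makes
 * apple_protect_pager_deallocate_internal() call
 * apple_protect_pager_trim(), which terminates cached, unmapped, ready
 * pagers (oldest first) until the unmapped count is back at the limit.
 */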
187 
188 /*
189  * Statistics & counters.
190  */
191 unsigned int apple_protect_pager_count_max = 0;
192 unsigned int apple_protect_pager_count_unmapped_max = 0;
193 unsigned int apple_protect_pager_num_trim_max = 0;
194 unsigned int apple_protect_pager_num_trim_total = 0;
195 
196 
197 
198 /* internal prototypes */
199 apple_protect_pager_t apple_protect_pager_create(
200 	vm_object_t backing_object,
201 	vm_object_offset_t backing_offset,
202 	vm_object_offset_t crypto_backing_offset,
203 	struct pager_crypt_info *crypt_info,
204 	vm_object_offset_t crypto_start,
205 	vm_object_offset_t crypto_end,
206 	boolean_t cache_pager);
207 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
208 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
209 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
210     boolean_t locked);
211 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
212 void apple_protect_pager_trim(void);
213 
214 
215 #if DEBUG
216 int apple_protect_pagerdebug = 0;
217 #define PAGER_ALL               0xffffffff
218 #define PAGER_INIT              0x00000001
219 #define PAGER_PAGEIN            0x00000002
220 
221 #define PAGER_DEBUG(LEVEL, A)                                           \
222 	MACRO_BEGIN                                                     \
223 	if ((apple_protect_pagerdebug & LEVEL)==LEVEL) {                \
224 	        printf A;                                               \
225 	}                                                               \
226 	MACRO_END
227 #else
228 #define PAGER_DEBUG(LEVEL, A)
229 #endif
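
/*
 * Usage example (DEBUG builds only; the trace below is hypothetical):
 * with "apple_protect_pagerdebug" set to PAGER_ALL, e.g. from the
 * kernel debugger, a call such as
 *
 *	PAGER_DEBUG(PAGER_PAGEIN, ("page-in %p 0x%llx\n", mem_obj, offset));
 *
 * prints only when the selected bit is set; on non-DEBUG builds the
 * macro expands to nothing.
 */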
230 
231 /*
232  * apple_protect_pager_init()
233  *
234  * Initializes the memory object and makes it ready to be used and mapped.
235  */
236 kern_return_t
237 apple_protect_pager_init(
238 	memory_object_t         mem_obj,
239 	memory_object_control_t control,
240 #if !DEBUG
241 	__unused
242 #endif
243 	memory_object_cluster_size_t pg_size)
244 {
245 	apple_protect_pager_t   pager;
246 	kern_return_t           kr;
247 	memory_object_attr_info_data_t  attributes;
248 
249 	PAGER_DEBUG(PAGER_ALL,
250 	    ("apple_protect_pager_init: %p, %p, %x\n",
251 	    mem_obj, control, pg_size));
252 
253 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
254 		return KERN_INVALID_ARGUMENT;
255 	}
256 
257 	pager = apple_protect_pager_lookup(mem_obj);
258 
259 	memory_object_control_reference(control);
260 
261 	pager->ap_pgr_hdr.mo_control = control;
262 
263 	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
264 	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
265 	attributes.cluster_size = (1 << (PAGE_SHIFT));
266 	attributes.may_cache_object = FALSE;
267 	attributes.temporary = TRUE;
268 
269 	kr = memory_object_change_attributes(
270 		control,
271 		MEMORY_OBJECT_ATTRIBUTE_INFO,
272 		(memory_object_info_t) &attributes,
273 		MEMORY_OBJECT_ATTR_INFO_COUNT);
274 	if (kr != KERN_SUCCESS) {
275 		panic("apple_protect_pager_init: "
276 		    "memory_object_change_attributes() failed");
277 	}
278 
279 #if CONFIG_SECLUDED_MEMORY
280 	if (secluded_for_filecache) {
281 		memory_object_mark_eligible_for_secluded(control, TRUE);
282 	}
283 #endif /* CONFIG_SECLUDED_MEMORY */
284 
285 	return KERN_SUCCESS;
286 }
287 
288 /*
289  * apple_protect_pager_data_return()
290  *
291  * Handles page-out requests from VM.  This should never happen since
292  * the pages provided by this EMM are not supposed to be dirty or dirtied
293  * and VM should simply discard the contents and reclaim the pages if it
294  * needs to.
295  */
296 kern_return_t
297 apple_protect_pager_data_return(
298 	__unused memory_object_t        mem_obj,
299 	__unused memory_object_offset_t offset,
300 	__unused memory_object_cluster_size_t           data_cnt,
301 	__unused memory_object_offset_t *resid_offset,
302 	__unused int                    *io_error,
303 	__unused boolean_t              dirty,
304 	__unused boolean_t              kernel_copy,
305 	__unused int                    upl_flags)
306 {
307 	panic("apple_protect_pager_data_return: should never get called");
308 	return KERN_FAILURE;
309 }
310 
311 kern_return_t
312 apple_protect_pager_data_initialize(
313 	__unused memory_object_t        mem_obj,
314 	__unused memory_object_offset_t offset,
315 	__unused memory_object_cluster_size_t           data_cnt)
316 {
317 	panic("apple_protect_pager_data_initialize: should never get called");
318 	return KERN_FAILURE;
319 }
320 
321 kern_return_t
322 apple_protect_pager_data_unlock(
323 	__unused memory_object_t        mem_obj,
324 	__unused memory_object_offset_t offset,
325 	__unused memory_object_size_t           size,
326 	__unused vm_prot_t              desired_access)
327 {
328 	return KERN_FAILURE;
329 }
330 
331 /*
332  * apple_protect_pager_data_request()
333  *
334  * Handles page-in requests from VM.
335  */
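/*
 * Informal outline of the page-in path implemented below:
 *
 *   1. Gather the requested range into a UPL on this pager's own VM
 *      object (memory_object_upl_request()).
 *   2. For each page present in the UPL, fault the corresponding
 *      encrypted page in from the backing object (vm_fault_page()),
 *      retrying on VM_FAULT_RETRY or a memory shortage.
 *   3. Propagate the source page's code-signing state to the UPL, then
 *      either plain-copy the page (outside [crypto_start, crypto_end))
 *      or run it through crypt_info->page_decrypt() into the
 *      destination page.
 *   4. Mark the UPL pages clean and commit them, or abort the UPL on
 *      failure (a decryption failure returns success so the fault can
 *      be retried later).
 */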
336 int apple_protect_pager_data_request_debug = 0;
337 kern_return_t
338 apple_protect_pager_data_request(
339 	memory_object_t         mem_obj,
340 	memory_object_offset_t  offset,
341 	memory_object_cluster_size_t            length,
342 #if !DEBUG
343 	__unused
344 #endif
345 	vm_prot_t               protection_required,
346 	memory_object_fault_info_t mo_fault_info)
347 {
348 	apple_protect_pager_t   pager;
349 	memory_object_control_t mo_control;
350 	upl_t                   upl;
351 	int                     upl_flags;
352 	upl_size_t              upl_size;
353 	upl_page_info_t         *upl_pl;
354 	unsigned int            pl_count;
355 	vm_object_t             src_top_object, src_page_object, dst_object;
356 	kern_return_t           kr, retval;
357 	vm_offset_t             src_vaddr, dst_vaddr;
358 	vm_offset_t             cur_offset;
359 	vm_offset_t             offset_in_page;
360 	kern_return_t           error_code;
361 	vm_prot_t               prot;
362 	vm_page_t               src_page, top_page;
363 	int                     interruptible;
364 	struct vm_object_fault_info     fault_info;
365 	int                     ret;
366 
367 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
368 
369 	retval = KERN_SUCCESS;
370 	src_top_object = VM_OBJECT_NULL;
371 	src_page_object = VM_OBJECT_NULL;
372 	upl = NULL;
373 	upl_pl = NULL;
374 	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
375 	fault_info.stealth = TRUE;
376 	fault_info.io_sync = FALSE;
377 	fault_info.mark_zf_absent = FALSE;
378 	fault_info.batch_pmap_op = FALSE;
379 	interruptible = fault_info.interruptible;
380 
381 	pager = apple_protect_pager_lookup(mem_obj);
382 	assert(pager->is_ready);
383 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */
384 
385 	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
386 
387 	fault_info.lo_offset += pager->backing_offset;
388 	fault_info.hi_offset += pager->backing_offset;
389 
390 	/*
391 	 * Gather in a UPL all the VM pages requested by VM.
392 	 */
393 	mo_control = pager->ap_pgr_hdr.mo_control;
394 
395 	upl_size = length;
396 	upl_flags =
397 	    UPL_RET_ONLY_ABSENT |
398 	    UPL_SET_LITE |
399 	    UPL_NO_SYNC |
400 	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
401 	    UPL_SET_INTERNAL;
402 	pl_count = 0;
403 	kr = memory_object_upl_request(mo_control,
404 	    offset, upl_size,
405 	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
406 	if (kr != KERN_SUCCESS) {
407 		retval = kr;
408 		goto done;
409 	}
410 	dst_object = memory_object_control_to_vm_object(mo_control);
411 	assert(dst_object != VM_OBJECT_NULL);
412 
413 	/*
414 	 * We'll map the encrypted data in the kernel address space from the
415 	 * backing VM object (itself backed by the encrypted file via
416 	 * the vnode pager).
417 	 */
418 	src_top_object = pager->backing_object;
419 	assert(src_top_object != VM_OBJECT_NULL);
420 	vm_object_reference(src_top_object); /* keep the source object alive */
421 
422 	/*
423 	 * Fill in the contents of the pages requested by VM.
424 	 */
425 	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
426 	pl_count = length / PAGE_SIZE;
427 	for (cur_offset = 0;
428 	    retval == KERN_SUCCESS && cur_offset < length;
429 	    cur_offset += PAGE_SIZE) {
430 		ppnum_t dst_pnum;
431 
432 		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
433 			/* this page is not in the UPL: skip it */
434 			continue;
435 		}
436 
437 		/*
438 		 * Map the source (encrypted) page in the kernel's
439 		 * virtual address space.
440 		 * We already hold a reference on the src_top_object.
441 		 */
442 retry_src_fault:
443 		vm_object_lock(src_top_object);
444 		vm_object_paging_begin(src_top_object);
445 		error_code = 0;
446 		prot = VM_PROT_READ;
447 		src_page = VM_PAGE_NULL;
448 		kr = vm_fault_page(src_top_object,
449 		    pager->backing_offset + offset + cur_offset,
450 		    VM_PROT_READ,
451 		    FALSE,
452 		    FALSE,                /* src_page not looked up */
453 		    &prot,
454 		    &src_page,
455 		    &top_page,
456 		    NULL,
457 		    &error_code,
458 		    FALSE,
459 		    FALSE,
460 		    &fault_info);
461 		switch (kr) {
462 		case VM_FAULT_SUCCESS:
463 			break;
464 		case VM_FAULT_RETRY:
465 			goto retry_src_fault;
466 		case VM_FAULT_MEMORY_SHORTAGE:
467 			if (vm_page_wait(interruptible)) {
468 				goto retry_src_fault;
469 			}
470 			OS_FALLTHROUGH;
471 		case VM_FAULT_INTERRUPTED:
472 			retval = MACH_SEND_INTERRUPTED;
473 			goto done;
474 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
475 			/* success but no VM page: fail */
476 			vm_object_paging_end(src_top_object);
477 			vm_object_unlock(src_top_object);
478 			OS_FALLTHROUGH;
479 		case VM_FAULT_MEMORY_ERROR:
480 			/* the page is not there ! */
481 			if (error_code) {
482 				retval = error_code;
483 			} else {
484 				retval = KERN_MEMORY_ERROR;
485 			}
486 			goto done;
487 		default:
488 			panic("apple_protect_pager_data_request: "
489 			    "vm_fault_page() unexpected error 0x%x\n",
490 			    kr);
491 		}
492 		assert(src_page != VM_PAGE_NULL);
493 		assert(src_page->vmp_busy);
494 
495 		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
496 			vm_page_lockspin_queues();
497 
498 			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
499 				vm_page_speculate(src_page, FALSE);
500 			}
501 			vm_page_unlock_queues();
502 		}
503 
504 		/*
505 		 * Establish pointers to the source
506 		 * and destination physical pages.
507 		 */
508 		dst_pnum = (ppnum_t)
509 		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
510 		assert(dst_pnum != 0);
511 
512 		src_vaddr = (vm_map_offset_t)
513 		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
514 		        << PAGE_SHIFT);
515 		dst_vaddr = (vm_map_offset_t)
516 		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
517 
518 		src_page_object = VM_PAGE_OBJECT(src_page);
519 
520 		/*
521 		 * Validate the original page...
522 		 */
523 		if (src_page_object->code_signed) {
524 			vm_page_validate_cs_mapped(
525 				src_page, PAGE_SIZE, 0,
526 				(const void *) src_vaddr);
527 		}
528 		/*
529 		 * ... and transfer the results to the destination page.
530 		 */
531 		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
532 		    src_page->vmp_cs_validated);
533 		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
534 		    src_page->vmp_cs_tainted);
535 		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
536 		    src_page->vmp_cs_nx);
537 
538 		/*
539 		 * page_decrypt() might access a mapped file, so let's release
540 		 * the object lock for the source page to avoid a potential
541 		 * deadlock.  The source page is kept busy and we have a
542 		 * "paging_in_progress" reference on its object, so it's safe
543 		 * to unlock the object here.
544 		 */
545 		assert(src_page->vmp_busy);
546 		assert(src_page_object->paging_in_progress > 0);
547 		vm_object_unlock(src_page_object);
548 
549 		/*
550 		 * Decrypt the encrypted contents of the source page
551 		 * into the destination page.
552 		 */
553 		for (offset_in_page = 0;
554 		    offset_in_page < PAGE_SIZE;
555 		    offset_in_page += 4096) {
556 			if (offset + cur_offset + offset_in_page <
557 			    pager->crypto_start ||
558 			    offset + cur_offset + offset_in_page >=
559 			    pager->crypto_end) {
560 				/* not encrypted: just copy */
561 				bcopy((const char *)(src_vaddr +
562 				    offset_in_page),
563 				    (char *)(dst_vaddr + offset_in_page),
564 				    4096);
565 
566 				if (apple_protect_pager_data_request_debug) {
567 					printf("apple_protect_data_request"
568 					    "(%p,0x%llx+0x%llx+0x%04llx): "
569 					    "out of crypto range "
570 					    "[0x%llx:0x%llx]: "
571 					    "COPY [0x%016llx 0x%016llx] "
572 					    "code_signed=%d "
573 					    "cs_validated=%d "
574 					    "cs_tainted=%d "
575 					    "cs_nx=%d\n",
576 					    pager,
577 					    offset,
578 					    (uint64_t) cur_offset,
579 					    (uint64_t) offset_in_page,
580 					    pager->crypto_start,
581 					    pager->crypto_end,
582 					    *(uint64_t *)(dst_vaddr +
583 					    offset_in_page),
584 					    *(uint64_t *)(dst_vaddr +
585 					    offset_in_page + 8),
586 					    src_page_object->code_signed,
587 					    src_page->vmp_cs_validated,
588 					    src_page->vmp_cs_tainted,
589 					    src_page->vmp_cs_nx);
590 				}
591 				ret = 0;
592 				continue;
593 			}
594 			ret = pager->crypt_info->page_decrypt(
595 				(const void *)(src_vaddr + offset_in_page),
596 				(void *)(dst_vaddr + offset_in_page),
597 				((pager->crypto_backing_offset -
598 				pager->crypto_start) +   /* XXX ? */
599 				offset +
600 				cur_offset +
601 				offset_in_page),
602 				pager->crypt_info->crypt_ops);
603 
604 			if (apple_protect_pager_data_request_debug) {
605 				printf("apple_protect_data_request"
606 				    "(%p,0x%llx+0x%llx+0x%04llx): "
607 				    "in crypto range [0x%llx:0x%llx]: "
608 				    "DECRYPT offset 0x%llx="
609 				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
610 				    "[0x%016llx 0x%016llx] "
611 				    "code_signed=%d "
612 				    "cs_validated=%d "
613 				    "cs_tainted=%d "
614 				    "cs_nx=%d "
615 				    "ret=0x%x\n",
616 				    pager,
617 				    offset,
618 				    (uint64_t) cur_offset,
619 				    (uint64_t) offset_in_page,
620 				    pager->crypto_start, pager->crypto_end,
621 				    ((pager->crypto_backing_offset -
622 				    pager->crypto_start) +
623 				    offset +
624 				    cur_offset +
625 				    offset_in_page),
626 				    pager->crypto_backing_offset,
627 				    pager->crypto_start,
628 				    offset,
629 				    (uint64_t) cur_offset,
630 				    (uint64_t) offset_in_page,
631 				    *(uint64_t *)(dst_vaddr + offset_in_page),
632 				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
633 				    src_page_object->code_signed,
634 				    src_page->vmp_cs_validated,
635 				    src_page->vmp_cs_tainted,
636 				    src_page->vmp_cs_nx,
637 				    ret);
638 			}
639 			if (ret) {
640 				break;
641 			}
642 		}
643 		if (ret) {
644 			/*
645 			 * Decryption failed.  Abort the fault.
646 			 */
647 			retval = KERN_ABORTED;
648 		}
649 
650 		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
651 		assert(src_page->vmp_busy);
652 		assert(src_page_object->paging_in_progress > 0);
653 		vm_object_lock(src_page_object);
654 
655 		/*
656 		 * Cleanup the result of vm_fault_page() of the source page.
657 		 */
658 		PAGE_WAKEUP_DONE(src_page);
659 		src_page = VM_PAGE_NULL;
660 		vm_object_paging_end(src_page_object);
661 		vm_object_unlock(src_page_object);
662 
663 		if (top_page != VM_PAGE_NULL) {
664 			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
665 			vm_object_lock(src_top_object);
666 			VM_PAGE_FREE(top_page);
667 			vm_object_paging_end(src_top_object);
668 			vm_object_unlock(src_top_object);
669 		}
670 	}
671 
672 done:
673 	if (upl != NULL) {
674 		/* clean up the UPL */
675 
676 		/*
677 		 * The pages are currently dirty because we've just been
678 		 * writing on them, but as far as we're concerned, they're
679 		 * clean since they contain their "original" contents as
680 		 * provided by us, the pager.
681 		 * Tell the UPL to mark them "clean".
682 		 */
683 		upl_clear_dirty(upl, TRUE);
684 
685 		/* abort or commit the UPL */
686 		if (retval != KERN_SUCCESS) {
687 			upl_abort(upl, 0);
688 			if (retval == KERN_ABORTED) {
689 				wait_result_t   wait_result;
690 
691 				/*
692 				 * We aborted the fault and did not provide
693 				 * any contents for the requested pages but
694 				 * the pages themselves are not invalid, so
695 				 * let's return success and let the caller
696 				 * retry the fault, in case it might succeed
697 				 * later (when the decryption code is up and
698 				 * running in the kernel, for example).
699 				 */
700 				retval = KERN_SUCCESS;
701 				/*
702 				 * Wait a little bit first to avoid using
703 				 * too much CPU time retrying and failing
704 				 * the same fault over and over again.
705 				 */
706 				wait_result = assert_wait_timeout(
707 					(event_t) apple_protect_pager_data_request,
708 					THREAD_UNINT,
709 					10000,  /* 10ms */
710 					NSEC_PER_USEC);
711 				assert(wait_result == THREAD_WAITING);
712 				wait_result = thread_block(THREAD_CONTINUE_NULL);
713 				assert(wait_result == THREAD_TIMED_OUT);
714 			}
715 		} else {
716 			boolean_t empty;
717 			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
718 			    "upl %p offset 0x%llx size 0x%x",
719 			    upl, upl->u_offset, upl->u_size);
720 			upl_commit_range(upl, 0, upl->u_size,
721 			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
722 			    upl_pl, pl_count, &empty);
723 		}
724 
725 		/* and deallocate the UPL */
726 		upl_deallocate(upl);
727 		upl = NULL;
728 	}
729 	if (src_top_object != VM_OBJECT_NULL) {
730 		vm_object_deallocate(src_top_object);
731 	}
732 	return retval;
733 }
734 
735 /*
736  * apple_protect_pager_reference()
737  *
738  * Get a reference on this memory object.
739  * For external usage only.  Assumes that the initial reference count is not 0,
740  * i.e. one should not "revive" a dead pager this way.
741  */
742 void
743 apple_protect_pager_reference(
744 	memory_object_t         mem_obj)
745 {
746 	apple_protect_pager_t   pager;
747 
748 	pager = apple_protect_pager_lookup(mem_obj);
749 
750 	lck_mtx_lock(&apple_protect_pager_lock);
751 	os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
752 	lck_mtx_unlock(&apple_protect_pager_lock);
753 }
754 
755 
756 /*
757  * apple_protect_pager_dequeue:
758  *
759  * Removes a pager from the list of pagers.
760  *
761  * The caller must hold "apple_protect_pager_lock".
762  */
763 void
764 apple_protect_pager_dequeue(
765 	apple_protect_pager_t pager)
766 {
767 	assert(!pager->is_mapped);
768 
769 	queue_remove(&apple_protect_pager_queue,
770 	    pager,
771 	    apple_protect_pager_t,
772 	    pager_queue);
773 	pager->pager_queue.next = NULL;
774 	pager->pager_queue.prev = NULL;
775 
776 	apple_protect_pager_count--;
777 }
778 
779 /*
780  * apple_protect_pager_terminate_internal:
781  *
782  * Trigger the asynchronous termination of the memory object associated
783  * with this pager.
784  * When the memory object is terminated, there will be one more call
785  * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
786  * to finish the clean up.
787  *
788  * "apple_protect_pager_lock" should not be held by the caller.
789  * We don't need the lock because the pager has already been removed from
790  * the pagers' list and is now ours exclusively.
791  */
792 void
793 apple_protect_pager_terminate_internal(
794 	apple_protect_pager_t pager)
795 {
796 	assert(pager->is_ready);
797 	assert(!pager->is_mapped);
798 
799 	if (pager->backing_object != VM_OBJECT_NULL) {
800 		vm_object_deallocate(pager->backing_object);
801 		pager->backing_object = VM_OBJECT_NULL;
802 	}
803 
804 	/* one less pager using this "pager_crypt_info" */
805 #if CRYPT_INFO_DEBUG
806 	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
807 	    __FUNCTION__,
808 	    pager->crypt_info,
809 	    pager->crypt_info->crypt_refcnt);
810 #endif /* CRYPT_INFO_DEBUG */
811 	crypt_info_deallocate(pager->crypt_info);
812 	pager->crypt_info = NULL;
813 
814 	/* trigger the destruction of the memory object */
815 	memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
816 }
817 
818 /*
819  * apple_protect_pager_deallocate_internal()
820  *
821  * Release a reference on this pager and free it when the last
822  * reference goes away.
823  * Can be called with apple_protect_pager_lock held or not but always returns
824  * with it unlocked.
825  */
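/*
 * Informal summary of the reference-count states handled below:
 *
 *	> 1	pager still in use: the extra references come from
 *		mappings (is_mapped), the pager cache (is_cached) and
 *		other callers.
 *	== 1	only the "named" reference from the memory object is
 *		left: dequeue the pager and trigger its termination.
 *	== 0	the memory object has been terminated: release the
 *		control port and free the pager structure.
 */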
826 void
827 apple_protect_pager_deallocate_internal(
828 	apple_protect_pager_t   pager,
829 	boolean_t               locked)
830 {
831 	boolean_t       needs_trimming;
832 	unsigned int    count_unmapped;
833 	os_ref_count_t  ref_count;
834 
835 	if (!locked) {
836 		lck_mtx_lock(&apple_protect_pager_lock);
837 	}
838 
839 	count_unmapped = (apple_protect_pager_count -
840 	    apple_protect_pager_count_mapped);
841 	if (count_unmapped > apple_protect_pager_cache_limit) {
842 		/* we have too many unmapped pagers:  trim some */
843 		needs_trimming = TRUE;
844 	} else {
845 		needs_trimming = FALSE;
846 	}
847 
848 	/* drop a reference on this pager */
849 	ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
850 
851 	if (ref_count == 1) {
852 		/*
853 		 * Only the "named" reference is left, which means that
854 		 * no one is really holding on to this pager anymore.
855 		 * Terminate it.
856 		 */
857 		apple_protect_pager_dequeue(pager);
858 		/* the pager is all ours: no need for the lock now */
859 		lck_mtx_unlock(&apple_protect_pager_lock);
860 		apple_protect_pager_terminate_internal(pager);
861 	} else if (ref_count == 0) {
862 		/*
863 		 * Dropped the existence reference;  the memory object has
864 		 * been terminated.  Do some final cleanup and release the
865 		 * pager structure.
866 		 */
867 		lck_mtx_unlock(&apple_protect_pager_lock);
868 		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
869 			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
870 			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
871 		}
872 		kfree_type(struct apple_protect_pager, pager);
873 		pager = APPLE_PROTECT_PAGER_NULL;
874 	} else {
875 		/* there are still plenty of references:  keep going... */
876 		lck_mtx_unlock(&apple_protect_pager_lock);
877 	}
878 
879 	if (needs_trimming) {
880 		apple_protect_pager_trim();
881 	}
882 	/* caution: lock is not held on return... */
883 }
884 
885 /*
886  * apple_protect_pager_deallocate()
887  *
888  * Release a reference on this pager and free it when the last
889  * reference goes away.
890  */
891 void
892 apple_protect_pager_deallocate(
893 	memory_object_t         mem_obj)
894 {
895 	apple_protect_pager_t   pager;
896 
897 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
898 	pager = apple_protect_pager_lookup(mem_obj);
899 	apple_protect_pager_deallocate_internal(pager, FALSE);
900 }
901 
902 /*
903  *
904  */
905 kern_return_t
906 apple_protect_pager_terminate(
907 #if !DEBUG
908 	__unused
909 #endif
910 	memory_object_t mem_obj)
911 {
912 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));
913 
914 	return KERN_SUCCESS;
915 }
916 
917 /*
918  *
919  */
920 kern_return_t
921 apple_protect_pager_synchronize(
922 	__unused memory_object_t                mem_obj,
923 	__unused memory_object_offset_t offset,
924 	__unused memory_object_size_t           length,
925 	__unused vm_sync_t              sync_flags)
926 {
927 	panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported");
928 	return KERN_FAILURE;
929 }
930 
931 /*
932  * apple_protect_pager_map()
933  *
934  * This allows VM to let us, the EMM, know that this memory object
935  * is currently mapped one or more times.  This is called by VM each time
936  * the memory object gets mapped and we take one extra reference on the
937  * memory object to account for all its mappings.
938  */
939 kern_return_t
940 apple_protect_pager_map(
941 	memory_object_t         mem_obj,
942 	__unused vm_prot_t      prot)
943 {
944 	apple_protect_pager_t   pager;
945 
946 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
947 
948 	pager = apple_protect_pager_lookup(mem_obj);
949 
950 	lck_mtx_lock(&apple_protect_pager_lock);
951 	assert(pager->is_ready);
952 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
953 	if (pager->is_mapped == FALSE) {
954 		/*
955 		 * First mapping of this pager:  take an extra reference
956 		 * that will remain until all the mappings of this pager
957 		 * are removed.
958 		 */
959 		pager->is_mapped = TRUE;
960 		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
961 		apple_protect_pager_count_mapped++;
962 	}
963 	lck_mtx_unlock(&apple_protect_pager_lock);
964 
965 	return KERN_SUCCESS;
966 }
967 
968 /*
969  * apple_protect_pager_last_unmap()
970  *
971  * This is called by VM when this memory object is no longer mapped anywhere.
972  */
973 kern_return_t
974 apple_protect_pager_last_unmap(
975 	memory_object_t         mem_obj)
976 {
977 	apple_protect_pager_t   pager;
978 	unsigned int            count_unmapped;
979 
980 	PAGER_DEBUG(PAGER_ALL,
981 	    ("apple_protect_pager_last_unmap: %p\n", mem_obj));
982 
983 	pager = apple_protect_pager_lookup(mem_obj);
984 
985 	lck_mtx_lock(&apple_protect_pager_lock);
986 	if (pager->is_mapped) {
987 		/*
988 		 * All the mappings are gone, so let go of the one extra
989 		 * reference that represents all the mappings of this pager.
990 		 */
991 		apple_protect_pager_count_mapped--;
992 		count_unmapped = (apple_protect_pager_count -
993 		    apple_protect_pager_count_mapped);
994 		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
995 			apple_protect_pager_count_unmapped_max = count_unmapped;
996 		}
997 		pager->is_mapped = FALSE;
998 		apple_protect_pager_deallocate_internal(pager, TRUE);
999 		/* caution: deallocate_internal() released the lock ! */
1000 	} else {
1001 		lck_mtx_unlock(&apple_protect_pager_lock);
1002 	}
1003 
1004 	return KERN_SUCCESS;
1005 }
1006 
1007 boolean_t
1008 apple_protect_pager_backing_object(
1009 	memory_object_t mem_obj,
1010 	memory_object_offset_t offset,
1011 	vm_object_t *backing_object,
1012 	vm_object_offset_t *backing_offset)
1013 {
1014 	apple_protect_pager_t   pager;
1015 
1016 	PAGER_DEBUG(PAGER_ALL,
1017 	    ("apple_protect_pager_backing_object: %p\n", mem_obj));
1018 
1019 	pager = apple_protect_pager_lookup(mem_obj);
1020 
1021 	*backing_object = pager->backing_object;
1022 	*backing_offset = pager->backing_offset + offset;
1023 
1024 	return TRUE;
1025 }
1026 
1027 /*
1028  *
1029  */
1030 apple_protect_pager_t
1031 apple_protect_pager_lookup(
1032 	memory_object_t  mem_obj)
1033 {
1034 	apple_protect_pager_t   pager;
1035 
1036 	assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
1037 	pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
1038 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
1039 	return pager;
1040 }
1041 
1042 apple_protect_pager_t
1043 apple_protect_pager_create(
1044 	vm_object_t             backing_object,
1045 	vm_object_offset_t      backing_offset,
1046 	vm_object_offset_t      crypto_backing_offset,
1047 	struct pager_crypt_info *crypt_info,
1048 	vm_object_offset_t      crypto_start,
1049 	vm_object_offset_t      crypto_end,
1050 	boolean_t               cache_pager)
1051 {
1052 	apple_protect_pager_t   pager, pager2;
1053 	memory_object_control_t control;
1054 	kern_return_t           kr;
1055 	struct pager_crypt_info *old_crypt_info;
1056 
1057 	pager = kalloc_type(struct apple_protect_pager, Z_WAITOK | Z_NOFAIL);
1058 
1059 	/*
1060 	 * The vm_map call takes both named entry ports and raw memory
1061 	 * objects in the same parameter.  We need to make sure that
1062 	 * vm_map does not see this object as a named entry port.  So,
1063 	 * we reserve the first word in the object for a fake ip_kotype
1064 	 * setting - that will tell vm_map to use it as a memory object.
1065 	 */
1066 	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
1067 	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
1068 	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1069 
1070 	pager->is_ready = FALSE;/* not ready until it has a "name" */
1071 	/* one reference for the caller */
1072 	os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
1073 	pager->is_mapped = FALSE;
1074 	if (cache_pager) {
1075 		/* extra reference for the cache */
1076 		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1077 		pager->is_cached = true;
1078 	} else {
1079 		pager->is_cached = false;
1080 	}
1081 	pager->backing_object = backing_object;
1082 	pager->backing_offset = backing_offset;
1083 	pager->crypto_backing_offset = crypto_backing_offset;
1084 	pager->crypto_start = crypto_start;
1085 	pager->crypto_end = crypto_end;
1086 	pager->crypt_info = crypt_info; /* allocated by caller */
1087 
1088 #if CRYPT_INFO_DEBUG
1089 	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
1090 	    __FUNCTION__,
1091 	    crypt_info,
1092 	    crypt_info->page_decrypt,
1093 	    crypt_info->crypt_end,
1094 	    crypt_info->crypt_ops,
1095 	    crypt_info->crypt_refcnt);
1096 #endif /* CRYPT_INFO_DEBUG */
1097 
1098 	vm_object_reference(backing_object);
1099 
1100 	old_crypt_info = NULL;
1101 
1102 	lck_mtx_lock(&apple_protect_pager_lock);
1103 	/* see if anyone raced us to create a pager for the same object */
1104 	queue_iterate(&apple_protect_pager_queue,
1105 	    pager2,
1106 	    apple_protect_pager_t,
1107 	    pager_queue) {
1108 		if ((pager2->crypt_info->page_decrypt !=
1109 		    crypt_info->page_decrypt) ||
1110 		    (pager2->crypt_info->crypt_end !=
1111 		    crypt_info->crypt_end) ||
1112 		    (pager2->crypt_info->crypt_ops !=
1113 		    crypt_info->crypt_ops)) {
1114 			/* crypt_info contents do not match: next pager */
1115 			continue;
1116 		}
1117 
1118 		/* found a match for crypt_info ... */
1119 		if (old_crypt_info) {
1120 			/* ... already switched to that crypt_info */
1121 			assert(old_crypt_info == pager2->crypt_info);
1122 		} else if (pager2->crypt_info != crypt_info) {
1123 			/* ... switch to that pager's crypt_info */
1124 #if CRYPT_INFO_DEBUG
1125 			printf("CRYPT_INFO %s: reference %p ref %d "
1126 			    "(create match)\n",
1127 			    __FUNCTION__,
1128 			    pager2->crypt_info,
1129 			    pager2->crypt_info->crypt_refcnt);
1130 #endif /* CRYPT_INFO_DEBUG */
1131 			old_crypt_info = pager2->crypt_info;
1132 			crypt_info_reference(old_crypt_info);
1133 			pager->crypt_info = old_crypt_info;
1134 		}
1135 
1136 		if (pager2->backing_object == backing_object &&
1137 		    pager2->backing_offset == backing_offset &&
1138 		    pager2->crypto_backing_offset == crypto_backing_offset &&
1139 		    pager2->crypto_start == crypto_start &&
1140 		    pager2->crypto_end == crypto_end) {
1141 			/* full match: use that pager */
1142 			break;
1143 		}
1144 	}
1145 	if (!queue_end(&apple_protect_pager_queue,
1146 	    (queue_entry_t) pager2)) {
1147 		/* we lost the race, down with the loser... */
1148 		lck_mtx_unlock(&apple_protect_pager_lock);
1149 		vm_object_deallocate(pager->backing_object);
1150 		pager->backing_object = VM_OBJECT_NULL;
1151 #if CRYPT_INFO_DEBUG
1152 		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
1153 		    __FUNCTION__,
1154 		    pager->crypt_info,
1155 		    pager->crypt_info->crypt_refcnt);
1156 #endif /* CRYPT_INFO_DEBUG */
1157 		crypt_info_deallocate(pager->crypt_info);
1158 		pager->crypt_info = NULL;
1159 		kfree_type(struct apple_protect_pager, pager);
1160 		/* ... and go with the winner */
1161 		pager = pager2;
1162 		/* let the winner make sure the pager gets ready */
1163 		return pager;
1164 	}
1165 
1166 	/* enter new pager at the head of our list of pagers */
1167 	queue_enter_first(&apple_protect_pager_queue,
1168 	    pager,
1169 	    apple_protect_pager_t,
1170 	    pager_queue);
1171 	apple_protect_pager_count++;
1172 	if (apple_protect_pager_count > apple_protect_pager_count_max) {
1173 		apple_protect_pager_count_max = apple_protect_pager_count;
1174 	}
1175 	lck_mtx_unlock(&apple_protect_pager_lock);
1176 
1177 	kr = memory_object_create_named((memory_object_t) pager,
1178 	    0,
1179 	    &control);
1180 	assert(kr == KERN_SUCCESS);
1181 
1182 	memory_object_mark_trusted(control);
1183 
1184 	lck_mtx_lock(&apple_protect_pager_lock);
1185 	/* the new pager is now ready to be used */
1186 	pager->is_ready = TRUE;
1187 	lck_mtx_unlock(&apple_protect_pager_lock);
1188 
1189 	/* wakeup anyone waiting for this pager to be ready */
1190 	thread_wakeup(&pager->is_ready);
1191 
1192 	if (old_crypt_info != NULL &&
1193 	    old_crypt_info != crypt_info) {
1194 		/* we re-used an old crypt_info instead of using our new one */
1195 #if CRYPT_INFO_DEBUG
1196 		printf("CRYPT_INFO %s: deallocate %p ref %d "
1197 		    "(create used old)\n",
1198 		    __FUNCTION__,
1199 		    crypt_info,
1200 		    crypt_info->crypt_refcnt);
1201 #endif /* CRYPT_INFO_DEBUG */
1202 		crypt_info_deallocate(crypt_info);
1203 		crypt_info = NULL;
1204 	}
1205 
1206 	return pager;
1207 }
1208 
1209 /*
1210  * apple_protect_pager_setup()
1211  *
1212  * Provide the caller with a memory object backed by the provided
1213  * "backing_object" VM object.  If such a memory object already exists,
1214  * re-use it, otherwise create a new memory object.
1215  */
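/*
 * Note on "crypt_info" ownership (informal): the caller's structure is
 * not retained.  Its contents are either copied into a freshly
 * allocated, refcounted pager_crypt_info, or an existing matching
 * crypt_info (same page_decrypt/crypt_end/crypt_ops) from another
 * pager is reused and re-referenced; see crypt_info_reference() and
 * crypt_info_deallocate() below.
 */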
1216 memory_object_t
1217 apple_protect_pager_setup(
1218 	vm_object_t             backing_object,
1219 	vm_object_offset_t      backing_offset,
1220 	vm_object_offset_t      crypto_backing_offset,
1221 	struct pager_crypt_info *crypt_info,
1222 	vm_object_offset_t      crypto_start,
1223 	vm_object_offset_t      crypto_end,
1224 	boolean_t               cache_pager)
1225 {
1226 	apple_protect_pager_t   pager;
1227 	struct pager_crypt_info *old_crypt_info, *new_crypt_info;
1228 
1229 #if CRYPT_INFO_DEBUG
1230 	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
1231 	    __FUNCTION__,
1232 	    crypt_info,
1233 	    crypt_info->page_decrypt,
1234 	    crypt_info->crypt_end,
1235 	    crypt_info->crypt_ops,
1236 	    crypt_info->crypt_refcnt);
1237 #endif /* CRYPT_INFO_DEBUG */
1238 
1239 	old_crypt_info = NULL;
1240 
1241 	lck_mtx_lock(&apple_protect_pager_lock);
1242 
1243 	queue_iterate(&apple_protect_pager_queue,
1244 	    pager,
1245 	    apple_protect_pager_t,
1246 	    pager_queue) {
1247 		if ((pager->crypt_info->page_decrypt !=
1248 		    crypt_info->page_decrypt) ||
1249 		    (pager->crypt_info->crypt_end !=
1250 		    crypt_info->crypt_end) ||
1251 		    (pager->crypt_info->crypt_ops !=
1252 		    crypt_info->crypt_ops)) {
1253 			/* no match for "crypt_info": next pager */
1254 			continue;
1255 		}
1256 		/* found a match for crypt_info ... */
1257 		if (old_crypt_info) {
1258 			/* ... already switched to that crypt_info */
1259 			assert(old_crypt_info == pager->crypt_info);
1260 		} else {
1261 			/* ... switch to that pager's crypt_info */
1262 			old_crypt_info = pager->crypt_info;
1263 #if CRYPT_INFO_DEBUG
1264 			printf("CRYPT_INFO %s: "
1265 			    "switching crypt_info from %p [%p,%p,%p,%d] "
1266 			    "to %p [%p,%p,%p,%d] from pager %p\n",
1267 			    __FUNCTION__,
1268 			    crypt_info,
1269 			    crypt_info->page_decrypt,
1270 			    crypt_info->crypt_end,
1271 			    crypt_info->crypt_ops,
1272 			    crypt_info->crypt_refcnt,
1273 			    old_crypt_info,
1274 			    old_crypt_info->page_decrypt,
1275 			    old_crypt_info->crypt_end,
1276 			    old_crypt_info->crypt_ops,
1277 			    old_crypt_info->crypt_refcnt,
1278 			    pager);
1279 			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
1280 			    __FUNCTION__,
1281 			    pager->crypt_info,
1282 			    pager->crypt_info->crypt_refcnt);
1283 #endif /* CRYPT_INFO_DEBUG */
1284 			crypt_info_reference(pager->crypt_info);
1285 		}
1286 
1287 		if (pager->backing_object == backing_object &&
1288 		    pager->backing_offset == backing_offset &&
1289 		    pager->crypto_backing_offset == crypto_backing_offset &&
1290 		    pager->crypto_start == crypto_start &&
1291 		    pager->crypto_end == crypto_end) {
1292 			/* full match: use that pager! */
1293 			assert(old_crypt_info == pager->crypt_info);
1294 			assert(old_crypt_info->crypt_refcnt > 1);
1295 #if CRYPT_INFO_DEBUG
1296 			printf("CRYPT_INFO %s: "
1297 			    "pager match with %p crypt_info %p\n",
1298 			    __FUNCTION__,
1299 			    pager,
1300 			    pager->crypt_info);
1301 			printf("CRYPT_INFO %s: deallocate %p ref %d "
1302 			    "(pager match)\n",
1303 			    __FUNCTION__,
1304 			    old_crypt_info,
1305 			    old_crypt_info->crypt_refcnt);
1306 #endif /* CRYPT_INFO_DEBUG */
1307 			/* release the extra ref on crypt_info we got above */
1308 			crypt_info_deallocate(old_crypt_info);
1309 			assert(old_crypt_info->crypt_refcnt > 0);
1310 			/* give extra reference on pager to the caller */
1311 			os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1312 			break;
1313 		}
1314 	}
1315 	if (queue_end(&apple_protect_pager_queue,
1316 	    (queue_entry_t) pager)) {
1317 		lck_mtx_unlock(&apple_protect_pager_lock);
1318 		/* no existing pager for this backing object */
1319 		pager = APPLE_PROTECT_PAGER_NULL;
1320 		if (old_crypt_info) {
1321 			/* use this old crypt_info for new pager */
1322 			new_crypt_info = old_crypt_info;
1323 #if CRYPT_INFO_DEBUG
1324 			printf("CRYPT_INFO %s: "
1325 			    "will use old_crypt_info %p for new pager\n",
1326 			    __FUNCTION__,
1327 			    old_crypt_info);
1328 #endif /* CRYPT_INFO_DEBUG */
1329 		} else {
1330 			/* allocate a new crypt_info for new pager */
1331 			new_crypt_info = kalloc_type(struct pager_crypt_info, Z_WAITOK);
1332 			*new_crypt_info = *crypt_info;
1333 			new_crypt_info->crypt_refcnt = 1;
1334 #if CRYPT_INFO_DEBUG
1335 			printf("CRYPT_INFO %s: "
1336 			    "will use new_crypt_info %p for new pager\n",
1337 			    __FUNCTION__,
1338 			    new_crypt_info);
1339 #endif /* CRYPT_INFO_DEBUG */
1340 		}
1341 		if (new_crypt_info == NULL) {
1342 			/* can't create new pager without a crypt_info */
1343 		} else {
1344 			/* create new pager */
1345 			pager = apple_protect_pager_create(
1346 				backing_object,
1347 				backing_offset,
1348 				crypto_backing_offset,
1349 				new_crypt_info,
1350 				crypto_start,
1351 				crypto_end,
1352 				cache_pager);
1353 		}
1354 		if (pager == APPLE_PROTECT_PAGER_NULL) {
1355 			/* could not create a new pager */
1356 			if (new_crypt_info == old_crypt_info) {
1357 				/* release extra reference on old_crypt_info */
1358 #if CRYPT_INFO_DEBUG
1359 				printf("CRYPT_INFO %s: deallocate %p ref %d "
1360 				    "(create fail old_crypt_info)\n",
1361 				    __FUNCTION__,
1362 				    old_crypt_info,
1363 				    old_crypt_info->crypt_refcnt);
1364 #endif /* CRYPT_INFO_DEBUG */
1365 				crypt_info_deallocate(old_crypt_info);
1366 				old_crypt_info = NULL;
1367 			} else {
1368 				/* release unused new_crypt_info */
1369 				assert(new_crypt_info->crypt_refcnt == 1);
1370 #if CRYPT_INFO_DEBUG
1371 				printf("CRYPT_INFO %s: deallocate %p ref %d "
1372 				    "(create fail new_crypt_info)\n",
1373 				    __FUNCTION__,
1374 				    new_crypt_info,
1375 				    new_crypt_info->crypt_refcnt);
1376 #endif /* CRYPT_INFO_DEBUG */
1377 				crypt_info_deallocate(new_crypt_info);
1378 				new_crypt_info = NULL;
1379 			}
1380 			return MEMORY_OBJECT_NULL;
1381 		}
1382 		lck_mtx_lock(&apple_protect_pager_lock);
1383 	} else {
1384 		assert(old_crypt_info == pager->crypt_info);
1385 	}
1386 
1387 	while (!pager->is_ready) {
1388 		lck_mtx_sleep(&apple_protect_pager_lock,
1389 		    LCK_SLEEP_DEFAULT,
1390 		    &pager->is_ready,
1391 		    THREAD_UNINT);
1392 	}
1393 	lck_mtx_unlock(&apple_protect_pager_lock);
1394 
1395 	return (memory_object_t) pager;
1396 }
1397 
1398 void
1399 apple_protect_pager_trim(void)
1400 {
1401 	apple_protect_pager_t   pager, prev_pager;
1402 	queue_head_t            trim_queue;
1403 	unsigned int            num_trim;
1404 	unsigned int            count_unmapped;
1405 
1406 	lck_mtx_lock(&apple_protect_pager_lock);
1407 
1408 	/*
1409 	 * We have too many pagers; try to trim some unused ones,
1410 	 * starting with the oldest pager at the end of the queue.
1411 	 */
1412 	queue_init(&trim_queue);
1413 	num_trim = 0;
1414 
1415 	for (pager = (apple_protect_pager_t)
1416 	    queue_last(&apple_protect_pager_queue);
1417 	    !queue_end(&apple_protect_pager_queue,
1418 	    (queue_entry_t) pager);
1419 	    pager = prev_pager) {
1420 		/* get prev elt before we dequeue */
1421 		prev_pager = (apple_protect_pager_t)
1422 		    queue_prev(&pager->pager_queue);
1423 
1424 		if (pager->is_cached &&
1425 		    os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
1426 		    pager->is_ready &&
1427 		    !pager->is_mapped) {
1428 			/* this pager can be trimmed */
1429 			num_trim++;
1430 			/* remove this pager from the main list ... */
1431 			apple_protect_pager_dequeue(pager);
1432 			/* ... and add it to our trim queue */
1433 			queue_enter_first(&trim_queue,
1434 			    pager,
1435 			    apple_protect_pager_t,
1436 			    pager_queue);
1437 
1438 			count_unmapped = (apple_protect_pager_count -
1439 			    apple_protect_pager_count_mapped);
1440 			if (count_unmapped <= apple_protect_pager_cache_limit) {
1441 				/* we have trimmed enough pagers */
1442 				break;
1443 			}
1444 		}
1445 	}
1446 	if (num_trim > apple_protect_pager_num_trim_max) {
1447 		apple_protect_pager_num_trim_max = num_trim;
1448 	}
1449 	apple_protect_pager_num_trim_total += num_trim;
1450 
1451 	lck_mtx_unlock(&apple_protect_pager_lock);
1452 
1453 	/* terminate the trimmed pagers */
1454 	while (!queue_empty(&trim_queue)) {
1455 		queue_remove_first(&trim_queue,
1456 		    pager,
1457 		    apple_protect_pager_t,
1458 		    pager_queue);
1459 		assert(pager->is_cached);
1460 		pager->is_cached = false;
1461 		pager->pager_queue.next = NULL;
1462 		pager->pager_queue.prev = NULL;
1463 		/*
1464 		 * We can't call deallocate_internal() because the pager
1465 		 * has already been dequeued, but we still need to remove
1466 		 * a reference.
1467 		 */
1468 		os_ref_count_t __assert_only count;
1469 		count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1470 		assert(count == 1);
1471 		apple_protect_pager_terminate_internal(pager);
1472 	}
1473 }
1474 
1475 
1476 void
1477 crypt_info_reference(
1478 	struct pager_crypt_info *crypt_info)
1479 {
1480 	assert(crypt_info->crypt_refcnt != 0);
1481 #if CRYPT_INFO_DEBUG
1482 	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1483 	    __FUNCTION__,
1484 	    crypt_info,
1485 	    crypt_info->crypt_refcnt,
1486 	    crypt_info->crypt_refcnt + 1);
1487 #endif /* CRYPT_INFO_DEBUG */
1488 	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
1489 }
1490 
1491 void
1492 crypt_info_deallocate(
1493 	struct pager_crypt_info *crypt_info)
1494 {
1495 #if CRYPT_INFO_DEBUG
1496 	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1497 	    __FUNCTION__,
1498 	    crypt_info,
1499 	    crypt_info->crypt_refcnt,
1500 	    crypt_info->crypt_refcnt - 1);
1501 #endif /* CRYPT_INFO_DEBUG */
1502 	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
1503 	if (crypt_info->crypt_refcnt == 0) {
1504 		/* deallocate any crypt module data */
1505 		if (crypt_info->crypt_end) {
1506 			crypt_info->crypt_end(crypt_info->crypt_ops);
1507 			crypt_info->crypt_end = NULL;
1508 		}
1509 #if CRYPT_INFO_DEBUG
1510 		printf("CRYPT_INFO %s: freeing %p\n",
1511 		    __FUNCTION__,
1512 		    crypt_info);
1513 #endif /* CRYPT_INFO_DEBUG */
1514 		kfree_type(struct pager_crypt_info, crypt_info);
1515 	}
1516 }
1517