xref: /xnu-11417.140.69/osfmk/vm/vm_apple_protect.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49 #include <os/refcnt.h>
50 
51 #include <sys/kdebug_triage.h>
52 
53 #include <ipc/ipc_port.h>
54 #include <ipc/ipc_space.h>
55 
56 #include <vm/vm_fault_internal.h>
57 #include <vm/vm_map.h>
58 #include <vm/memory_object_internal.h>
59 #include <vm/vm_pageout_xnu.h>
60 #include <vm/vm_protos_internal.h>
61 #include <vm/vm_kern.h>
62 #include <vm/vm_ubc.h>
63 #include <vm/vm_page_internal.h>
64 #include <vm/vm_object_internal.h>
65 
66 /*
67  * APPLE PROTECT MEMORY PAGER
68  *
69  * This external memory manager (EMM) handles memory from the encrypted
70  * sections of some executables protected by the DSMOS kernel extension.
71  *
72  * It mostly handles page-in requests (from memory_object_data_request()) by
73  * getting the encrypted data from its backing VM object, itself backed by
74  * the encrypted file, decrypting it and providing it to VM.
75  *
76  * The decrypted pages will never be dirtied, so the memory manager doesn't
77  * need to handle page-out requests (from memory_object_data_return()).  The
78  * pages need to be mapped copy-on-write, so that the originals stay clean.
79  *
80  * We don't expect to have to handle a large number of apple-protected
81  * binaries, so the data structures are very simple (simple linked list)
82  * for now.
83  */
84 
/* forward declarations */

/* memory_object_*() entry points implemented by this pager (see ops vector below) */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t      data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
boolean_t apple_protect_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);

#define CRYPT_INFO_DEBUG 0
/* reference counting for the shared "pager_crypt_info" structures */
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
120 
121 /*
122  * Vector of VM operations for this EMM.
123  * These routines are invoked by VM via the memory_object_*() interfaces.
124  */
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	.memory_object_reference = apple_protect_pager_reference,
	.memory_object_deallocate = apple_protect_pager_deallocate,
	.memory_object_init = apple_protect_pager_init,
	.memory_object_terminate = apple_protect_pager_terminate,
	.memory_object_data_request = apple_protect_pager_data_request,
	.memory_object_data_return = apple_protect_pager_data_return,
	.memory_object_data_initialize = apple_protect_pager_data_initialize,
	.memory_object_map = apple_protect_pager_map,
	.memory_object_last_unmap = apple_protect_pager_last_unmap,
	.memory_object_backing_object = apple_protect_pager_backing_object,
	.memory_object_pager_name = "apple_protect"
};
138 
139 /*
140  * The "apple_protect_pager" describes a memory object backed by
141  * the "apple protect" EMM.
142  */
typedef struct apple_protect_pager {
	/* mandatory generic header */
	struct memory_object    ap_pgr_hdr;

	/* pager-specific data */
	queue_chain_t           pager_queue;    /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define ap_pgr_hdr_ref          ap_pgr_hdr.mo_ref
#else
	os_ref_atomic_t         ap_pgr_hdr_ref;      /* reference count */
#endif
	bool                    is_ready;       /* is this pager ready ? */
	bool                    is_mapped;      /* is this mem_obj mapped ? */
	bool                    is_cached;      /* is this pager cached ? */
	vm_object_t             backing_object; /* VM obj w/ encrypted data */
	vm_object_offset_t      backing_offset; /* offset into backing_object */
	vm_object_offset_t      crypto_backing_offset; /* for key... */
	vm_object_offset_t      crypto_start;   /* first encrypted offset */
	vm_object_offset_t      crypto_end;     /* end of encrypted range */
	struct pager_crypt_info *crypt_info;    /* decryption callbacks (shared, refcounted) */
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL        ((apple_protect_pager_t) NULL)
165 
/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
unsigned int apple_protect_pager_count = 0;        /* number of pagers */
unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
unsigned int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
unsigned int apple_protect_pager_count_max = 0;
unsigned int apple_protect_pager_count_unmapped_max = 0;
unsigned int apple_protect_pager_num_trim_max = 0;
unsigned int apple_protect_pager_num_trim_total = 0;
189 
190 
/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
/* removes a pager from apple_protect_pager_queue; caller holds the list lock */
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
    boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
/* reclaims unmapped pagers when over apple_protect_pager_cache_limit */
void apple_protect_pager_trim(void);
206 
207 
#if DEBUG
/* bitmask of debug categories to print; see PAGER_* below */
int apple_protect_pagerdebug = 0;
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

/* printf A (a parenthesized argument list) when all bits in LEVEL are enabled */
#define PAGER_DEBUG(LEVEL, A)                                           \
	MACRO_BEGIN                                                     \
	if ((apple_protect_pagerdebug & LEVEL)==LEVEL) {                \
	        printf A;                                               \
	}                                                               \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
223 
/*
 * apple_protect_pager_init()
 *
 * Initialize the memory object and makes it ready to be used and mapped.
 *
 * Takes a reference on "control" and stores it in the pager header, then
 * configures the VM object's attributes: copy-on-write is delayed and the
 * object is not cacheable (its decrypted pages can always be regenerated
 * from the backing object, so VM may discard them freely).
 *
 * Returns KERN_INVALID_ARGUMENT if "control" is null; panics if the
 * attribute change fails (should not happen for a valid control port).
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t   pager;
	kern_return_t           kr;
	memory_object_attr_info_data_t  attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->ap_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("apple_protect_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
		memory_object_mark_eligible_for_secluded(control, TRUE);
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}
280 
/*
 * apple_protect_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 *
 * Panics unconditionally; the return statement is never reached.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t           data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}
303 
/*
 * apple_protect_pager_data_initialize()
 *
 * Never expected for this pager (its pages are generated on demand from
 * the encrypted backing object, not pre-initialized).  Panics if reached.
 */
kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t           data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}
313 
/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 *
 * For each requested page: fault the corresponding encrypted page in from
 * the backing VM object, map both source and destination pages in the
 * kernel's physical aperture, and either decrypt (inside the pager's
 * [crypto_start, crypto_end) range) or plain-copy (outside it) into the
 * destination page gathered in a UPL.  Code-signing state is propagated
 * from the source page to the UPL so the decrypted copy inherits it.
 *
 * Returns KERN_SUCCESS when the UPL was committed (or when a decryption
 * failure was converted into a "retry later" success after a short delay),
 * or an error from the UPL request / source fault otherwise.
 */
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            length,
#if !DEBUG
	__unused
#endif
	vm_prot_t               protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t   pager;
	memory_object_control_t mo_control;
	upl_t                   upl;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl;
	unsigned int            pl_count;
	vm_object_t             src_top_object, src_page_object, dst_object;
	kern_return_t           kr, retval;
	vm_offset_t             src_vaddr, dst_vaddr;
	vm_offset_t             cur_offset;
	vm_offset_t             offset_in_page;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info     fault_info;
	vm_fault_return_t       vmfr;
	int                     ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_top_object = VM_OBJECT_NULL;
	src_page_object = VM_OBJECT_NULL;
	upl = NULL;
	upl_pl = NULL;
	/* take a private copy of the fault info so we can adjust it below */
	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/* rebase the cluster hints to the backing object's offset space */
	fault_info.lo_offset += pager->backing_offset;
	fault_info.hi_offset += pager->backing_offset;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->ap_pgr_hdr.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_top_object = pager->backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		vmfr = vm_fault_page(src_top_object,
		    pager->backing_offset + offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (vmfr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			/* record the shortage for triage, then fail as interrupted */
			ktriage_record(thread_tid(current_thread()),
			    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_APPLE_PROTECT_PAGER,
			    KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_APPLE_PROTECT_PAGER_MEMORY_SHORTAGE),
			    0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		case VM_FAULT_BUSY:
			retval = KERN_ALREADY_WAITING;
			goto done;
		default:
			panic("%s: "
			    "vm_fault_page() return unexpected error 0x%x\n",
			    __func__, vmfr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		/* put the source page on the speculative queue so it gets reclaimed soon */
		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();

			/* re-check under the queues lock before moving it */
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source
		 * and destination physical pages.
		 */
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		/* map both pages through the physical aperture */
		src_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
		        << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);

		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page, PAGE_SIZE, 0,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_nx);

		/*
		 * page_decrypt() might access a mapped file, so let's release
		 * the object lock for the source page to avoid a potential
		 * deadlock.  The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 * Work in 4096-byte units: the crypto range boundaries and
		 * the decrypt callback operate on 4K granules even when
		 * PAGE_SIZE is larger (e.g. 16K).
		 */
		for (offset_in_page = 0;
		    offset_in_page < PAGE_SIZE;
		    offset_in_page += 4096) {
			if (offset + cur_offset + offset_in_page <
			    pager->crypto_start ||
			    offset + cur_offset + offset_in_page >=
			    pager->crypto_end) {
				/* not encrypted: just copy */
				bcopy((const char *)(src_vaddr +
				    offset_in_page),
				    (char *)(dst_vaddr + offset_in_page),
				    4096);

				if (apple_protect_pager_data_request_debug) {
					printf("apple_protect_data_request"
					    "(%p,0x%llx+0x%llx+0x%04llx): "
					    "out of crypto range "
					    "[0x%llx:0x%llx]: "
					    "COPY [0x%016llx 0x%016llx] "
					    "code_signed=%d "
					    "cs_validated=%d "
					    "cs_tainted=%d "
					    "cs_nx=%d\n",
					    pager,
					    offset,
					    (uint64_t) cur_offset,
					    (uint64_t) offset_in_page,
					    pager->crypto_start,
					    pager->crypto_end,
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page),
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page + 8),
					    src_page_object->code_signed,
					    src_page->vmp_cs_validated,
					    src_page->vmp_cs_tainted,
					    src_page->vmp_cs_nx);
				}
				ret = 0;
				continue;
			}
			ret = pager->crypt_info->page_decrypt(
				(const void *)(src_vaddr + offset_in_page),
				(void *)(dst_vaddr + offset_in_page),
				((pager->crypto_backing_offset -
				pager->crypto_start) +   /* XXX ? */
				offset +
				cur_offset +
				offset_in_page),
				pager->crypt_info->crypt_ops);

			if (apple_protect_pager_data_request_debug) {
				printf("apple_protect_data_request"
				    "(%p,0x%llx+0x%llx+0x%04llx): "
				    "in crypto range [0x%llx:0x%llx]: "
				    "DECRYPT offset 0x%llx="
				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
				    "[0x%016llx 0x%016llx] "
				    "code_signed=%d "
				    "cs_validated=%d "
				    "cs_tainted=%d "
				    "cs_nx=%d "
				    "ret=0x%x\n",
				    pager,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    pager->crypto_start, pager->crypto_end,
				    ((pager->crypto_backing_offset -
				    pager->crypto_start) +
				    offset +
				    cur_offset +
				    offset_in_page),
				    pager->crypto_backing_offset,
				    pager->crypto_start,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    *(uint64_t *)(dst_vaddr + offset_in_page),
				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
				    src_page_object->code_signed,
				    src_page->vmp_cs_validated,
				    src_page->vmp_cs_tainted,
				    src_page->vmp_cs_nx,
				    ret);
			}
			if (ret) {
				break;
			}
		}
		if (ret) {
			/*
			 * Decryption failed.  Abort the fault.
			 */
			retval = KERN_ABORTED;
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		vm_page_wakeup_done(src_page_object, src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		/* release the shadow-chain page vm_fault_page() may have left us */
		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t   wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000,  /* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}
724 
/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e one should not "revive" a dead pager this way.
 *
 * The refcount is taken under "apple_protect_pager_lock" to serialize with
 * apple_protect_pager_deallocate_internal().
 */
void
apple_protect_pager_reference(
	memory_object_t         mem_obj)
{
	apple_protect_pager_t   pager;

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
	lck_mtx_unlock(&apple_protect_pager_lock);
}
744 
/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	/* only unmapped pagers may be taken off the list */
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	/* poison the links so a stale re-queue/re-remove is detectable */
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}
768 
/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	/* drop our reference on the backing (encrypted) VM object */
	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
	    __FUNCTION__,
	    pager->crypt_info,
	    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
	crypt_info_deallocate(pager->crypt_info);
	pager->crypt_info = NULL;

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->ap_pgr_hdr.mo_control, VM_OBJECT_DESTROY_PAGER);
}
807 
/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 *
 * Reference-count milestones:
 *   ref_count == 1: only the "named" reference remains -> dequeue the pager
 *                   and trigger termination of its memory object.
 *   ref_count == 0: termination completed -> release the control port and
 *                   free the pager structure itself.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t   pager,
	boolean_t               locked)
{
	boolean_t       needs_trimming;
	unsigned int    count_unmapped;
	os_ref_count_t  ref_count;

	if (!locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	/* decide now, while holding the lock, whether a trim pass is due */
	count_unmapped = (apple_protect_pager_count -
	    apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers:  trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct apple_protect_pager, pager);
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	/* trim outside the lock to avoid recursion into this function while locked */
	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}
874 
875 /*
876  * apple_protect_pager_deallocate()
877  *
878  * Release a reference on this pager and free it when the last
879  * reference goes away.
880  */
881 void
apple_protect_pager_deallocate(memory_object_t mem_obj)882 apple_protect_pager_deallocate(
883 	memory_object_t         mem_obj)
884 {
885 	apple_protect_pager_t   pager;
886 
887 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
888 	pager = apple_protect_pager_lookup(mem_obj);
889 	apple_protect_pager_deallocate_internal(pager, FALSE);
890 }
891 
892 /*
893  *
894  */
/*
 * apple_protect_pager_terminate()
 *
 * Called by VM when the memory object is terminated.  No pager-specific
 * work is needed here: the pager's actual teardown happens through the
 * reference-counting path (apple_protect_pager_deallocate_internal()),
 * so this entry point only reports success.
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}
906 
907 /*
908  * apple_protect_pager_map()
909  *
910  * This allows VM to let us, the EMM, know that this memory object
911  * is currently mapped one or more times.  This is called by VM each time
912  * the memory object gets mapped and we take one extra reference on the
913  * memory object to account for all its mappings.
914  */
kern_return_t
apple_protect_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	apple_protect_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	/* the global pager lock protects "is_mapped" and the mapped count */
	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager:  take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		/* this "mapping" reference is released in apple_protect_pager_last_unmap() */
		apple_protect_pager_count_mapped++;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}
943 
944 /*
945  * apple_protect_pager_last_unmap()
946  *
947  * This is called by VM when this memory object is no longer mapped anywhere.
948  */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t         mem_obj)
{
	apple_protect_pager_t   pager;
	unsigned int            count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 * (Taken in apple_protect_pager_map() on first mapping.)
		 */
		apple_protect_pager_count_mapped--;
		/* track the high-water mark of unmapped pagers (statistics only) */
		count_unmapped = (apple_protect_pager_count -
		    apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}
982 
983 boolean_t
apple_protect_pager_backing_object(memory_object_t mem_obj,memory_object_offset_t offset,vm_object_t * backing_object,vm_object_offset_t * backing_offset)984 apple_protect_pager_backing_object(
985 	memory_object_t mem_obj,
986 	memory_object_offset_t offset,
987 	vm_object_t *backing_object,
988 	vm_object_offset_t *backing_offset)
989 {
990 	apple_protect_pager_t   pager;
991 
992 	PAGER_DEBUG(PAGER_ALL,
993 	    ("apple_protect_pager_backing_object: %p\n", mem_obj));
994 
995 	pager = apple_protect_pager_lookup(mem_obj);
996 
997 	*backing_object = pager->backing_object;
998 	*backing_offset = pager->backing_offset + offset;
999 
1000 	return TRUE;
1001 }
1002 
1003 /*
1004  *
1005  */
1006 apple_protect_pager_t
apple_protect_pager_lookup(memory_object_t mem_obj)1007 apple_protect_pager_lookup(
1008 	memory_object_t  mem_obj)
1009 {
1010 	apple_protect_pager_t   pager;
1011 
1012 	assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
1013 	pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
1014 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
1015 	return pager;
1016 }
1017 
/*
 * apple_protect_pager_create()
 *
 * Allocate and initialize a new apple-protect pager for the given
 * backing object and crypt parameters, guarding against a race with
 * another thread creating an equivalent pager.  If we lose the race,
 * the freshly-allocated pager is discarded and the pre-existing one is
 * returned instead.  If an equivalent crypt_info is already in use by
 * another pager, we switch to it and release the caller's "crypt_info".
 * Returns the pager with the reference(s) set up for the caller (plus
 * an extra cache reference if "cache_pager" is requested).
 */
apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	vm_object_offset_t      crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t      crypto_start,
	vm_object_offset_t      crypto_end,
	boolean_t               cache_pager)
{
	apple_protect_pager_t   pager, pager2;
	memory_object_control_t control;
	kern_return_t           kr;
	struct pager_crypt_info *old_crypt_info;

	pager = kalloc_type(struct apple_protect_pager, Z_WAITOK | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE;/* not ready until it has a "name" */
	/* one reference for the caller */
	os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
	pager->is_mapped = FALSE;
	if (cache_pager) {
		/* extra reference for the cache */
		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		pager->is_cached = true;
	} else {
		pager->is_cached = false;
	}
	pager->backing_object = backing_object;
	pager->backing_offset = backing_offset;
	pager->crypto_backing_offset = crypto_backing_offset;
	pager->crypto_start = crypto_start;
	pager->crypto_end = crypto_end;
	pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	/* keep the backing object alive for as long as this pager lives */
	vm_object_reference(backing_object);

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
	    pager2,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager2->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager2->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager2->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* crypt_info contents do not match: next pager */
			continue;
		}

		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager2->crypt_info);
		} else if (pager2->crypt_info != crypt_info) {
			/* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: reference %p ref %d "
			    "(create match)\n",
			    __FUNCTION__,
			    pager2->crypt_info,
			    pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			old_crypt_info = pager2->crypt_info;
			crypt_info_reference(old_crypt_info);
			pager->crypt_info = old_crypt_info;
		}

		if (pager2->backing_object == backing_object &&
		    pager2->backing_offset == backing_offset &&
		    pager2->crypto_backing_offset == crypto_backing_offset &&
		    pager2->crypto_start == crypto_start &&
		    pager2->crypto_end == crypto_end) {
			/* full match: use that pager */
			break;
		}
	}
	if (!queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager2)) {
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		/* undo our setup: release backing object, crypt_info, pager */
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
		    __FUNCTION__,
		    pager->crypt_info,
		    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(pager->crypt_info);
		pager->crypt_info = NULL;
		kfree_type(struct apple_protect_pager, pager);
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	if (old_crypt_info != NULL &&
	    old_crypt_info != crypt_info) {
		/* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: deallocate %p ref %d "
		    "(create used old)\n",
		    __FUNCTION__,
		    crypt_info,
		    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(crypt_info);
		crypt_info = NULL;
	}

	return pager;
}
1184 
1185 /*
1186  * apple_protect_pager_setup()
1187  *
1188  * Provide the caller with a memory object backed by the provided
1189  * "backing_object" VM object.  If such a memory object already exists,
1190  * re-use it, otherwise create a new memory object.
1191  */
memory_object_t
apple_protect_pager_setup(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	vm_object_offset_t      crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t      crypto_start,
	vm_object_offset_t      crypto_end,
	boolean_t               cache_pager)
{
	apple_protect_pager_t   pager;
	struct pager_crypt_info *old_crypt_info, *new_crypt_info;

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);

	/* look for an existing pager that matches crypt_info and range */
	queue_iterate(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* no match for "crypt_info": next pager */
			continue;
		}
		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager->crypt_info);
		} else {
			/* ... switch to that pager's crypt_info */
			old_crypt_info = pager->crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "switching crypt_info from %p [%p,%p,%p,%d] "
			    "to %p [%p,%p,%p,%d] from pager %p\n",
			    __FUNCTION__,
			    crypt_info,
			    crypt_info->page_decrypt,
			    crypt_info->crypt_end,
			    crypt_info->crypt_ops,
			    crypt_info->crypt_refcnt,
			    old_crypt_info,
			    old_crypt_info->page_decrypt,
			    old_crypt_info->crypt_end,
			    old_crypt_info->crypt_ops,
			    old_crypt_info->crypt_refcnt,
			    pager);
			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
			    __FUNCTION__,
			    pager->crypt_info,
			    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			crypt_info_reference(pager->crypt_info);
		}

		if (pager->backing_object == backing_object &&
		    pager->backing_offset == backing_offset &&
		    pager->crypto_backing_offset == crypto_backing_offset &&
		    pager->crypto_start == crypto_start &&
		    pager->crypto_end == crypto_end) {
			/* full match: use that pager! */
			assert(old_crypt_info == pager->crypt_info);
			assert(old_crypt_info->crypt_refcnt > 1);
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "pager match with %p crypt_info %p\n",
			    __FUNCTION__,
			    pager,
			    pager->crypt_info);
			printf("CRYPT_INFO %s: deallocate %p ref %d "
			    "(pager match)\n",
			    __FUNCTION__,
			    old_crypt_info,
			    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			/* release the extra ref on crypt_info we got above */
			crypt_info_deallocate(old_crypt_info);
			assert(old_crypt_info->crypt_refcnt > 0);
			/* give extra reference on pager to the caller */
			os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager)) {
		lck_mtx_unlock(&apple_protect_pager_lock);
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
		if (old_crypt_info) {
			/* use this old crypt_info for new pager */
			new_crypt_info = old_crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "will use old_crypt_info %p for new pager\n",
			    __FUNCTION__,
			    old_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		} else {
			/* allocate a new crypt_info for new pager */
			new_crypt_info = kalloc_type(struct pager_crypt_info, Z_WAITOK);
			*new_crypt_info = *crypt_info;
			new_crypt_info->crypt_refcnt = 1;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "will use new_crypt_info %p for new pager\n",
			    __FUNCTION__,
			    new_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		}
		/* NOTE(review): Z_WAITOK without Z_NOFAIL may return NULL, hence this check */
		if (new_crypt_info == NULL) {
			/* can't create new pager without a crypt_info */
		} else {
			/* create new pager */
			pager = apple_protect_pager_create(
				backing_object,
				backing_offset,
				crypto_backing_offset,
				new_crypt_info,
				crypto_start,
				crypto_end,
				cache_pager);
		}
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			/* could not create a new pager */
			if (new_crypt_info == old_crypt_info) {
				/* release extra reference on old_crypt_info */
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				    "(create fail old_crypt_info)\n",
				    __FUNCTION__,
				    old_crypt_info,
				    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(old_crypt_info);
				old_crypt_info = NULL;
			} else {
				/* release unused new_crypt_info */
				assert(new_crypt_info->crypt_refcnt == 1);
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				    "(create fail new_crypt_info)\n",
				    __FUNCTION__,
				    new_crypt_info,
				    new_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(new_crypt_info);
				new_crypt_info = NULL;
			}
			return MEMORY_OBJECT_NULL;
		}
		lck_mtx_lock(&apple_protect_pager_lock);
	} else {
		assert(old_crypt_info == pager->crypt_info);
	}

	/* wait for the pager's creator to mark it ready (see the
	 * thread_wakeup() in apple_protect_pager_create()) */
	while (!pager->is_ready) {
		lck_mtx_sleep(&apple_protect_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}
1373 
/*
 * apple_protect_pager_trim()
 *
 * Shrink the pager cache: scan from the oldest pager at the tail of
 * the pager queue and terminate cached, ready, unmapped pagers until
 * the number of unmapped pagers drops back to
 * apple_protect_pager_cache_limit.
 */
void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t   pager, prev_pager;
	queue_head_t            trim_queue;
	unsigned int            num_trim;
	unsigned int            count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
	    queue_last(&apple_protect_pager_queue);
	    !queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
		    queue_prev(&pager->pager_queue);

		/*
		 * A refcount of 2 is presumably the cache reference plus
		 * the "named" existence reference, i.e. no other users.
		 */
		if (pager->is_cached &&
		    os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    apple_protect_pager_t,
			    pager_queue);

			count_unmapped = (apple_protect_pager_count -
			    apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    apple_protect_pager_t,
		    pager_queue);
		assert(pager->is_cached);
		pager->is_cached = false;
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		os_ref_count_t __assert_only count;
		count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		assert(count == 1);
		apple_protect_pager_terminate_internal(pager);
	}
}
1450 
1451 
/*
 * crypt_info_reference()
 *
 * Take an extra reference on "crypt_info".  The caller must already
 * hold at least one reference (count must not be 0).
 */
void
crypt_info_reference(
	struct pager_crypt_info *crypt_info)
{
	/* can't revive a crypt_info whose last reference is already gone */
	assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->crypt_refcnt,
	    crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
	/* atomic increment: references may be taken concurrently */
	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}
1466 
1467 void
crypt_info_deallocate(struct pager_crypt_info * crypt_info)1468 crypt_info_deallocate(
1469 	struct pager_crypt_info *crypt_info)
1470 {
1471 #if CRYPT_INFO_DEBUG
1472 	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1473 	    __FUNCTION__,
1474 	    crypt_info,
1475 	    crypt_info->crypt_refcnt,
1476 	    crypt_info->crypt_refcnt - 1);
1477 #endif /* CRYPT_INFO_DEBUG */
1478 	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
1479 	if (crypt_info->crypt_refcnt == 0) {
1480 		/* deallocate any crypt module data */
1481 		if (crypt_info->crypt_end) {
1482 			crypt_info->crypt_end(crypt_info->crypt_ops);
1483 			crypt_info->crypt_end = NULL;
1484 		}
1485 #if CRYPT_INFO_DEBUG
1486 		printf("CRYPT_INFO %s: freeing %p\n",
1487 		    __FUNCTION__,
1488 		    crypt_info);
1489 #endif /* CRYPT_INFO_DEBUG */
1490 		kfree_type(struct pager_crypt_info, crypt_info);
1491 	}
1492 }
1493 
1494 static uint64_t
apple_protect_pager_purge(apple_protect_pager_t pager)1495 apple_protect_pager_purge(
1496 	apple_protect_pager_t pager)
1497 {
1498 	uint64_t pages_purged;
1499 	vm_object_t object;
1500 
1501 	pages_purged = 0;
1502 	object = memory_object_to_vm_object((memory_object_t) pager);
1503 	assert(object != VM_OBJECT_NULL);
1504 	vm_object_lock(object);
1505 	pages_purged = object->resident_page_count;
1506 	vm_object_reap_pages(object, REAP_DATA_FLUSH);
1507 	pages_purged -= object->resident_page_count;
1508 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1509 	vm_object_unlock(object);
1510 	return pages_purged;
1511 }
1512 
1513 uint64_t
apple_protect_pager_purge_all(void)1514 apple_protect_pager_purge_all(void)
1515 {
1516 	uint64_t pages_purged;
1517 	apple_protect_pager_t pager;
1518 
1519 	pages_purged = 0;
1520 	lck_mtx_lock(&apple_protect_pager_lock);
1521 	queue_iterate(&apple_protect_pager_queue, pager, apple_protect_pager_t, pager_queue) {
1522 		pages_purged += apple_protect_pager_purge(pager);
1523 	}
1524 	lck_mtx_unlock(&apple_protect_pager_lock);
1525 #if DEVELOPMENT || DEBUG
1526 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1527 #endif /* DEVELOPMENT || DEBUG */
1528 	return pages_purged;
1529 }
1530