1 /*
2  * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49 #include <os/refcnt.h>
50 
51 #include <sys/kdebug_triage.h>
52 
53 #include <ipc/ipc_port.h>
54 #include <ipc/ipc_space.h>
55 
56 #include <vm/vm_fault_internal.h>
57 #include <vm/vm_map.h>
58 #include <vm/memory_object_internal.h>
59 #include <vm/vm_pageout_xnu.h>
60 #include <vm/vm_protos_internal.h>
61 #include <vm/vm_kern.h>
62 #include <vm/vm_ubc.h>
63 #include <vm/vm_page_internal.h>
64 #include <vm/vm_object_internal.h>
65 
66 /*
67  * APPLE PROTECT MEMORY PAGER
68  *
69  * This external memory manager (EMM) handles memory from the encrypted
70  * sections of some executables protected by the DSMOS kernel extension.
71  *
72  * It mostly handles page-in requests (from memory_object_data_request()) by
73  * getting the encrypted data from its backing VM object, itself backed by
74  * the encrypted file, decrypting it and providing it to VM.
75  *
76  * The decrypted pages will never be dirtied, so the memory manager doesn't
77  * need to handle page-out requests (from memory_object_data_return()).  The
78  * pages need to be mapped copy-on-write, so that the originals stay clean.
79  *
80  * We don't expect to have to handle a large number of apple-protected
81  * binaries, so the data structures are very simple (simple linked list)
82  * for now.
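 *
 * Pagers are created via apple_protect_pager_setup() (called from
 * vm_map_apple_protected() when an encrypted binary gets mapped) and are
 * kept on a single list so that an existing pager can be re-used for the
 * same backing object and crypto range.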
83  */
84 
85 /* forward declarations */
86 void apple_protect_pager_reference(memory_object_t mem_obj);
87 void apple_protect_pager_deallocate(memory_object_t mem_obj);
88 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
89     memory_object_control_t control,
90     memory_object_cluster_size_t pg_size);
91 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
92 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
93     memory_object_offset_t offset,
94     memory_object_cluster_size_t length,
95     vm_prot_t protection_required,
96     memory_object_fault_info_t fault_info);
97 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
98     memory_object_offset_t offset,
99     memory_object_cluster_size_t      data_cnt,
100     memory_object_offset_t *resid_offset,
101     int *io_error,
102     boolean_t dirty,
103     boolean_t kernel_copy,
104     int upl_flags);
105 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
106     memory_object_offset_t offset,
107     memory_object_cluster_size_t data_cnt);
108 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
109     vm_prot_t prot);
110 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
111 boolean_t apple_protect_pager_backing_object(
112 	memory_object_t mem_obj,
113 	memory_object_offset_t mem_obj_offset,
114 	vm_object_t *backing_object,
115 	vm_object_offset_t *backing_offset);
116 
117 #define CRYPT_INFO_DEBUG 0
118 void crypt_info_reference(struct pager_crypt_info *crypt_info);
119 void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
120 
121 /*
122  * Vector of VM operations for this EMM.
123  * These routines are invoked by VM via the memory_object_*() interfaces.
124  */
125 const struct memory_object_pager_ops apple_protect_pager_ops = {
126 	.memory_object_reference = apple_protect_pager_reference,
127 	.memory_object_deallocate = apple_protect_pager_deallocate,
128 	.memory_object_init = apple_protect_pager_init,
129 	.memory_object_terminate = apple_protect_pager_terminate,
130 	.memory_object_data_request = apple_protect_pager_data_request,
131 	.memory_object_data_return = apple_protect_pager_data_return,
132 	.memory_object_data_initialize = apple_protect_pager_data_initialize,
133 	.memory_object_map = apple_protect_pager_map,
134 	.memory_object_last_unmap = apple_protect_pager_last_unmap,
135 	.memory_object_backing_object = apple_protect_pager_backing_object,
136 	.memory_object_pager_name = "apple_protect"
137 };
138 
139 /*
140  * The "apple_protect_pager" describes a memory object backed by
141  * the "apple protect" EMM.
142  */
143 typedef struct apple_protect_pager {
144 	/* mandatory generic header */
145 	struct memory_object    ap_pgr_hdr;
146 
147 	/* pager-specific data */
148 	queue_chain_t           pager_queue;    /* next & prev pagers */
149 #if MEMORY_OBJECT_HAS_REFCOUNT
150 #define ap_pgr_hdr_ref          ap_pgr_hdr.mo_ref
151 #else
152 	os_ref_atomic_t         ap_pgr_hdr_ref;      /* reference count */
153 #endif
154 	bool                    is_ready;       /* is this pager ready ? */
155 	bool                    is_mapped;      /* is this mem_obj mapped ? */
156 	bool                    is_cached;      /* is this pager cached ? */
157 	vm_object_t             backing_object; /* VM obj w/ encrypted data */
158 	vm_object_offset_t      backing_offset;
159 	vm_object_offset_t      crypto_backing_offset; /* for key... */
160 	vm_object_offset_t      crypto_start;
161 	vm_object_offset_t      crypto_end;
162 	struct pager_crypt_info *crypt_info;
163 } *apple_protect_pager_t;
164 #define APPLE_PROTECT_PAGER_NULL        ((apple_protect_pager_t) NULL)
165 
166 /*
167  * List of memory objects managed by this EMM.
168  * The list is protected by the "apple_protect_pager_lock" lock.
169  */
170 unsigned int apple_protect_pager_count = 0;        /* number of pagers */
171 unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
172 queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
173 LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
174 LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);
175 
176 /*
177  * Maximum number of unmapped pagers we're willing to keep around.
178  */
179 unsigned int apple_protect_pager_cache_limit = 20;
180 
181 /*
182  * Statistics & counters.
183  */
184 unsigned int apple_protect_pager_count_max = 0;
185 unsigned int apple_protect_pager_count_unmapped_max = 0;
186 unsigned int apple_protect_pager_num_trim_max = 0;
187 unsigned int apple_protect_pager_num_trim_total = 0;
188 
189 
190 
191 /* internal prototypes */
192 apple_protect_pager_t apple_protect_pager_create(
193 	vm_object_t backing_object,
194 	vm_object_offset_t backing_offset,
195 	vm_object_offset_t crypto_backing_offset,
196 	struct pager_crypt_info *crypt_info,
197 	vm_object_offset_t crypto_start,
198 	vm_object_offset_t crypto_end,
199 	boolean_t cache_pager);
200 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
201 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
202 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
203     boolean_t locked);
204 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
205 void apple_protect_pager_trim(void);
206 
207 
208 #if DEBUG
209 int apple_protect_pagerdebug = 0;
210 #define PAGER_ALL               0xffffffff
211 #define PAGER_INIT              0x00000001
212 #define PAGER_PAGEIN            0x00000002
213 
214 #define PAGER_DEBUG(LEVEL, A)                                           \
215 	MACRO_BEGIN                                                     \
216 	if ((apple_protect_pagerdebug & LEVEL)==LEVEL) {                \
217 	        printf A;                                               \
218 	}                                                               \
219 	MACRO_END
220 #else
221 #define PAGER_DEBUG(LEVEL, A)
222 #endif
223 
224 /*
225  * apple_protect_pager_init()
226  *
227  * Initializes the memory object and makes it ready to be used and mapped.
228  */
229 kern_return_t
230 apple_protect_pager_init(
231 	memory_object_t         mem_obj,
232 	memory_object_control_t control,
233 #if !DEBUG
234 	__unused
235 #endif
236 	memory_object_cluster_size_t pg_size)
237 {
238 	apple_protect_pager_t   pager;
239 	kern_return_t           kr;
240 	memory_object_attr_info_data_t  attributes;
241 
242 	PAGER_DEBUG(PAGER_ALL,
243 	    ("apple_protect_pager_init: %p, %p, %x\n",
244 	    mem_obj, control, pg_size));
245 
246 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
247 		return KERN_INVALID_ARGUMENT;
248 	}
249 
250 	pager = apple_protect_pager_lookup(mem_obj);
251 
252 	memory_object_control_reference(control);
253 
254 	pager->ap_pgr_hdr.mo_control = control;
255 
256 	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
257 	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
258 	attributes.cluster_size = (1 << (PAGE_SHIFT));
259 	attributes.may_cache_object = FALSE;
260 	attributes.temporary = TRUE;
261 
262 	kr = memory_object_change_attributes(
263 		control,
264 		MEMORY_OBJECT_ATTRIBUTE_INFO,
265 		(memory_object_info_t) &attributes,
266 		MEMORY_OBJECT_ATTR_INFO_COUNT);
267 	if (kr != KERN_SUCCESS) {
268 		panic("apple_protect_pager_init: "
269 		    "memory_object_change_attributes() failed");
270 	}
271 
272 #if CONFIG_SECLUDED_MEMORY
273 	if (secluded_for_filecache) {
274 		memory_object_mark_eligible_for_secluded(control, TRUE);
275 	}
276 #endif /* CONFIG_SECLUDED_MEMORY */
277 
278 	return KERN_SUCCESS;
279 }
280 
281 /*
282  * apple_protect_pager_data_return()
283  *
284  * Handles page-out requests from VM.  This should never happen since
285  * the pages provided by this EMM are not supposed to be dirty or dirtied
286  * and VM should simply discard the contents and reclaim the pages if it
287  * needs to.
288  */
289 kern_return_t
290 apple_protect_pager_data_return(
291 	__unused memory_object_t        mem_obj,
292 	__unused memory_object_offset_t offset,
293 	__unused memory_object_cluster_size_t           data_cnt,
294 	__unused memory_object_offset_t *resid_offset,
295 	__unused int                    *io_error,
296 	__unused boolean_t              dirty,
297 	__unused boolean_t              kernel_copy,
298 	__unused int                    upl_flags)
299 {
300 	panic("apple_protect_pager_data_return: should never get called");
301 	return KERN_FAILURE;
302 }
303 
304 kern_return_t
305 apple_protect_pager_data_initialize(
306 	__unused memory_object_t        mem_obj,
307 	__unused memory_object_offset_t offset,
308 	__unused memory_object_cluster_size_t           data_cnt)
309 {
310 	panic("apple_protect_pager_data_initialize: should never get called");
311 	return KERN_FAILURE;
312 }
313 
314 /*
315  * apple_protect_pager_data_request()
316  *
317  * Handles page-in requests from VM.
318  */
319 int apple_protect_pager_data_request_debug = 0;
320 kern_return_t
321 apple_protect_pager_data_request(
322 	memory_object_t         mem_obj,
323 	memory_object_offset_t  offset,
324 	memory_object_cluster_size_t            length,
325 #if !DEBUG
326 	__unused
327 #endif
328 	vm_prot_t               protection_required,
329 	memory_object_fault_info_t mo_fault_info)
330 {
331 	apple_protect_pager_t   pager;
332 	memory_object_control_t mo_control;
333 	upl_t                   upl;
334 	int                     upl_flags;
335 	upl_size_t              upl_size;
336 	upl_page_info_t         *upl_pl;
337 	unsigned int            pl_count;
338 	vm_object_t             src_top_object, src_page_object, dst_object;
339 	kern_return_t           kr, retval;
340 	vm_offset_t             src_vaddr, dst_vaddr;
341 	vm_offset_t             cur_offset;
342 	vm_offset_t             offset_in_page;
343 	kern_return_t           error_code;
344 	vm_prot_t               prot;
345 	vm_page_t               src_page, top_page;
346 	int                     interruptible;
347 	struct vm_object_fault_info     fault_info;
348 	int                     ret;
349 
350 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
351 
352 	retval = KERN_SUCCESS;
353 	src_top_object = VM_OBJECT_NULL;
354 	src_page_object = VM_OBJECT_NULL;
355 	upl = NULL;
356 	upl_pl = NULL;
357 	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
358 	fault_info.stealth = TRUE;
359 	fault_info.io_sync = FALSE;
360 	fault_info.mark_zf_absent = FALSE;
361 	fault_info.batch_pmap_op = FALSE;
362 	interruptible = fault_info.interruptible;
363 
364 	pager = apple_protect_pager_lookup(mem_obj);
365 	assert(pager->is_ready);
366 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */
367 
368 	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
369 
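	/*
	 * The source pages will be faulted in from the backing object,
	 * so shift the fault's cluster bounds into that object's offset
	 * space.
	 */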
370 	fault_info.lo_offset += pager->backing_offset;
371 	fault_info.hi_offset += pager->backing_offset;
372 
373 	/*
374 	 * Gather in a UPL all the VM pages requested by VM.
375 	 */
376 	mo_control = pager->ap_pgr_hdr.mo_control;
377 
378 	upl_size = length;
379 	upl_flags =
380 	    UPL_RET_ONLY_ABSENT |
381 	    UPL_SET_LITE |
382 	    UPL_NO_SYNC |
383 	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
384 	    UPL_SET_INTERNAL;
385 	pl_count = 0;
386 	kr = memory_object_upl_request(mo_control,
387 	    offset, upl_size,
388 	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
389 	if (kr != KERN_SUCCESS) {
390 		retval = kr;
391 		goto done;
392 	}
393 	dst_object = memory_object_control_to_vm_object(mo_control);
394 	assert(dst_object != VM_OBJECT_NULL);
395 
396 	/*
397 	 * We'll map the encrypted data in the kernel address space from the
398 	 * backing VM object (itself backed by the encrypted file via
399 	 * the vnode pager).
400 	 */
401 	src_top_object = pager->backing_object;
402 	assert(src_top_object != VM_OBJECT_NULL);
403 	vm_object_reference(src_top_object); /* keep the source object alive */
404 
405 	/*
406 	 * Fill in the contents of the pages requested by VM.
407 	 */
408 	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
409 	pl_count = length / PAGE_SIZE;
410 	for (cur_offset = 0;
411 	    retval == KERN_SUCCESS && cur_offset < length;
412 	    cur_offset += PAGE_SIZE) {
413 		ppnum_t dst_pnum;
414 
415 		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
416 			/* this page is not in the UPL: skip it */
417 			continue;
418 		}
419 
420 		/*
421 		 * Map the source (encrypted) page in the kernel's
422 		 * virtual address space.
423 		 * We already hold a reference on the src_top_object.
424 		 */
425 retry_src_fault:
426 		vm_object_lock(src_top_object);
427 		vm_object_paging_begin(src_top_object);
428 		error_code = 0;
429 		prot = VM_PROT_READ;
430 		src_page = VM_PAGE_NULL;
431 		kr = vm_fault_page(src_top_object,
432 		    pager->backing_offset + offset + cur_offset,
433 		    VM_PROT_READ,
434 		    FALSE,
435 		    FALSE,                /* src_page not looked up */
436 		    &prot,
437 		    &src_page,
438 		    &top_page,
439 		    NULL,
440 		    &error_code,
441 		    FALSE,
442 		    &fault_info);
443 		switch (kr) {
444 		case VM_FAULT_SUCCESS:
445 			break;
446 		case VM_FAULT_RETRY:
447 			goto retry_src_fault;
448 		case VM_FAULT_MEMORY_SHORTAGE:
449 			if (vm_page_wait(interruptible)) {
450 				goto retry_src_fault;
451 			}
452 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_APPLE_PROTECT_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_APPLE_PROTECT_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
453 			OS_FALLTHROUGH;
454 		case VM_FAULT_INTERRUPTED:
455 			retval = MACH_SEND_INTERRUPTED;
456 			goto done;
457 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
458 			/* success but no VM page: fail */
459 			vm_object_paging_end(src_top_object);
460 			vm_object_unlock(src_top_object);
461 			OS_FALLTHROUGH;
462 		case VM_FAULT_MEMORY_ERROR:
463 			/* the page is not there ! */
464 			if (error_code) {
465 				retval = error_code;
466 			} else {
467 				retval = KERN_MEMORY_ERROR;
468 			}
469 			goto done;
470 		default:
471 			panic("apple_protect_pager_data_request: "
472 			    "vm_fault_page() unexpected error 0x%x\n",
473 			    kr);
474 		}
475 		assert(src_page != VM_PAGE_NULL);
476 		assert(src_page->vmp_busy);
477 
478 		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
479 			vm_page_lockspin_queues();
480 
481 			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
482 				vm_page_speculate(src_page, FALSE);
483 			}
484 			vm_page_unlock_queues();
485 		}
486 
487 		/*
488 		 * Establish pointers to the source
489 		 * and destination physical pages.
490 		 */
491 		dst_pnum = (ppnum_t)
492 		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
493 		assert(dst_pnum != 0);
494 
495 		src_vaddr = (vm_map_offset_t)
496 		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
497 		        << PAGE_SHIFT);
498 		dst_vaddr = (vm_map_offset_t)
499 		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
500 
501 		src_page_object = VM_PAGE_OBJECT(src_page);
502 
503 		/*
504 		 * Validate the original page...
505 		 */
506 		if (src_page_object->code_signed) {
507 			vm_page_validate_cs_mapped(
508 				src_page, PAGE_SIZE, 0,
509 				(const void *) src_vaddr);
510 		}
511 		/*
512 		 * ... and transfer the results to the destination page.
513 		 */
514 		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
515 		    src_page->vmp_cs_validated);
516 		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
517 		    src_page->vmp_cs_tainted);
518 		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
519 		    src_page->vmp_cs_nx);
520 
521 		/*
522 		 * page_decrypt() might access a mapped file, so let's release
523 		 * the object lock for the source page to avoid a potential
524 		 * deadlock.  The source page is kept busy and we have a
525 		 * "paging_in_progress" reference on its object, so it's safe
526 		 * to unlock the object here.
527 		 */
528 		assert(src_page->vmp_busy);
529 		assert(src_page_object->paging_in_progress > 0);
530 		vm_object_unlock(src_page_object);
531 
532 		/*
533 		 * Decrypt the encrypted contents of the source page
534 		 * into the destination page.
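		 * The page is handed to page_decrypt() in 4096-byte chunks;
		 * on configurations where PAGE_SIZE is larger than 4 KB,
		 * this loop walks each 4 KB sub-page in turn.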
535 		 */
536 		for (offset_in_page = 0;
537 		    offset_in_page < PAGE_SIZE;
538 		    offset_in_page += 4096) {
539 			if (offset + cur_offset + offset_in_page <
540 			    pager->crypto_start ||
541 			    offset + cur_offset + offset_in_page >=
542 			    pager->crypto_end) {
543 				/* not encrypted: just copy */
544 				bcopy((const char *)(src_vaddr +
545 				    offset_in_page),
546 				    (char *)(dst_vaddr + offset_in_page),
547 				    4096);
548 
549 				if (apple_protect_pager_data_request_debug) {
550 					printf("apple_protect_data_request"
551 					    "(%p,0x%llx+0x%llx+0x%04llx): "
552 					    "out of crypto range "
553 					    "[0x%llx:0x%llx]: "
554 					    "COPY [0x%016llx 0x%016llx] "
555 					    "code_signed=%d "
556 					    "cs_validated=%d "
557 					    "cs_tainted=%d "
558 					    "cs_nx=%d\n",
559 					    pager,
560 					    offset,
561 					    (uint64_t) cur_offset,
562 					    (uint64_t) offset_in_page,
563 					    pager->crypto_start,
564 					    pager->crypto_end,
565 					    *(uint64_t *)(dst_vaddr +
566 					    offset_in_page),
567 					    *(uint64_t *)(dst_vaddr +
568 					    offset_in_page + 8),
569 					    src_page_object->code_signed,
570 					    src_page->vmp_cs_validated,
571 					    src_page->vmp_cs_tainted,
572 					    src_page->vmp_cs_nx);
573 				}
574 				ret = 0;
575 				continue;
576 			}
577 			ret = pager->crypt_info->page_decrypt(
578 				(const void *)(src_vaddr + offset_in_page),
579 				(void *)(dst_vaddr + offset_in_page),
580 				((pager->crypto_backing_offset -
581 				pager->crypto_start) +   /* XXX ? */
582 				offset +
583 				cur_offset +
584 				offset_in_page),
585 				pager->crypt_info->crypt_ops);
586 
587 			if (apple_protect_pager_data_request_debug) {
588 				printf("apple_protect_data_request"
589 				    "(%p,0x%llx+0x%llx+0x%04llx): "
590 				    "in crypto range [0x%llx:0x%llx]: "
591 				    "DECRYPT offset 0x%llx="
592 				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
593 				    "[0x%016llx 0x%016llx] "
594 				    "code_signed=%d "
595 				    "cs_validated=%d "
596 				    "cs_tainted=%d "
597 				    "cs_nx=%d "
598 				    "ret=0x%x\n",
599 				    pager,
600 				    offset,
601 				    (uint64_t) cur_offset,
602 				    (uint64_t) offset_in_page,
603 				    pager->crypto_start, pager->crypto_end,
604 				    ((pager->crypto_backing_offset -
605 				    pager->crypto_start) +
606 				    offset +
607 				    cur_offset +
608 				    offset_in_page),
609 				    pager->crypto_backing_offset,
610 				    pager->crypto_start,
611 				    offset,
612 				    (uint64_t) cur_offset,
613 				    (uint64_t) offset_in_page,
614 				    *(uint64_t *)(dst_vaddr + offset_in_page),
615 				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
616 				    src_page_object->code_signed,
617 				    src_page->vmp_cs_validated,
618 				    src_page->vmp_cs_tainted,
619 				    src_page->vmp_cs_nx,
620 				    ret);
621 			}
622 			if (ret) {
623 				break;
624 			}
625 		}
626 		if (ret) {
627 			/*
628 			 * Decryption failed.  Abort the fault.
629 			 */
630 			retval = KERN_ABORTED;
631 		}
632 
633 		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
634 		assert(src_page->vmp_busy);
635 		assert(src_page_object->paging_in_progress > 0);
636 		vm_object_lock(src_page_object);
637 
638 		/*
639 		 * Cleanup the result of vm_fault_page() of the source page.
640 		 */
641 		vm_page_wakeup_done(src_page_object, src_page);
642 		src_page = VM_PAGE_NULL;
643 		vm_object_paging_end(src_page_object);
644 		vm_object_unlock(src_page_object);
645 
646 		if (top_page != VM_PAGE_NULL) {
647 			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
648 			vm_object_lock(src_top_object);
649 			VM_PAGE_FREE(top_page);
650 			vm_object_paging_end(src_top_object);
651 			vm_object_unlock(src_top_object);
652 		}
653 	}
654 
655 done:
656 	if (upl != NULL) {
657 		/* clean up the UPL */
658 
659 		/*
660 		 * The pages are currently dirty because we've just been
661 		 * writing on them, but as far as we're concerned, they're
662 		 * clean since they contain their "original" contents as
663 		 * provided by us, the pager.
664 		 * Tell the UPL to mark them "clean".
665 		 */
666 		upl_clear_dirty(upl, TRUE);
667 
668 		/* abort or commit the UPL */
669 		if (retval != KERN_SUCCESS) {
670 			upl_abort(upl, 0);
671 			if (retval == KERN_ABORTED) {
672 				wait_result_t   wait_result;
673 
674 				/*
675 				 * We aborted the fault and did not provide
676 				 * any contents for the requested pages but
677 				 * the pages themselves are not invalid, so
678 				 * let's return success and let the caller
679 				 * retry the fault, in case it might succeed
680 				 * later (when the decryption code is up and
681 				 * running in the kernel, for example).
682 				 */
683 				retval = KERN_SUCCESS;
684 				/*
685 				 * Wait a little bit first to avoid using
686 				 * too much CPU time retrying and failing
687 				 * the same fault over and over again.
688 				 */
689 				wait_result = assert_wait_timeout(
690 					(event_t) apple_protect_pager_data_request,
691 					THREAD_UNINT,
692 					10000,  /* 10ms */
693 					NSEC_PER_USEC);
694 				assert(wait_result == THREAD_WAITING);
695 				wait_result = thread_block(THREAD_CONTINUE_NULL);
696 				assert(wait_result == THREAD_TIMED_OUT);
697 			}
698 		} else {
699 			boolean_t empty;
700 			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
701 			    "upl %p offset 0x%llx size 0x%x",
702 			    upl, upl->u_offset, upl->u_size);
703 			upl_commit_range(upl, 0, upl->u_size,
704 			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
705 			    upl_pl, pl_count, &empty);
706 		}
707 
708 		/* and deallocate the UPL */
709 		upl_deallocate(upl);
710 		upl = NULL;
711 	}
712 	if (src_top_object != VM_OBJECT_NULL) {
713 		vm_object_deallocate(src_top_object);
714 	}
715 	return retval;
716 }
717 
718 /*
719  * apple_protect_pager_reference()
720  *
721  * Get a reference on this memory object.
722  * For external usage only.  Assumes that the initial reference count is not 0,
723  * i.e. one should not "revive" a dead pager this way.
724  */
725 void
726 apple_protect_pager_reference(
727 	memory_object_t         mem_obj)
728 {
729 	apple_protect_pager_t   pager;
730 
731 	pager = apple_protect_pager_lookup(mem_obj);
732 
733 	lck_mtx_lock(&apple_protect_pager_lock);
734 	os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
735 	lck_mtx_unlock(&apple_protect_pager_lock);
736 }
737 
738 
739 /*
740  * apple_protect_pager_dequeue:
741  *
742  * Removes a pager from the list of pagers.
743  *
744  * The caller must hold "apple_protect_pager_lock".
745  */
746 void
747 apple_protect_pager_dequeue(
748 	apple_protect_pager_t pager)
749 {
750 	assert(!pager->is_mapped);
751 
752 	queue_remove(&apple_protect_pager_queue,
753 	    pager,
754 	    apple_protect_pager_t,
755 	    pager_queue);
756 	pager->pager_queue.next = NULL;
757 	pager->pager_queue.prev = NULL;
758 
759 	apple_protect_pager_count--;
760 }
761 
762 /*
763  * apple_protect_pager_terminate_internal:
764  *
765  * Trigger the asynchronous termination of the memory object associated
766  * with this pager.
767  * When the memory object is terminated, there will be one more call
768  * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
769  * to finish the clean up.
770  *
771  * "apple_protect_pager_lock" should not be held by the caller.
772  * We don't need the lock because the pager has already been removed from
773  * the pagers' list and is now ours exclusively.
774  */
775 void
776 apple_protect_pager_terminate_internal(
777 	apple_protect_pager_t pager)
778 {
779 	assert(pager->is_ready);
780 	assert(!pager->is_mapped);
781 
782 	if (pager->backing_object != VM_OBJECT_NULL) {
783 		vm_object_deallocate(pager->backing_object);
784 		pager->backing_object = VM_OBJECT_NULL;
785 	}
786 
787 	/* one less pager using this "pager_crypt_info" */
788 #if CRYPT_INFO_DEBUG
789 	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
790 	    __FUNCTION__,
791 	    pager->crypt_info,
792 	    pager->crypt_info->crypt_refcnt);
793 #endif /* CRYPT_INFO_DEBUG */
794 	crypt_info_deallocate(pager->crypt_info);
795 	pager->crypt_info = NULL;
796 
797 	/* trigger the destruction of the memory object */
798 	memory_object_destroy(pager->ap_pgr_hdr.mo_control, VM_OBJECT_DESTROY_PAGER);
799 }
800 
801 /*
802  * apple_protect_pager_deallocate_internal()
803  *
804  * Release a reference on this pager and free it when the last
805  * reference goes away.
806  * Can be called with apple_protect_pager_lock held or not but always returns
807  * with it unlocked.
808  */
809 void
810 apple_protect_pager_deallocate_internal(
811 	apple_protect_pager_t   pager,
812 	boolean_t               locked)
813 {
814 	boolean_t       needs_trimming;
815 	unsigned int    count_unmapped;
816 	os_ref_count_t  ref_count;
817 
818 	if (!locked) {
819 		lck_mtx_lock(&apple_protect_pager_lock);
820 	}
821 
822 	count_unmapped = (apple_protect_pager_count -
823 	    apple_protect_pager_count_mapped);
824 	if (count_unmapped > apple_protect_pager_cache_limit) {
825 		/* we have too many unmapped pagers:  trim some */
826 		needs_trimming = TRUE;
827 	} else {
828 		needs_trimming = FALSE;
829 	}
830 
831 	/* drop a reference on this pager */
832 	ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
833 
834 	if (ref_count == 1) {
835 		/*
836 		 * Only the "named" reference is left, which means that
837 		 * no one is really holding on to this pager anymore.
838 		 * Terminate it.
839 		 */
840 		apple_protect_pager_dequeue(pager);
841 		/* the pager is all ours: no need for the lock now */
842 		lck_mtx_unlock(&apple_protect_pager_lock);
843 		apple_protect_pager_terminate_internal(pager);
844 	} else if (ref_count == 0) {
845 		/*
846 		 * Dropped the existence reference;  the memory object has
847 		 * been terminated.  Do some final cleanup and release the
848 		 * pager structure.
849 		 */
850 		lck_mtx_unlock(&apple_protect_pager_lock);
851 		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
852 			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
853 			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
854 		}
855 		kfree_type(struct apple_protect_pager, pager);
856 		pager = APPLE_PROTECT_PAGER_NULL;
857 	} else {
858 		/* there are still plenty of references:  keep going... */
859 		lck_mtx_unlock(&apple_protect_pager_lock);
860 	}
861 
862 	if (needs_trimming) {
863 		apple_protect_pager_trim();
864 	}
865 	/* caution: lock is not held on return... */
866 }
867 
868 /*
869  * apple_protect_pager_deallocate()
870  *
871  * Release a reference on this pager and free it when the last
872  * reference goes away.
873  */
874 void
875 apple_protect_pager_deallocate(
876 	memory_object_t         mem_obj)
877 {
878 	apple_protect_pager_t   pager;
879 
880 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
881 	pager = apple_protect_pager_lookup(mem_obj);
882 	apple_protect_pager_deallocate_internal(pager, FALSE);
883 }
884 
885 /*
886  * apple_protect_pager_terminate()
 *
 * Nothing to do here: the pager's resources are released when its last
 * reference goes away (see apple_protect_pager_deallocate_internal() and
 * apple_protect_pager_terminate_internal()).
887  */
888 kern_return_t
889 apple_protect_pager_terminate(
890 #if !DEBUG
891 	__unused
892 #endif
893 	memory_object_t mem_obj)
894 {
895 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));
896 
897 	return KERN_SUCCESS;
898 }
899 
900 /*
901  * apple_protect_pager_map()
902  *
903  * This allows VM to let us, the EMM, know that this memory object
904  * is currently mapped one or more times.  This is called by VM each time
905  * the memory object gets mapped and we take one extra reference on the
906  * memory object to account for all its mappings.
907  */
908 kern_return_t
909 apple_protect_pager_map(
910 	memory_object_t         mem_obj,
911 	__unused vm_prot_t      prot)
912 {
913 	apple_protect_pager_t   pager;
914 
915 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
916 
917 	pager = apple_protect_pager_lookup(mem_obj);
918 
919 	lck_mtx_lock(&apple_protect_pager_lock);
920 	assert(pager->is_ready);
921 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
922 	if (pager->is_mapped == FALSE) {
923 		/*
924 		 * First mapping of this pager:  take an extra reference
925 		 * that will remain until all the mappings of this pager
926 		 * are removed.
927 		 */
928 		pager->is_mapped = TRUE;
929 		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
930 		apple_protect_pager_count_mapped++;
931 	}
932 	lck_mtx_unlock(&apple_protect_pager_lock);
933 
934 	return KERN_SUCCESS;
935 }
936 
937 /*
938  * apple_protect_pager_last_unmap()
939  *
940  * This is called by VM when this memory object is no longer mapped anywhere.
941  */
942 kern_return_t
943 apple_protect_pager_last_unmap(
944 	memory_object_t         mem_obj)
945 {
946 	apple_protect_pager_t   pager;
947 	unsigned int            count_unmapped;
948 
949 	PAGER_DEBUG(PAGER_ALL,
950 	    ("apple_protect_pager_last_unmap: %p\n", mem_obj));
951 
952 	pager = apple_protect_pager_lookup(mem_obj);
953 
954 	lck_mtx_lock(&apple_protect_pager_lock);
955 	if (pager->is_mapped) {
956 		/*
957 		 * All the mappings are gone, so let go of the one extra
958 		 * reference that represents all the mappings of this pager.
959 		 */
960 		apple_protect_pager_count_mapped--;
961 		count_unmapped = (apple_protect_pager_count -
962 		    apple_protect_pager_count_mapped);
963 		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
964 			apple_protect_pager_count_unmapped_max = count_unmapped;
965 		}
966 		pager->is_mapped = FALSE;
967 		apple_protect_pager_deallocate_internal(pager, TRUE);
968 		/* caution: deallocate_internal() released the lock ! */
969 	} else {
970 		lck_mtx_unlock(&apple_protect_pager_lock);
971 	}
972 
973 	return KERN_SUCCESS;
974 }
975 
976 boolean_t
977 apple_protect_pager_backing_object(
978 	memory_object_t mem_obj,
979 	memory_object_offset_t offset,
980 	vm_object_t *backing_object,
981 	vm_object_offset_t *backing_offset)
982 {
983 	apple_protect_pager_t   pager;
984 
985 	PAGER_DEBUG(PAGER_ALL,
986 	    ("apple_protect_pager_backing_object: %p\n", mem_obj));
987 
988 	pager = apple_protect_pager_lookup(mem_obj);
989 
990 	*backing_object = pager->backing_object;
991 	*backing_offset = pager->backing_offset + offset;
992 
993 	return TRUE;
994 }
995 
996 /*
997  * apple_protect_pager_lookup()
 *
 * Convert a memory object back to the apple_protect_pager that embeds it.
998  */
999 apple_protect_pager_t
1000 apple_protect_pager_lookup(
1001 	memory_object_t  mem_obj)
1002 {
1003 	apple_protect_pager_t   pager;
1004 
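	/*
	 * The "struct memory_object" header is the first field of
	 * struct apple_protect_pager, so the memory object pointer can
	 * simply be cast back to the pager that embeds it.
	 */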
1005 	assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
1006 	pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
1007 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
1008 	return pager;
1009 }
1010 
1011 apple_protect_pager_t
1012 apple_protect_pager_create(
1013 	vm_object_t             backing_object,
1014 	vm_object_offset_t      backing_offset,
1015 	vm_object_offset_t      crypto_backing_offset,
1016 	struct pager_crypt_info *crypt_info,
1017 	vm_object_offset_t      crypto_start,
1018 	vm_object_offset_t      crypto_end,
1019 	boolean_t               cache_pager)
1020 {
1021 	apple_protect_pager_t   pager, pager2;
1022 	memory_object_control_t control;
1023 	kern_return_t           kr;
1024 	struct pager_crypt_info *old_crypt_info;
1025 
1026 	pager = kalloc_type(struct apple_protect_pager, Z_WAITOK | Z_NOFAIL);
1027 
1028 	/*
1029 	 * The vm_map call takes both named entry ports and raw memory
1030 	 * objects in the same parameter.  We need to make sure that
1031 	 * vm_map does not see this object as a named entry port.  So,
1032 	 * we reserve the first word in the object for a fake ip_kotype
1033 	 * setting - that will tell vm_map to use it as a memory object.
1034 	 */
1035 	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
1036 	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
1037 	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1038 
1039 	pager->is_ready = FALSE; /* not ready until it has a "name" */
1040 	/* one reference for the caller */
1041 	os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
1042 	pager->is_mapped = FALSE;
1043 	if (cache_pager) {
1044 		/* extra reference for the cache */
1045 		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1046 		pager->is_cached = true;
1047 	} else {
1048 		pager->is_cached = false;
1049 	}
1050 	pager->backing_object = backing_object;
1051 	pager->backing_offset = backing_offset;
1052 	pager->crypto_backing_offset = crypto_backing_offset;
1053 	pager->crypto_start = crypto_start;
1054 	pager->crypto_end = crypto_end;
1055 	pager->crypt_info = crypt_info; /* allocated by caller */
1056 
1057 #if CRYPT_INFO_DEBUG
1058 	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
1059 	    __FUNCTION__,
1060 	    crypt_info,
1061 	    crypt_info->page_decrypt,
1062 	    crypt_info->crypt_end,
1063 	    crypt_info->crypt_ops,
1064 	    crypt_info->crypt_refcnt);
1065 #endif /* CRYPT_INFO_DEBUG */
1066 
1067 	vm_object_reference(backing_object);
1068 
1069 	old_crypt_info = NULL;
1070 
1071 	lck_mtx_lock(&apple_protect_pager_lock);
1072 	/* see if anyone raced us to create a pager for the same object */
1073 	queue_iterate(&apple_protect_pager_queue,
1074 	    pager2,
1075 	    apple_protect_pager_t,
1076 	    pager_queue) {
1077 		if ((pager2->crypt_info->page_decrypt !=
1078 		    crypt_info->page_decrypt) ||
1079 		    (pager2->crypt_info->crypt_end !=
1080 		    crypt_info->crypt_end) ||
1081 		    (pager2->crypt_info->crypt_ops !=
1082 		    crypt_info->crypt_ops)) {
1083 			/* crypt_info contents do not match: next pager */
1084 			continue;
1085 		}
1086 
1087 		/* found a match for crypt_info ... */
1088 		if (old_crypt_info) {
1089 			/* ... already switched to that crypt_info */
1090 			assert(old_crypt_info == pager2->crypt_info);
1091 		} else if (pager2->crypt_info != crypt_info) {
1092 			/* ... switch to that pager's crypt_info */
1093 #if CRYPT_INFO_DEBUG
1094 			printf("CRYPT_INFO %s: reference %p ref %d "
1095 			    "(create match)\n",
1096 			    __FUNCTION__,
1097 			    pager2->crypt_info,
1098 			    pager2->crypt_info->crypt_refcnt);
1099 #endif /* CRYPT_INFO_DEBUG */
1100 			old_crypt_info = pager2->crypt_info;
1101 			crypt_info_reference(old_crypt_info);
1102 			pager->crypt_info = old_crypt_info;
1103 		}
1104 
1105 		if (pager2->backing_object == backing_object &&
1106 		    pager2->backing_offset == backing_offset &&
1107 		    pager2->crypto_backing_offset == crypto_backing_offset &&
1108 		    pager2->crypto_start == crypto_start &&
1109 		    pager2->crypto_end == crypto_end) {
1110 			/* full match: use that pager */
1111 			break;
1112 		}
1113 	}
1114 	if (!queue_end(&apple_protect_pager_queue,
1115 	    (queue_entry_t) pager2)) {
1116 		/* we lost the race, down with the loser... */
1117 		lck_mtx_unlock(&apple_protect_pager_lock);
1118 		vm_object_deallocate(pager->backing_object);
1119 		pager->backing_object = VM_OBJECT_NULL;
1120 #if CRYPT_INFO_DEBUG
1121 		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
1122 		    __FUNCTION__,
1123 		    pager->crypt_info,
1124 		    pager->crypt_info->crypt_refcnt);
1125 #endif /* CRYPT_INFO_DEBUG */
1126 		crypt_info_deallocate(pager->crypt_info);
1127 		pager->crypt_info = NULL;
1128 		kfree_type(struct apple_protect_pager, pager);
1129 		/* ... and go with the winner */
1130 		pager = pager2;
1131 		/* let the winner make sure the pager gets ready */
1132 		return pager;
1133 	}
1134 
1135 	/* enter new pager at the head of our list of pagers */
1136 	queue_enter_first(&apple_protect_pager_queue,
1137 	    pager,
1138 	    apple_protect_pager_t,
1139 	    pager_queue);
1140 	apple_protect_pager_count++;
1141 	if (apple_protect_pager_count > apple_protect_pager_count_max) {
1142 		apple_protect_pager_count_max = apple_protect_pager_count;
1143 	}
1144 	lck_mtx_unlock(&apple_protect_pager_lock);
1145 
1146 	kr = memory_object_create_named((memory_object_t) pager,
1147 	    0,
1148 	    &control);
1149 	assert(kr == KERN_SUCCESS);
1150 
1151 	memory_object_mark_trusted(control);
1152 
1153 	lck_mtx_lock(&apple_protect_pager_lock);
1154 	/* the new pager is now ready to be used */
1155 	pager->is_ready = TRUE;
1156 	lck_mtx_unlock(&apple_protect_pager_lock);
1157 
1158 	/* wakeup anyone waiting for this pager to be ready */
1159 	thread_wakeup(&pager->is_ready);
1160 
1161 	if (old_crypt_info != NULL &&
1162 	    old_crypt_info != crypt_info) {
1163 		/* we re-used an old crypt_info instead of using our new one */
1164 #if CRYPT_INFO_DEBUG
1165 		printf("CRYPT_INFO %s: deallocate %p ref %d "
1166 		    "(create used old)\n",
1167 		    __FUNCTION__,
1168 		    crypt_info,
1169 		    crypt_info->crypt_refcnt);
1170 #endif /* CRYPT_INFO_DEBUG */
1171 		crypt_info_deallocate(crypt_info);
1172 		crypt_info = NULL;
1173 	}
1174 
1175 	return pager;
1176 }
1177 
1178 /*
1179  * apple_protect_pager_setup()
1180  *
1181  * Provide the caller with a memory object backed by the provided
1182  * "backing_object" VM object.  If such a memory object already exists,
1183  * re-use it, otherwise create a new memory object.
1184  */
1185 memory_object_t
1186 apple_protect_pager_setup(
1187 	vm_object_t             backing_object,
1188 	vm_object_offset_t      backing_offset,
1189 	vm_object_offset_t      crypto_backing_offset,
1190 	struct pager_crypt_info *crypt_info,
1191 	vm_object_offset_t      crypto_start,
1192 	vm_object_offset_t      crypto_end,
1193 	boolean_t               cache_pager)
1194 {
1195 	apple_protect_pager_t   pager;
1196 	struct pager_crypt_info *old_crypt_info, *new_crypt_info;
1197 
1198 #if CRYPT_INFO_DEBUG
1199 	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
1200 	    __FUNCTION__,
1201 	    crypt_info,
1202 	    crypt_info->page_decrypt,
1203 	    crypt_info->crypt_end,
1204 	    crypt_info->crypt_ops,
1205 	    crypt_info->crypt_refcnt);
1206 #endif /* CRYPT_INFO_DEBUG */
1207 
1208 	old_crypt_info = NULL;
1209 
1210 	lck_mtx_lock(&apple_protect_pager_lock);
1211 
1212 	queue_iterate(&apple_protect_pager_queue,
1213 	    pager,
1214 	    apple_protect_pager_t,
1215 	    pager_queue) {
1216 		if ((pager->crypt_info->page_decrypt !=
1217 		    crypt_info->page_decrypt) ||
1218 		    (pager->crypt_info->crypt_end !=
1219 		    crypt_info->crypt_end) ||
1220 		    (pager->crypt_info->crypt_ops !=
1221 		    crypt_info->crypt_ops)) {
1222 			/* no match for "crypt_info": next pager */
1223 			continue;
1224 		}
1225 		/* found a match for crypt_info ... */
1226 		if (old_crypt_info) {
1227 			/* ... already switched to that crypt_info */
1228 			assert(old_crypt_info == pager->crypt_info);
1229 		} else {
1230 			/* ... switch to that pager's crypt_info */
1231 			old_crypt_info = pager->crypt_info;
1232 #if CRYPT_INFO_DEBUG
1233 			printf("CRYPT_INFO %s: "
1234 			    "switching crypt_info from %p [%p,%p,%p,%d] "
1235 			    "to %p [%p,%p,%p,%d] from pager %p\n",
1236 			    __FUNCTION__,
1237 			    crypt_info,
1238 			    crypt_info->page_decrypt,
1239 			    crypt_info->crypt_end,
1240 			    crypt_info->crypt_ops,
1241 			    crypt_info->crypt_refcnt,
1242 			    old_crypt_info,
1243 			    old_crypt_info->page_decrypt,
1244 			    old_crypt_info->crypt_end,
1245 			    old_crypt_info->crypt_ops,
1246 			    old_crypt_info->crypt_refcnt,
1247 			    pager);
1248 			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
1249 			    __FUNCTION__,
1250 			    pager->crypt_info,
1251 			    pager->crypt_info->crypt_refcnt);
1252 #endif /* CRYPT_INFO_DEBUG */
1253 			crypt_info_reference(pager->crypt_info);
1254 		}
1255 
1256 		if (pager->backing_object == backing_object &&
1257 		    pager->backing_offset == backing_offset &&
1258 		    pager->crypto_backing_offset == crypto_backing_offset &&
1259 		    pager->crypto_start == crypto_start &&
1260 		    pager->crypto_end == crypto_end) {
1261 			/* full match: use that pager! */
1262 			assert(old_crypt_info == pager->crypt_info);
1263 			assert(old_crypt_info->crypt_refcnt > 1);
1264 #if CRYPT_INFO_DEBUG
1265 			printf("CRYPT_INFO %s: "
1266 			    "pager match with %p crypt_info %p\n",
1267 			    __FUNCTION__,
1268 			    pager,
1269 			    pager->crypt_info);
1270 			printf("CRYPT_INFO %s: deallocate %p ref %d "
1271 			    "(pager match)\n",
1272 			    __FUNCTION__,
1273 			    old_crypt_info,
1274 			    old_crypt_info->crypt_refcnt);
1275 #endif /* CRYPT_INFO_DEBUG */
1276 			/* release the extra ref on crypt_info we got above */
1277 			crypt_info_deallocate(old_crypt_info);
1278 			assert(old_crypt_info->crypt_refcnt > 0);
1279 			/* give extra reference on pager to the caller */
1280 			os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1281 			break;
1282 		}
1283 	}
1284 	if (queue_end(&apple_protect_pager_queue,
1285 	    (queue_entry_t) pager)) {
1286 		lck_mtx_unlock(&apple_protect_pager_lock);
1287 		/* no existing pager for this backing object */
1288 		pager = APPLE_PROTECT_PAGER_NULL;
1289 		if (old_crypt_info) {
1290 			/* use this old crypt_info for new pager */
1291 			new_crypt_info = old_crypt_info;
1292 #if CRYPT_INFO_DEBUG
1293 			printf("CRYPT_INFO %s: "
1294 			    "will use old_crypt_info %p for new pager\n",
1295 			    __FUNCTION__,
1296 			    old_crypt_info);
1297 #endif /* CRYPT_INFO_DEBUG */
1298 		} else {
1299 			/* allocate a new crypt_info for new pager */
1300 			new_crypt_info = kalloc_type(struct pager_crypt_info, Z_WAITOK);
1301 			*new_crypt_info = *crypt_info;
1302 			new_crypt_info->crypt_refcnt = 1;
1303 #if CRYPT_INFO_DEBUG
1304 			printf("CRYPT_INFO %s: "
1305 			    "will use new_crypt_info %p for new pager\n",
1306 			    __FUNCTION__,
1307 			    new_crypt_info);
1308 #endif /* CRYPT_INFO_DEBUG */
1309 		}
1310 		if (new_crypt_info == NULL) {
1311 			/* can't create new pager without a crypt_info */
1312 		} else {
1313 			/* create new pager */
1314 			pager = apple_protect_pager_create(
1315 				backing_object,
1316 				backing_offset,
1317 				crypto_backing_offset,
1318 				new_crypt_info,
1319 				crypto_start,
1320 				crypto_end,
1321 				cache_pager);
1322 		}
1323 		if (pager == APPLE_PROTECT_PAGER_NULL) {
1324 			/* could not create a new pager */
1325 			if (new_crypt_info == old_crypt_info) {
1326 				/* release extra reference on old_crypt_info */
1327 #if CRYPT_INFO_DEBUG
1328 				printf("CRYPT_INFO %s: deallocate %p ref %d "
1329 				    "(create fail old_crypt_info)\n",
1330 				    __FUNCTION__,
1331 				    old_crypt_info,
1332 				    old_crypt_info->crypt_refcnt);
1333 #endif /* CRYPT_INFO_DEBUG */
1334 				crypt_info_deallocate(old_crypt_info);
1335 				old_crypt_info = NULL;
1336 			} else {
1337 				/* release unused new_crypt_info */
1338 				assert(new_crypt_info->crypt_refcnt == 1);
1339 #if CRYPT_INFO_DEBUG
1340 				printf("CRYPT_INFO %s: deallocate %p ref %d "
1341 				    "(create fail new_crypt_info)\n",
1342 				    __FUNCTION__,
1343 				    new_crypt_info,
1344 				    new_crypt_info->crypt_refcnt);
1345 #endif /* CRYPT_INFO_DEBUG */
1346 				crypt_info_deallocate(new_crypt_info);
1347 				new_crypt_info = NULL;
1348 			}
1349 			return MEMORY_OBJECT_NULL;
1350 		}
1351 		lck_mtx_lock(&apple_protect_pager_lock);
1352 	} else {
1353 		assert(old_crypt_info == pager->crypt_info);
1354 	}
1355 
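	/*
	 * If the pager is still being set up (possibly by another thread),
	 * wait for apple_protect_pager_create() to mark it "ready" and
	 * wake us up.
	 */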
1356 	while (!pager->is_ready) {
1357 		lck_mtx_sleep(&apple_protect_pager_lock,
1358 		    LCK_SLEEP_DEFAULT,
1359 		    &pager->is_ready,
1360 		    THREAD_UNINT);
1361 	}
1362 	lck_mtx_unlock(&apple_protect_pager_lock);
1363 
1364 	return (memory_object_t) pager;
1365 }
1366 
1367 void
1368 apple_protect_pager_trim(void)
1369 {
1370 	apple_protect_pager_t   pager, prev_pager;
1371 	queue_head_t            trim_queue;
1372 	unsigned int            num_trim;
1373 	unsigned int            count_unmapped;
1374 
1375 	lck_mtx_lock(&apple_protect_pager_lock);
1376 
1377 	/*
1378 	 * We have too many pagers, try and trim some unused ones,
1379 	 * starting with the oldest pager at the end of the queue.
1380 	 */
1381 	queue_init(&trim_queue);
1382 	num_trim = 0;
1383 
1384 	for (pager = (apple_protect_pager_t)
1385 	    queue_last(&apple_protect_pager_queue);
1386 	    !queue_end(&apple_protect_pager_queue,
1387 	    (queue_entry_t) pager);
1388 	    pager = prev_pager) {
1389 		/* get prev elt before we dequeue */
1390 		prev_pager = (apple_protect_pager_t)
1391 		    queue_prev(&pager->pager_queue);
1392 
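		/*
		 * A raw refcount of 2 means only the "named" reference
		 * and the cache's own reference remain, i.e. nothing else
		 * is holding on to this pager.
		 */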
1393 		if (pager->is_cached &&
1394 		    os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
1395 		    pager->is_ready &&
1396 		    !pager->is_mapped) {
1397 			/* this pager can be trimmed */
1398 			num_trim++;
1399 			/* remove this pager from the main list ... */
1400 			apple_protect_pager_dequeue(pager);
1401 			/* ... and add it to our trim queue */
1402 			queue_enter_first(&trim_queue,
1403 			    pager,
1404 			    apple_protect_pager_t,
1405 			    pager_queue);
1406 
1407 			count_unmapped = (apple_protect_pager_count -
1408 			    apple_protect_pager_count_mapped);
1409 			if (count_unmapped <= apple_protect_pager_cache_limit) {
1410 				/* we've trimmed enough pagers to get back under the limit */
1411 				break;
1412 			}
1413 		}
1414 	}
1415 	if (num_trim > apple_protect_pager_num_trim_max) {
1416 		apple_protect_pager_num_trim_max = num_trim;
1417 	}
1418 	apple_protect_pager_num_trim_total += num_trim;
1419 
1420 	lck_mtx_unlock(&apple_protect_pager_lock);
1421 
1422 	/* terminate the trimmed pagers */
1423 	while (!queue_empty(&trim_queue)) {
1424 		queue_remove_first(&trim_queue,
1425 		    pager,
1426 		    apple_protect_pager_t,
1427 		    pager_queue);
1428 		assert(pager->is_cached);
1429 		pager->is_cached = false;
1430 		pager->pager_queue.next = NULL;
1431 		pager->pager_queue.prev = NULL;
1432 		/*
1433 		 * We can't call deallocate_internal() because the pager
1434 		 * has already been dequeued, but we still need to remove
1435 		 * a reference.
1436 		 */
1437 		os_ref_count_t __assert_only count;
1438 		count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1439 		assert(count == 1);
1440 		apple_protect_pager_terminate_internal(pager);
1441 	}
1442 }
1443 
1444 
1445 void
1446 crypt_info_reference(
1447 	struct pager_crypt_info *crypt_info)
1448 {
1449 	assert(crypt_info->crypt_refcnt != 0);
1450 #if CRYPT_INFO_DEBUG
1451 	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1452 	    __FUNCTION__,
1453 	    crypt_info,
1454 	    crypt_info->crypt_refcnt,
1455 	    crypt_info->crypt_refcnt + 1);
1456 #endif /* CRYPT_INFO_DEBUG */
1457 	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
1458 }
1459 
1460 void
1461 crypt_info_deallocate(
1462 	struct pager_crypt_info *crypt_info)
1463 {
1464 #if CRYPT_INFO_DEBUG
1465 	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1466 	    __FUNCTION__,
1467 	    crypt_info,
1468 	    crypt_info->crypt_refcnt,
1469 	    crypt_info->crypt_refcnt - 1);
1470 #endif /* CRYPT_INFO_DEBUG */
1471 	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
1472 	if (crypt_info->crypt_refcnt == 0) {
1473 		/* deallocate any crypt module data */
1474 		if (crypt_info->crypt_end) {
1475 			crypt_info->crypt_end(crypt_info->crypt_ops);
1476 			crypt_info->crypt_end = NULL;
1477 		}
1478 #if CRYPT_INFO_DEBUG
1479 		printf("CRYPT_INFO %s: freeing %p\n",
1480 		    __FUNCTION__,
1481 		    crypt_info);
1482 #endif /* CRYPT_INFO_DEBUG */
1483 		kfree_type(struct pager_crypt_info, crypt_info);
1484 	}
1485 }
1486 
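/*
 * apple_protect_pager_purge()
 *
 * Discard all the resident pages of this pager's VM object and return
 * the number of pages that were reclaimed.
 */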
1487 static uint64_t
1488 apple_protect_pager_purge(
1489 	apple_protect_pager_t pager)
1490 {
1491 	uint64_t pages_purged;
1492 	vm_object_t object;
1493 
1494 	pages_purged = 0;
1495 	object = memory_object_to_vm_object((memory_object_t) pager);
1496 	assert(object != VM_OBJECT_NULL);
1497 	vm_object_lock(object);
1498 	pages_purged = object->resident_page_count;
1499 	vm_object_reap_pages(object, REAP_DATA_FLUSH);
1500 	pages_purged -= object->resident_page_count;
1501 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1502 	vm_object_unlock(object);
1503 	return pages_purged;
1504 }
1505 
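/*
 * apple_protect_pager_purge_all()
 *
 * Purge the resident pages of every apple protect pager on the list and
 * return the total number of pages reclaimed.
 */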
1506 uint64_t
1507 apple_protect_pager_purge_all(void)
1508 {
1509 	uint64_t pages_purged;
1510 	apple_protect_pager_t pager;
1511 
1512 	pages_purged = 0;
1513 	lck_mtx_lock(&apple_protect_pager_lock);
1514 	queue_iterate(&apple_protect_pager_queue, pager, apple_protect_pager_t, pager_queue) {
1515 		pages_purged += apple_protect_pager_purge(pager);
1516 	}
1517 	lck_mtx_unlock(&apple_protect_pager_lock);
1518 #if DEVELOPMENT || DEBUG
1519 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1520 #endif /* DEVELOPMENT || DEBUG */
1521 	return pages_purged;
1522 }
1523