1 /*
2  * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49 #include <os/refcnt.h>
50 
51 #include <sys/kdebug_triage.h>
52 
53 #include <vm/vm_fault_internal.h>
54 #include <vm/vm_map.h>
55 #include <vm/memory_object_internal.h>
56 #include <vm/vm_pageout_xnu.h>
57 #include <vm/vm_protos_internal.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_ubc.h>
60 #include <vm/vm_page_internal.h>
61 #include <vm/vm_object_internal.h>
62 
63 /*
64  * APPLE PROTECT MEMORY PAGER
65  *
66  * This external memory manager (EMM) handles memory from the encrypted
67  * sections of some executables protected by the DSMOS kernel extension.
68  *
69  * It mostly handles page-in requests (from memory_object_data_request()) by
70  * getting the encrypted data from its backing VM object, itself backed by
71  * the encrypted file, decrypting it and providing it to VM.
72  *
73  * The decrypted pages will never be dirtied, so the memory manager doesn't
74  * need to handle page-out requests (from memory_object_data_return()).  The
75  * pages need to be mapped copy-on-write, so that the originals stay clean.
76  *
77  * We don't expect to have to handle a large number of apple-protected
78  * binaries, so the data structures are very simple (simple linked list)
79  * for now.
80  */
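
/*
 * Rough page-in flow (all within this file):
 *   apple_protect_pager_data_request()
 *     -> memory_object_upl_request()      gather the requested pages in a UPL
 *     -> vm_fault_page()                  fault in the encrypted source page
 *                                         from the backing VM object
 *     -> crypt_info->page_decrypt()       decrypt into the destination page
 *     -> upl_commit_range()               hand the decrypted pages back to VM
 */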
81 
82 /* forward declarations */
83 void apple_protect_pager_reference(memory_object_t mem_obj);
84 void apple_protect_pager_deallocate(memory_object_t mem_obj);
85 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
86     memory_object_control_t control,
87     memory_object_cluster_size_t pg_size);
88 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
89 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
90     memory_object_offset_t offset,
91     memory_object_cluster_size_t length,
92     vm_prot_t protection_required,
93     memory_object_fault_info_t fault_info);
94 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
95     memory_object_offset_t offset,
96     memory_object_cluster_size_t      data_cnt,
97     memory_object_offset_t *resid_offset,
98     int *io_error,
99     boolean_t dirty,
100     boolean_t kernel_copy,
101     int upl_flags);
102 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
103     memory_object_offset_t offset,
104     memory_object_cluster_size_t data_cnt);
105 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
106     vm_prot_t prot);
107 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
108 boolean_t apple_protect_pager_backing_object(
109 	memory_object_t mem_obj,
110 	memory_object_offset_t mem_obj_offset,
111 	vm_object_t *backing_object,
112 	vm_object_offset_t *backing_offset);
113 
114 #define CRYPT_INFO_DEBUG 0
115 void crypt_info_reference(struct pager_crypt_info *crypt_info);
116 void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
117 
118 /*
119  * Vector of VM operations for this EMM.
120  * These routines are invoked by VM via the memory_object_*() interfaces.
121  */
122 const struct memory_object_pager_ops apple_protect_pager_ops = {
123 	.memory_object_reference = apple_protect_pager_reference,
124 	.memory_object_deallocate = apple_protect_pager_deallocate,
125 	.memory_object_init = apple_protect_pager_init,
126 	.memory_object_terminate = apple_protect_pager_terminate,
127 	.memory_object_data_request = apple_protect_pager_data_request,
128 	.memory_object_data_return = apple_protect_pager_data_return,
129 	.memory_object_data_initialize = apple_protect_pager_data_initialize,
130 	.memory_object_map = apple_protect_pager_map,
131 	.memory_object_last_unmap = apple_protect_pager_last_unmap,
132 	.memory_object_backing_object = apple_protect_pager_backing_object,
133 	.memory_object_pager_name = "apple_protect"
134 };
135 
136 /*
137  * The "apple_protect_pager" describes a memory object backed by
138  * the "apple protect" EMM.
139  */
140 typedef struct apple_protect_pager {
141 	/* mandatory generic header */
142 	struct memory_object    ap_pgr_hdr;
143 
144 	/* pager-specific data */
145 	queue_chain_t           pager_queue;    /* next & prev pagers */
146 #if MEMORY_OBJECT_HAS_REFCOUNT
147 #define ap_pgr_hdr_ref          ap_pgr_hdr.mo_ref
148 #else
149 	os_ref_atomic_t         ap_pgr_hdr_ref;      /* reference count */
150 #endif
151 	bool                    is_ready;       /* is this pager ready ? */
152 	bool                    is_mapped;      /* is this mem_obj mapped ? */
153 	bool                    is_cached;      /* is this pager cached ? */
154 	vm_object_t             backing_object; /* VM obj w/ encrypted data */
155 	vm_object_offset_t      backing_offset;
156 	vm_object_offset_t      crypto_backing_offset; /* for key... */
157 	vm_object_offset_t      crypto_start;
158 	vm_object_offset_t      crypto_end;
159 	struct pager_crypt_info *crypt_info;
160 } *apple_protect_pager_t;
161 #define APPLE_PROTECT_PAGER_NULL        ((apple_protect_pager_t) NULL)
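
/*
 * Reference counting (see apple_protect_pager_create(), _map(), _last_unmap()
 * and _deallocate_internal() below): a pager holds one "named"/existence
 * reference for as long as its memory object exists, plus one extra reference
 * while it is mapped ("is_mapped") and one while it sits in the pager cache
 * ("is_cached").  When the count drops to 1, only the named reference is left
 * and the pager gets dequeued and terminated; when it drops to 0, the
 * structure itself is freed.
 */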
162 
163 /*
164  * List of memory objects managed by this EMM.
165  * The list is protected by the "apple_protect_pager_lock" lock.
166  */
167 unsigned int apple_protect_pager_count = 0;        /* number of pagers */
168 unsigned int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
169 queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
170 LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
171 LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);
172 
173 /*
174  * Maximum number of unmapped pagers we're willing to keep around.
175  */
176 unsigned int apple_protect_pager_cache_limit = 20;
177 
178 /*
179  * Statistics & counters.
180  */
181 unsigned int apple_protect_pager_count_max = 0;
182 unsigned int apple_protect_pager_count_unmapped_max = 0;
183 unsigned int apple_protect_pager_num_trim_max = 0;
184 unsigned int apple_protect_pager_num_trim_total = 0;
185 
186 
187 
188 /* internal prototypes */
189 apple_protect_pager_t apple_protect_pager_create(
190 	vm_object_t backing_object,
191 	vm_object_offset_t backing_offset,
192 	vm_object_offset_t crypto_backing_offset,
193 	struct pager_crypt_info *crypt_info,
194 	vm_object_offset_t crypto_start,
195 	vm_object_offset_t crypto_end,
196 	boolean_t cache_pager);
197 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
198 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
199 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
200     boolean_t locked);
201 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
202 void apple_protect_pager_trim(void);
203 
204 
205 #if DEBUG
206 int apple_protect_pagerdebug = 0;
207 #define PAGER_ALL               0xffffffff
208 #define PAGER_INIT              0x00000001
209 #define PAGER_PAGEIN            0x00000002
210 
211 #define PAGER_DEBUG(LEVEL, A)                                           \
212 	MACRO_BEGIN                                                     \
213 	if ((apple_protect_pagerdebug & LEVEL)==LEVEL) {                \
214 	        printf A;                                               \
215 	}                                                               \
216 	MACRO_END
217 #else
218 #define PAGER_DEBUG(LEVEL, A)
219 #endif
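
/*
 * Note: PAGER_DEBUG() tracing is compiled in only on DEBUG kernels; setting
 * "apple_protect_pagerdebug" (from the kernel debugger, for instance) to
 * PAGER_ALL, PAGER_INIT or PAGER_PAGEIN selects which traces are printed.
 */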
220 
221 /*
222  * apple_protect_pager_init()
223  *
224  * Initializes the memory object and makes it ready to be used and mapped.
225  */
226 kern_return_t
227 apple_protect_pager_init(
228 	memory_object_t         mem_obj,
229 	memory_object_control_t control,
230 #if !DEBUG
231 	__unused
232 #endif
233 	memory_object_cluster_size_t pg_size)
234 {
235 	apple_protect_pager_t   pager;
236 	kern_return_t           kr;
237 	memory_object_attr_info_data_t  attributes;
238 
239 	PAGER_DEBUG(PAGER_ALL,
240 	    ("apple_protect_pager_init: %p, %p, %x\n",
241 	    mem_obj, control, pg_size));
242 
243 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
244 		return KERN_INVALID_ARGUMENT;
245 	}
246 
247 	pager = apple_protect_pager_lookup(mem_obj);
248 
249 	memory_object_control_reference(control);
250 
251 	pager->ap_pgr_hdr.mo_control = control;
252 
253 	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
254 	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
255 	attributes.cluster_size = (1 << (PAGE_SHIFT));
256 	attributes.may_cache_object = FALSE;
257 	attributes.temporary = TRUE;
258 
259 	kr = memory_object_change_attributes(
260 		control,
261 		MEMORY_OBJECT_ATTRIBUTE_INFO,
262 		(memory_object_info_t) &attributes,
263 		MEMORY_OBJECT_ATTR_INFO_COUNT);
264 	if (kr != KERN_SUCCESS) {
265 		panic("apple_protect_pager_init: "
266 		    "memory_object_change_attributes() failed");
267 	}
268 
269 #if CONFIG_SECLUDED_MEMORY
270 	if (secluded_for_filecache) {
271 		memory_object_mark_eligible_for_secluded(control, TRUE);
272 	}
273 #endif /* CONFIG_SECLUDED_MEMORY */
274 
275 	return KERN_SUCCESS;
276 }
277 
278 /*
279  * apple_protect_pager_data_return()
280  *
281  * Handles page-out requests from VM.  This should never happen since
282  * the pages provided by this EMM are not supposed to be dirty or dirtied
283  * and VM should simply discard the contents and reclaim the pages if it
284  * needs to.
285  */
286 kern_return_t
287 apple_protect_pager_data_return(
288 	__unused memory_object_t        mem_obj,
289 	__unused memory_object_offset_t offset,
290 	__unused memory_object_cluster_size_t           data_cnt,
291 	__unused memory_object_offset_t *resid_offset,
292 	__unused int                    *io_error,
293 	__unused boolean_t              dirty,
294 	__unused boolean_t              kernel_copy,
295 	__unused int                    upl_flags)
296 {
297 	panic("apple_protect_pager_data_return: should never get called");
298 	return KERN_FAILURE;
299 }
300 
301 kern_return_t
302 apple_protect_pager_data_initialize(
303 	__unused memory_object_t        mem_obj,
304 	__unused memory_object_offset_t offset,
305 	__unused memory_object_cluster_size_t           data_cnt)
306 {
307 	panic("apple_protect_pager_data_initialize: should never get called");
308 	return KERN_FAILURE;
309 }
310 
311 /*
312  * apple_protect_pager_data_request()
313  *
314  * Handles page-in requests from VM.
315  */
316 int apple_protect_pager_data_request_debug = 0;
317 kern_return_t
318 apple_protect_pager_data_request(
319 	memory_object_t         mem_obj,
320 	memory_object_offset_t  offset,
321 	memory_object_cluster_size_t            length,
322 #if !DEBUG
323 	__unused
324 #endif
325 	vm_prot_t               protection_required,
326 	memory_object_fault_info_t mo_fault_info)
327 {
328 	apple_protect_pager_t   pager;
329 	memory_object_control_t mo_control;
330 	upl_t                   upl;
331 	int                     upl_flags;
332 	upl_size_t              upl_size;
333 	upl_page_info_t         *upl_pl;
334 	unsigned int            pl_count;
335 	vm_object_t             src_top_object, src_page_object, dst_object;
336 	kern_return_t           kr, retval;
337 	vm_offset_t             src_vaddr, dst_vaddr;
338 	vm_offset_t             cur_offset;
339 	vm_offset_t             offset_in_page;
340 	kern_return_t           error_code;
341 	vm_prot_t               prot;
342 	vm_page_t               src_page, top_page;
343 	int                     interruptible;
344 	struct vm_object_fault_info     fault_info;
345 	vm_fault_return_t       vmfr;
346 	int                     ret;
347 
348 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
349 
350 	retval = KERN_SUCCESS;
351 	src_top_object = VM_OBJECT_NULL;
352 	src_page_object = VM_OBJECT_NULL;
353 	upl = NULL;
354 	upl_pl = NULL;
355 	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
356 	fault_info.stealth = TRUE;
357 	fault_info.io_sync = FALSE;
358 	fault_info.mark_zf_absent = FALSE;
359 	fault_info.batch_pmap_op = FALSE;
360 	interruptible = fault_info.interruptible;
361 
362 	pager = apple_protect_pager_lookup(mem_obj);
363 	assert(pager->is_ready);
364 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */
365 
366 	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
367 
368 	fault_info.lo_offset += pager->backing_offset;
369 	fault_info.hi_offset += pager->backing_offset;
370 
371 	/*
372 	 * Gather in a UPL all the VM pages requested by VM.
373 	 */
374 	mo_control = pager->ap_pgr_hdr.mo_control;
375 
376 	upl_size = length;
377 	upl_flags =
378 	    UPL_RET_ONLY_ABSENT |
379 	    UPL_SET_LITE |
380 	    UPL_NO_SYNC |
381 	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
382 	    UPL_SET_INTERNAL;
383 	pl_count = 0;
384 	kr = memory_object_upl_request(mo_control,
385 	    offset, upl_size,
386 	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
387 	if (kr != KERN_SUCCESS) {
388 		retval = kr;
389 		goto done;
390 	}
391 	dst_object = memory_object_control_to_vm_object(mo_control);
392 	assert(dst_object != VM_OBJECT_NULL);
393 
394 	/*
395 	 * We'll map the encrypted data in the kernel address space from the
396 	 * backing VM object (itself backed by the encrypted file via
397 	 * the vnode pager).
398 	 */
399 	src_top_object = pager->backing_object;
400 	assert(src_top_object != VM_OBJECT_NULL);
401 	vm_object_reference(src_top_object); /* keep the source object alive */
402 
403 	/*
404 	 * Fill in the contents of the pages requested by VM.
405 	 */
406 	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
407 	pl_count = length / PAGE_SIZE;
408 	for (cur_offset = 0;
409 	    retval == KERN_SUCCESS && cur_offset < length;
410 	    cur_offset += PAGE_SIZE) {
411 		ppnum_t dst_pnum;
412 
413 		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
414 			/* this page is not in the UPL: skip it */
415 			continue;
416 		}
417 
418 		/*
419 		 * Map the source (encrypted) page in the kernel's
420 		 * virtual address space.
421 		 * We already hold a reference on the src_top_object.
422 		 */
423 retry_src_fault:
424 		vm_object_lock(src_top_object);
425 		vm_object_paging_begin(src_top_object);
426 		error_code = 0;
427 		prot = VM_PROT_READ;
428 		src_page = VM_PAGE_NULL;
429 		vmfr = vm_fault_page(src_top_object,
430 		    pager->backing_offset + offset + cur_offset,
431 		    VM_PROT_READ,
432 		    FALSE,
433 		    FALSE,                /* src_page not looked up */
434 		    &prot,
435 		    &src_page,
436 		    &top_page,
437 		    NULL,
438 		    &error_code,
439 		    FALSE,
440 		    &fault_info);
441 		switch (vmfr) {
442 		case VM_FAULT_SUCCESS:
443 			break;
444 		case VM_FAULT_RETRY:
445 			goto retry_src_fault;
446 		case VM_FAULT_MEMORY_SHORTAGE:
447 			if (vm_page_wait(interruptible)) {
448 				goto retry_src_fault;
449 			}
450 			ktriage_record(thread_tid(current_thread()),
451 			    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_APPLE_PROTECT_PAGER,
452 			    KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_APPLE_PROTECT_PAGER_MEMORY_SHORTAGE),
453 			    0 /* arg */);
454 			OS_FALLTHROUGH;
455 		case VM_FAULT_INTERRUPTED:
456 			retval = MACH_SEND_INTERRUPTED;
457 			goto done;
458 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
459 			/* success but no VM page: fail */
460 			vm_object_paging_end(src_top_object);
461 			vm_object_unlock(src_top_object);
462 			OS_FALLTHROUGH;
463 		case VM_FAULT_MEMORY_ERROR:
464 			/* the page is not there ! */
465 			if (error_code) {
466 				retval = error_code;
467 			} else {
468 				retval = KERN_MEMORY_ERROR;
469 			}
470 			goto done;
471 		case VM_FAULT_BUSY:
472 			retval = KERN_ALREADY_WAITING;
473 			goto done;
474 		default:
475 			panic("%s: "
476 			    "vm_fault_page() return unexpected error 0x%x\n",
477 			    __func__, vmfr);
478 		}
479 		assert(src_page != VM_PAGE_NULL);
480 		assert(src_page->vmp_busy);
481 
482 		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
483 			vm_page_lockspin_queues();
484 
485 			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
486 				vm_page_speculate(src_page, FALSE);
487 			}
488 			vm_page_unlock_queues();
489 		}
490 
491 		/*
492 		 * Establish pointers to the source
493 		 * and destination physical pages.
494 		 */
495 		dst_pnum = (ppnum_t)
496 		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
497 		assert(dst_pnum != 0);
498 
499 		src_vaddr = (vm_map_offset_t)
500 		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
501 		        << PAGE_SHIFT);
502 		dst_vaddr = (vm_map_offset_t)
503 		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
504 
505 		src_page_object = VM_PAGE_OBJECT(src_page);
506 
507 		/*
508 		 * Validate the original page...
509 		 */
510 		if (src_page_object->code_signed) {
511 			vm_page_validate_cs_mapped(
512 				src_page, PAGE_SIZE, 0,
513 				(const void *) src_vaddr);
514 		}
515 		/*
516 		 * ... and transfer the results to the destination page.
517 		 */
518 		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
519 		    src_page->vmp_cs_validated);
520 		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
521 		    src_page->vmp_cs_tainted);
522 		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
523 		    src_page->vmp_cs_nx);
524 
525 		/*
526 		 * page_decrypt() might access a mapped file, so let's release
527 		 * the object lock for the source page to avoid a potential
528 		 * deadlock.  The source page is kept busy and we have a
529 		 * "paging_in_progress" reference on its object, so it's safe
530 		 * to unlock the object here.
531 		 */
532 		assert(src_page->vmp_busy);
533 		assert(src_page_object->paging_in_progress > 0);
534 		vm_object_unlock(src_page_object);
535 
536 		/*
537 		 * Decrypt the encrypted contents of the source page
538 		 * into the destination page.
539 		 */
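		/*
		 * Note: the loop below works in 4096-byte chunks because the
		 * decryption callback is handed 4KB units and the crypto range
		 * is checked per chunk, while PAGE_SIZE may be larger (16KB on
		 * arm64 devices, for example).
		 */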
540 		for (offset_in_page = 0;
541 		    offset_in_page < PAGE_SIZE;
542 		    offset_in_page += 4096) {
543 			if (offset + cur_offset + offset_in_page <
544 			    pager->crypto_start ||
545 			    offset + cur_offset + offset_in_page >=
546 			    pager->crypto_end) {
547 				/* not encrypted: just copy */
548 				bcopy((const char *)(src_vaddr +
549 				    offset_in_page),
550 				    (char *)(dst_vaddr + offset_in_page),
551 				    4096);
552 
553 				if (apple_protect_pager_data_request_debug) {
554 					printf("apple_protect_data_request"
555 					    "(%p,0x%llx+0x%llx+0x%04llx): "
556 					    "out of crypto range "
557 					    "[0x%llx:0x%llx]: "
558 					    "COPY [0x%016llx 0x%016llx] "
559 					    "code_signed=%d "
560 					    "cs_validated=%d "
561 					    "cs_tainted=%d "
562 					    "cs_nx=%d\n",
563 					    pager,
564 					    offset,
565 					    (uint64_t) cur_offset,
566 					    (uint64_t) offset_in_page,
567 					    pager->crypto_start,
568 					    pager->crypto_end,
569 					    *(uint64_t *)(dst_vaddr +
570 					    offset_in_page),
571 					    *(uint64_t *)(dst_vaddr +
572 					    offset_in_page + 8),
573 					    src_page_object->code_signed,
574 					    src_page->vmp_cs_validated,
575 					    src_page->vmp_cs_tainted,
576 					    src_page->vmp_cs_nx);
577 				}
578 				ret = 0;
579 				continue;
580 			}
581 			ret = pager->crypt_info->page_decrypt(
582 				(const void *)(src_vaddr + offset_in_page),
583 				(void *)(dst_vaddr + offset_in_page),
584 				((pager->crypto_backing_offset -
585 				pager->crypto_start) +   /* XXX ? */
586 				offset +
587 				cur_offset +
588 				offset_in_page),
589 				pager->crypt_info->crypt_ops);
590 
591 			if (apple_protect_pager_data_request_debug) {
592 				printf("apple_protect_data_request"
593 				    "(%p,0x%llx+0x%llx+0x%04llx): "
594 				    "in crypto range [0x%llx:0x%llx]: "
595 				    "DECRYPT offset 0x%llx="
596 				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
597 				    "[0x%016llx 0x%016llx] "
598 				    "code_signed=%d "
599 				    "cs_validated=%d "
600 				    "cs_tainted=%d "
601 				    "cs_nx=%d "
602 				    "ret=0x%x\n",
603 				    pager,
604 				    offset,
605 				    (uint64_t) cur_offset,
606 				    (uint64_t) offset_in_page,
607 				    pager->crypto_start, pager->crypto_end,
608 				    ((pager->crypto_backing_offset -
609 				    pager->crypto_start) +
610 				    offset +
611 				    cur_offset +
612 				    offset_in_page),
613 				    pager->crypto_backing_offset,
614 				    pager->crypto_start,
615 				    offset,
616 				    (uint64_t) cur_offset,
617 				    (uint64_t) offset_in_page,
618 				    *(uint64_t *)(dst_vaddr + offset_in_page),
619 				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
620 				    src_page_object->code_signed,
621 				    src_page->vmp_cs_validated,
622 				    src_page->vmp_cs_tainted,
623 				    src_page->vmp_cs_nx,
624 				    ret);
625 			}
626 			if (ret) {
627 				break;
628 			}
629 		}
630 		if (ret) {
631 			/*
632 			 * Decryption failed.  Abort the fault.
633 			 */
634 			retval = KERN_ABORTED;
635 		}
636 
637 		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
638 		assert(src_page->vmp_busy);
639 		assert(src_page_object->paging_in_progress > 0);
640 		vm_object_lock(src_page_object);
641 
642 		/*
643 		 * Cleanup the result of vm_fault_page() of the source page.
644 		 */
645 		vm_page_wakeup_done(src_page_object, src_page);
646 		src_page = VM_PAGE_NULL;
647 		vm_object_paging_end(src_page_object);
648 		vm_object_unlock(src_page_object);
649 
650 		if (top_page != VM_PAGE_NULL) {
651 			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
652 			vm_object_lock(src_top_object);
653 			VM_PAGE_FREE(top_page);
654 			vm_object_paging_end(src_top_object);
655 			vm_object_unlock(src_top_object);
656 		}
657 	}
658 
659 done:
660 	if (upl != NULL) {
661 		/* clean up the UPL */
662 
663 		/*
664 		 * The pages are currently dirty because we've just been
665 		 * writing on them, but as far as we're concerned, they're
666 		 * clean since they contain their "original" contents as
667 		 * provided by us, the pager.
668 		 * Tell the UPL to mark them "clean".
669 		 */
670 		upl_clear_dirty(upl, TRUE);
671 
672 		/* abort or commit the UPL */
673 		if (retval != KERN_SUCCESS) {
674 			upl_abort(upl, 0);
675 			if (retval == KERN_ABORTED) {
676 				wait_result_t   wait_result;
677 
678 				/*
679 				 * We aborted the fault and did not provide
680 				 * any contents for the requested pages but
681 				 * the pages themselves are not invalid, so
682 				 * let's return success and let the caller
683 				 * retry the fault, in case it might succeed
684 				 * later (when the decryption code is up and
685 				 * running in the kernel, for example).
686 				 */
687 				retval = KERN_SUCCESS;
688 				/*
689 				 * Wait a little bit first to avoid using
690 				 * too much CPU time retrying and failing
691 				 * the same fault over and over again.
692 				 */
693 				wait_result = assert_wait_timeout(
694 					(event_t) apple_protect_pager_data_request,
695 					THREAD_UNINT,
696 					10000,  /* 10ms */
697 					NSEC_PER_USEC);
698 				assert(wait_result == THREAD_WAITING);
699 				wait_result = thread_block(THREAD_CONTINUE_NULL);
700 				assert(wait_result == THREAD_TIMED_OUT);
701 			}
702 		} else {
703 			boolean_t empty;
704 			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
705 			    "upl %p offset 0x%llx size 0x%x",
706 			    upl, upl->u_offset, upl->u_size);
707 			upl_commit_range(upl, 0, upl->u_size,
708 			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
709 			    upl_pl, pl_count, &empty);
710 		}
711 
712 		/* and deallocate the UPL */
713 		upl_deallocate(upl);
714 		upl = NULL;
715 	}
716 	if (src_top_object != VM_OBJECT_NULL) {
717 		vm_object_deallocate(src_top_object);
718 	}
719 	return retval;
720 }
721 
722 /*
723  * apple_protect_pager_reference()
724  *
725  * Get a reference on this memory object.
726  * For external usage only.  Assumes that the initial reference count is not 0,
727  * i.e. one should not "revive" a dead pager this way.
728  */
729 void
730 apple_protect_pager_reference(
731 	memory_object_t         mem_obj)
732 {
733 	apple_protect_pager_t   pager;
734 
735 	pager = apple_protect_pager_lookup(mem_obj);
736 
737 	lck_mtx_lock(&apple_protect_pager_lock);
738 	os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
739 	lck_mtx_unlock(&apple_protect_pager_lock);
740 }
741 
742 
743 /*
744  * apple_protect_pager_dequeue:
745  *
746  * Removes a pager from the list of pagers.
747  *
748  * The caller must hold "apple_protect_pager_lock".
749  */
750 void
751 apple_protect_pager_dequeue(
752 	apple_protect_pager_t pager)
753 {
754 	assert(!pager->is_mapped);
755 
756 	queue_remove(&apple_protect_pager_queue,
757 	    pager,
758 	    apple_protect_pager_t,
759 	    pager_queue);
760 	pager->pager_queue.next = NULL;
761 	pager->pager_queue.prev = NULL;
762 
763 	apple_protect_pager_count--;
764 }
765 
766 /*
767  * apple_protect_pager_terminate_internal:
768  *
769  * Trigger the asynchronous termination of the memory object associated
770  * with this pager.
771  * When the memory object is terminated, there will be one more call
772  * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
773  * to finish the clean up.
774  *
775  * "apple_protect_pager_lock" should not be held by the caller.
776  * We don't need the lock because the pager has already been removed from
777  * the pagers' list and is now ours exclusively.
778  */
779 void
780 apple_protect_pager_terminate_internal(
781 	apple_protect_pager_t pager)
782 {
783 	assert(pager->is_ready);
784 	assert(!pager->is_mapped);
785 
786 	if (pager->backing_object != VM_OBJECT_NULL) {
787 		vm_object_deallocate(pager->backing_object);
788 		pager->backing_object = VM_OBJECT_NULL;
789 	}
790 
791 	/* one less pager using this "pager_crypt_info" */
792 #if CRYPT_INFO_DEBUG
793 	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
794 	    __FUNCTION__,
795 	    pager->crypt_info,
796 	    pager->crypt_info->crypt_refcnt);
797 #endif /* CRYPT_INFO_DEBUG */
798 	crypt_info_deallocate(pager->crypt_info);
799 	pager->crypt_info = NULL;
800 
801 	/* trigger the destruction of the memory object */
802 	memory_object_destroy(pager->ap_pgr_hdr.mo_control, VM_OBJECT_DESTROY_PAGER);
803 }
804 
805 /*
806  * apple_protect_pager_deallocate_internal()
807  *
808  * Release a reference on this pager and free it when the last
809  * reference goes away.
810  * Can be called with apple_protect_pager_lock held or not but always returns
811  * with it unlocked.
812  */
813 void
814 apple_protect_pager_deallocate_internal(
815 	apple_protect_pager_t   pager,
816 	boolean_t               locked)
817 {
818 	boolean_t       needs_trimming;
819 	unsigned int    count_unmapped;
820 	os_ref_count_t  ref_count;
821 
822 	if (!locked) {
823 		lck_mtx_lock(&apple_protect_pager_lock);
824 	}
825 
826 	count_unmapped = (apple_protect_pager_count -
827 	    apple_protect_pager_count_mapped);
828 	if (count_unmapped > apple_protect_pager_cache_limit) {
829 		/* we have too many unmapped pagers:  trim some */
830 		needs_trimming = TRUE;
831 	} else {
832 		needs_trimming = FALSE;
833 	}
834 
835 	/* drop a reference on this pager */
836 	ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
837 
838 	if (ref_count == 1) {
839 		/*
840 		 * Only the "named" reference is left, which means that
841 		 * no one is really holding on to this pager anymore.
842 		 * Terminate it.
843 		 */
844 		apple_protect_pager_dequeue(pager);
845 		/* the pager is all ours: no need for the lock now */
846 		lck_mtx_unlock(&apple_protect_pager_lock);
847 		apple_protect_pager_terminate_internal(pager);
848 	} else if (ref_count == 0) {
849 		/*
850 		 * Dropped the existence reference;  the memory object has
851 		 * been terminated.  Do some final cleanup and release the
852 		 * pager structure.
853 		 */
854 		lck_mtx_unlock(&apple_protect_pager_lock);
855 		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
856 			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
857 			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
858 		}
859 		kfree_type(struct apple_protect_pager, pager);
860 		pager = APPLE_PROTECT_PAGER_NULL;
861 	} else {
862 		/* there are still plenty of references:  keep going... */
863 		lck_mtx_unlock(&apple_protect_pager_lock);
864 	}
865 
866 	if (needs_trimming) {
867 		apple_protect_pager_trim();
868 	}
869 	/* caution: lock is not held on return... */
870 }
871 
872 /*
873  * apple_protect_pager_deallocate()
874  *
875  * Release a reference on this pager and free it when the last
876  * reference goes away.
877  */
878 void
879 apple_protect_pager_deallocate(
880 	memory_object_t         mem_obj)
881 {
882 	apple_protect_pager_t   pager;
883 
884 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
885 	pager = apple_protect_pager_lookup(mem_obj);
886 	apple_protect_pager_deallocate_internal(pager, FALSE);
887 }
888 
889 /*
890  * apple_protect_pager_terminate(): nothing to do; cleanup is done later via deallocate.
891  */
892 kern_return_t
893 apple_protect_pager_terminate(
894 #if !DEBUG
895 	__unused
896 #endif
897 	memory_object_t mem_obj)
898 {
899 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));
900 
901 	return KERN_SUCCESS;
902 }
903 
904 /*
905  * apple_protect_pager_map()
906  *
907  * This allows VM to let us, the EMM, know that this memory object
908  * is currently mapped one or more times.  This is called by VM each time
909  * the memory object gets mapped and we take one extra reference on the
910  * memory object to account for all its mappings.
911  */
912 kern_return_t
913 apple_protect_pager_map(
914 	memory_object_t         mem_obj,
915 	__unused vm_prot_t      prot)
916 {
917 	apple_protect_pager_t   pager;
918 
919 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
920 
921 	pager = apple_protect_pager_lookup(mem_obj);
922 
923 	lck_mtx_lock(&apple_protect_pager_lock);
924 	assert(pager->is_ready);
925 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
926 	if (pager->is_mapped == FALSE) {
927 		/*
928 		 * First mapping of this pager:  take an extra reference
929 		 * that will remain until all the mappings of this pager
930 		 * are removed.
931 		 */
932 		pager->is_mapped = TRUE;
933 		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
934 		apple_protect_pager_count_mapped++;
935 	}
936 	lck_mtx_unlock(&apple_protect_pager_lock);
937 
938 	return KERN_SUCCESS;
939 }
940 
941 /*
942  * apple_protect_pager_last_unmap()
943  *
944  * This is called by VM when this memory object is no longer mapped anywhere.
945  */
946 kern_return_t
947 apple_protect_pager_last_unmap(
948 	memory_object_t         mem_obj)
949 {
950 	apple_protect_pager_t   pager;
951 	unsigned int            count_unmapped;
952 
953 	PAGER_DEBUG(PAGER_ALL,
954 	    ("apple_protect_pager_last_unmap: %p\n", mem_obj));
955 
956 	pager = apple_protect_pager_lookup(mem_obj);
957 
958 	lck_mtx_lock(&apple_protect_pager_lock);
959 	if (pager->is_mapped) {
960 		/*
961 		 * All the mappings are gone, so let go of the one extra
962 		 * reference that represents all the mappings of this pager.
963 		 */
964 		apple_protect_pager_count_mapped--;
965 		count_unmapped = (apple_protect_pager_count -
966 		    apple_protect_pager_count_mapped);
967 		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
968 			apple_protect_pager_count_unmapped_max = count_unmapped;
969 		}
970 		pager->is_mapped = FALSE;
971 		apple_protect_pager_deallocate_internal(pager, TRUE);
972 		/* caution: deallocate_internal() released the lock ! */
973 	} else {
974 		lck_mtx_unlock(&apple_protect_pager_lock);
975 	}
976 
977 	return KERN_SUCCESS;
978 }
979 
980 boolean_t
981 apple_protect_pager_backing_object(
982 	memory_object_t mem_obj,
983 	memory_object_offset_t offset,
984 	vm_object_t *backing_object,
985 	vm_object_offset_t *backing_offset)
986 {
987 	apple_protect_pager_t   pager;
988 
989 	PAGER_DEBUG(PAGER_ALL,
990 	    ("apple_protect_pager_backing_object: %p\n", mem_obj));
991 
992 	pager = apple_protect_pager_lookup(mem_obj);
993 
994 	*backing_object = pager->backing_object;
995 	*backing_offset = pager->backing_offset + offset;
996 
997 	return TRUE;
998 }
999 
1000 /*
1001  * apple_protect_pager_lookup(): convert a memory object back to its pager.
1002  */
1003 apple_protect_pager_t
1004 apple_protect_pager_lookup(
1005 	memory_object_t  mem_obj)
1006 {
1007 	apple_protect_pager_t   pager;
1008 
1009 	assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
1010 	pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
1011 	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
1012 	return pager;
1013 }
1014 
1015 apple_protect_pager_t
1016 apple_protect_pager_create(
1017 	vm_object_t             backing_object,
1018 	vm_object_offset_t      backing_offset,
1019 	vm_object_offset_t      crypto_backing_offset,
1020 	struct pager_crypt_info *crypt_info,
1021 	vm_object_offset_t      crypto_start,
1022 	vm_object_offset_t      crypto_end,
1023 	boolean_t               cache_pager)
1024 {
1025 	apple_protect_pager_t   pager, pager2;
1026 	memory_object_control_t control;
1027 	kern_return_t           kr;
1028 	struct pager_crypt_info *old_crypt_info;
1029 
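	/*
	 * Allocate and initialize the new pager optimistically, before taking
	 * the lock; if another thread raced us and already created an
	 * equivalent pager, we'll find it in the queue below and discard
	 * this one.
	 */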
1030 	pager = kalloc_type(struct apple_protect_pager, Z_WAITOK | Z_NOFAIL);
1031 
1032 	/*
1033 	 * The vm_map call takes both named entry ports and raw memory
1034 	 * objects in the same parameter.  We need to make sure that
1035 	 * vm_map does not see this object as a named entry port.  So,
1036 	 * we reserve the first word in the object for a fake object type
1037 	 * setting - that will tell vm_map to use it as a memory object.
1038 	 */
1039 	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
1040 	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
1041 	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1042 	pager->ap_pgr_hdr.mo_last_unmap_ctid = 0;
1043 
1044 	pager->is_ready = FALSE;/* not ready until it has a "name" */
1045 	/* one reference for the caller */
1046 	os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
1047 	pager->is_mapped = FALSE;
1048 	if (cache_pager) {
1049 		/* extra reference for the cache */
1050 		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1051 		pager->is_cached = true;
1052 	} else {
1053 		pager->is_cached = false;
1054 	}
1055 	pager->backing_object = backing_object;
1056 	pager->backing_offset = backing_offset;
1057 	pager->crypto_backing_offset = crypto_backing_offset;
1058 	pager->crypto_start = crypto_start;
1059 	pager->crypto_end = crypto_end;
1060 	pager->crypt_info = crypt_info; /* allocated by caller */
1061 
1062 #if CRYPT_INFO_DEBUG
1063 	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
1064 	    __FUNCTION__,
1065 	    crypt_info,
1066 	    crypt_info->page_decrypt,
1067 	    crypt_info->crypt_end,
1068 	    crypt_info->crypt_ops,
1069 	    crypt_info->crypt_refcnt);
1070 #endif /* CRYPT_INFO_DEBUG */
1071 
1072 	vm_object_reference(backing_object);
1073 
1074 	old_crypt_info = NULL;
1075 
1076 	lck_mtx_lock(&apple_protect_pager_lock);
1077 	/* see if anyone raced us to create a pager for the same object */
1078 	queue_iterate(&apple_protect_pager_queue,
1079 	    pager2,
1080 	    apple_protect_pager_t,
1081 	    pager_queue) {
1082 		if ((pager2->crypt_info->page_decrypt !=
1083 		    crypt_info->page_decrypt) ||
1084 		    (pager2->crypt_info->crypt_end !=
1085 		    crypt_info->crypt_end) ||
1086 		    (pager2->crypt_info->crypt_ops !=
1087 		    crypt_info->crypt_ops)) {
1088 			/* crypt_info contents do not match: next pager */
1089 			continue;
1090 		}
1091 
1092 		/* found a match for crypt_info ... */
1093 		if (old_crypt_info) {
1094 			/* ... already switched to that crypt_info */
1095 			assert(old_crypt_info == pager2->crypt_info);
1096 		} else if (pager2->crypt_info != crypt_info) {
1097 			/* ... switch to that pager's crypt_info */
1098 #if CRYPT_INFO_DEBUG
1099 			printf("CRYPT_INFO %s: reference %p ref %d "
1100 			    "(create match)\n",
1101 			    __FUNCTION__,
1102 			    pager2->crypt_info,
1103 			    pager2->crypt_info->crypt_refcnt);
1104 #endif /* CRYPT_INFO_DEBUG */
1105 			old_crypt_info = pager2->crypt_info;
1106 			crypt_info_reference(old_crypt_info);
1107 			pager->crypt_info = old_crypt_info;
1108 		}
1109 
1110 		if (pager2->backing_object == backing_object &&
1111 		    pager2->backing_offset == backing_offset &&
1112 		    pager2->crypto_backing_offset == crypto_backing_offset &&
1113 		    pager2->crypto_start == crypto_start &&
1114 		    pager2->crypto_end == crypto_end) {
1115 			/* full match: use that pager */
1116 			break;
1117 		}
1118 	}
1119 	if (!queue_end(&apple_protect_pager_queue,
1120 	    (queue_entry_t) pager2)) {
1121 		/* we lost the race, down with the loser... */
1122 		lck_mtx_unlock(&apple_protect_pager_lock);
1123 		vm_object_deallocate(pager->backing_object);
1124 		pager->backing_object = VM_OBJECT_NULL;
1125 #if CRYPT_INFO_DEBUG
1126 		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
1127 		    __FUNCTION__,
1128 		    pager->crypt_info,
1129 		    pager->crypt_info->crypt_refcnt);
1130 #endif /* CRYPT_INFO_DEBUG */
1131 		crypt_info_deallocate(pager->crypt_info);
1132 		pager->crypt_info = NULL;
1133 		kfree_type(struct apple_protect_pager, pager);
1134 		/* ... and go with the winner */
1135 		pager = pager2;
1136 		/* let the winner make sure the pager gets ready */
1137 		return pager;
1138 	}
1139 
1140 	/* enter new pager at the head of our list of pagers */
1141 	queue_enter_first(&apple_protect_pager_queue,
1142 	    pager,
1143 	    apple_protect_pager_t,
1144 	    pager_queue);
1145 	apple_protect_pager_count++;
1146 	if (apple_protect_pager_count > apple_protect_pager_count_max) {
1147 		apple_protect_pager_count_max = apple_protect_pager_count;
1148 	}
1149 	lck_mtx_unlock(&apple_protect_pager_lock);
1150 
1151 	kr = memory_object_create_named((memory_object_t) pager,
1152 	    0,
1153 	    &control);
1154 	assert(kr == KERN_SUCCESS);
1155 
1156 	memory_object_mark_trusted(control);
1157 
1158 	lck_mtx_lock(&apple_protect_pager_lock);
1159 	/* the new pager is now ready to be used */
1160 	pager->is_ready = TRUE;
1161 	lck_mtx_unlock(&apple_protect_pager_lock);
1162 
1163 	/* wakeup anyone waiting for this pager to be ready */
1164 	thread_wakeup(&pager->is_ready);
1165 
1166 	if (old_crypt_info != NULL &&
1167 	    old_crypt_info != crypt_info) {
1168 		/* we re-used an old crypt_info instead of using our new one */
1169 #if CRYPT_INFO_DEBUG
1170 		printf("CRYPT_INFO %s: deallocate %p ref %d "
1171 		    "(create used old)\n",
1172 		    __FUNCTION__,
1173 		    crypt_info,
1174 		    crypt_info->crypt_refcnt);
1175 #endif /* CRYPT_INFO_DEBUG */
1176 		crypt_info_deallocate(crypt_info);
1177 		crypt_info = NULL;
1178 	}
1179 
1180 	return pager;
1181 }
1182 
1183 /*
1184  * apple_protect_pager_setup()
1185  *
1186  * Provide the caller with a memory object backed by the provided
1187  * "backing_object" VM object.  If such a memory object already exists,
1188  * re-use it, otherwise create a new memory object.
1189  */
1190 memory_object_t
1191 apple_protect_pager_setup(
1192 	vm_object_t             backing_object,
1193 	vm_object_offset_t      backing_offset,
1194 	vm_object_offset_t      crypto_backing_offset,
1195 	struct pager_crypt_info *crypt_info,
1196 	vm_object_offset_t      crypto_start,
1197 	vm_object_offset_t      crypto_end,
1198 	boolean_t               cache_pager)
1199 {
1200 	apple_protect_pager_t   pager;
1201 	struct pager_crypt_info *old_crypt_info, *new_crypt_info;
1202 
1203 #if CRYPT_INFO_DEBUG
1204 	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
1205 	    __FUNCTION__,
1206 	    crypt_info,
1207 	    crypt_info->page_decrypt,
1208 	    crypt_info->crypt_end,
1209 	    crypt_info->crypt_ops,
1210 	    crypt_info->crypt_refcnt);
1211 #endif /* CRYPT_INFO_DEBUG */
1212 
1213 	old_crypt_info = NULL;
1214 
1215 	lck_mtx_lock(&apple_protect_pager_lock);
1216 
1217 	queue_iterate(&apple_protect_pager_queue,
1218 	    pager,
1219 	    apple_protect_pager_t,
1220 	    pager_queue) {
1221 		if ((pager->crypt_info->page_decrypt !=
1222 		    crypt_info->page_decrypt) ||
1223 		    (pager->crypt_info->crypt_end !=
1224 		    crypt_info->crypt_end) ||
1225 		    (pager->crypt_info->crypt_ops !=
1226 		    crypt_info->crypt_ops)) {
1227 			/* no match for "crypt_info": next pager */
1228 			continue;
1229 		}
1230 		/* found a match for crypt_info ... */
1231 		if (old_crypt_info) {
1232 			/* ... already switched to that crypt_info */
1233 			assert(old_crypt_info == pager->crypt_info);
1234 		} else {
1235 			/* ... switch to that pager's crypt_info */
1236 			old_crypt_info = pager->crypt_info;
1237 #if CRYPT_INFO_DEBUG
1238 			printf("CRYPT_INFO %s: "
1239 			    "switching crypt_info from %p [%p,%p,%p,%d] "
1240 			    "to %p [%p,%p,%p,%d] from pager %p\n",
1241 			    __FUNCTION__,
1242 			    crypt_info,
1243 			    crypt_info->page_decrypt,
1244 			    crypt_info->crypt_end,
1245 			    crypt_info->crypt_ops,
1246 			    crypt_info->crypt_refcnt,
1247 			    old_crypt_info,
1248 			    old_crypt_info->page_decrypt,
1249 			    old_crypt_info->crypt_end,
1250 			    old_crypt_info->crypt_ops,
1251 			    old_crypt_info->crypt_refcnt,
1252 			    pager);
1253 			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
1254 			    __FUNCTION__,
1255 			    pager->crypt_info,
1256 			    pager->crypt_info->crypt_refcnt);
1257 #endif /* CRYPT_INFO_DEBUG */
1258 			crypt_info_reference(pager->crypt_info);
1259 		}
1260 
1261 		if (pager->backing_object == backing_object &&
1262 		    pager->backing_offset == backing_offset &&
1263 		    pager->crypto_backing_offset == crypto_backing_offset &&
1264 		    pager->crypto_start == crypto_start &&
1265 		    pager->crypto_end == crypto_end) {
1266 			/* full match: use that pager! */
1267 			assert(old_crypt_info == pager->crypt_info);
1268 			assert(old_crypt_info->crypt_refcnt > 1);
1269 #if CRYPT_INFO_DEBUG
1270 			printf("CRYPT_INFO %s: "
1271 			    "pager match with %p crypt_info %p\n",
1272 			    __FUNCTION__,
1273 			    pager,
1274 			    pager->crypt_info);
1275 			printf("CRYPT_INFO %s: deallocate %p ref %d "
1276 			    "(pager match)\n",
1277 			    __FUNCTION__,
1278 			    old_crypt_info,
1279 			    old_crypt_info->crypt_refcnt);
1280 #endif /* CRYPT_INFO_DEBUG */
1281 			/* release the extra ref on crypt_info we got above */
1282 			crypt_info_deallocate(old_crypt_info);
1283 			assert(old_crypt_info->crypt_refcnt > 0);
1284 			/* give extra reference on pager to the caller */
1285 			os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1286 			break;
1287 		}
1288 	}
1289 	if (queue_end(&apple_protect_pager_queue,
1290 	    (queue_entry_t) pager)) {
1291 		lck_mtx_unlock(&apple_protect_pager_lock);
1292 		/* no existing pager for this backing object */
1293 		pager = APPLE_PROTECT_PAGER_NULL;
1294 		if (old_crypt_info) {
1295 			/* use this old crypt_info for new pager */
1296 			new_crypt_info = old_crypt_info;
1297 #if CRYPT_INFO_DEBUG
1298 			printf("CRYPT_INFO %s: "
1299 			    "will use old_crypt_info %p for new pager\n",
1300 			    __FUNCTION__,
1301 			    old_crypt_info);
1302 #endif /* CRYPT_INFO_DEBUG */
1303 		} else {
1304 			/* allocate a new crypt_info for new pager */
1305 			new_crypt_info = kalloc_type(struct pager_crypt_info, Z_WAITOK);
1306 			*new_crypt_info = *crypt_info;
1307 			new_crypt_info->crypt_refcnt = 1;
1308 #if CRYPT_INFO_DEBUG
1309 			printf("CRYPT_INFO %s: "
1310 			    "will use new_crypt_info %p for new pager\n",
1311 			    __FUNCTION__,
1312 			    new_crypt_info);
1313 #endif /* CRYPT_INFO_DEBUG */
1314 		}
1315 		if (new_crypt_info == NULL) {
1316 			/* can't create new pager without a crypt_info */
1317 		} else {
1318 			/* create new pager */
1319 			pager = apple_protect_pager_create(
1320 				backing_object,
1321 				backing_offset,
1322 				crypto_backing_offset,
1323 				new_crypt_info,
1324 				crypto_start,
1325 				crypto_end,
1326 				cache_pager);
1327 		}
1328 		if (pager == APPLE_PROTECT_PAGER_NULL) {
1329 			/* could not create a new pager */
1330 			if (new_crypt_info == old_crypt_info) {
1331 				/* release extra reference on old_crypt_info */
1332 #if CRYPT_INFO_DEBUG
1333 				printf("CRYPT_INFO %s: deallocate %p ref %d "
1334 				    "(create fail old_crypt_info)\n",
1335 				    __FUNCTION__,
1336 				    old_crypt_info,
1337 				    old_crypt_info->crypt_refcnt);
1338 #endif /* CRYPT_INFO_DEBUG */
1339 				crypt_info_deallocate(old_crypt_info);
1340 				old_crypt_info = NULL;
1341 			} else {
1342 				/* release unused new_crypt_info */
1343 				assert(new_crypt_info->crypt_refcnt == 1);
1344 #if CRYPT_INFO_DEBUG
1345 				printf("CRYPT_INFO %s: deallocate %p ref %d "
1346 				    "(create fail new_crypt_info)\n",
1347 				    __FUNCTION__,
1348 				    new_crypt_info,
1349 				    new_crypt_info->crypt_refcnt);
1350 #endif /* CRYPT_INFO_DEBUG */
1351 				crypt_info_deallocate(new_crypt_info);
1352 				new_crypt_info = NULL;
1353 			}
1354 			return MEMORY_OBJECT_NULL;
1355 		}
1356 		lck_mtx_lock(&apple_protect_pager_lock);
1357 	} else {
1358 		assert(old_crypt_info == pager->crypt_info);
1359 	}
1360 
1361 	while (!pager->is_ready) {
1362 		lck_mtx_sleep(&apple_protect_pager_lock,
1363 		    LCK_SLEEP_DEFAULT,
1364 		    &pager->is_ready,
1365 		    THREAD_UNINT);
1366 	}
1367 	lck_mtx_unlock(&apple_protect_pager_lock);
1368 
1369 	return (memory_object_t) pager;
1370 }
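
/*
 * Illustrative only: a minimal sketch of how a caller (e.g.
 * vm_map_apple_protected()) might obtain and release one of these pagers.
 * The helper name below is hypothetical, and the actual mapping of the
 * returned memory object (copy-on-write, in the target map) is omitted.
 */
#if 0 /* sketch, not compiled */
static kern_return_t
apple_protect_pager_setup_example(
	vm_object_t             file_object,   /* VM object backed by the encrypted file */
	vm_object_offset_t      file_offset,
	vm_object_offset_t      crypto_start,
	vm_object_offset_t      crypto_end,
	struct pager_crypt_info *crypt_info)
{
	memory_object_t mo;

	mo = apple_protect_pager_setup(file_object,
	    file_offset,
	    file_offset,              /* crypto_backing_offset */
	    crypt_info,
	    crypto_start,
	    crypto_end,
	    FALSE);                   /* cache_pager */
	if (mo == MEMORY_OBJECT_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	/* ... map "mo" copy-on-write into the target map ... */
	memory_object_deallocate(mo); /* drop the reference "setup" gave us */
	return KERN_SUCCESS;
}
#endif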
1371 
1372 void
1373 apple_protect_pager_trim(void)
1374 {
1375 	apple_protect_pager_t   pager, prev_pager;
1376 	queue_head_t            trim_queue;
1377 	unsigned int            num_trim;
1378 	unsigned int            count_unmapped;
1379 
1380 	lck_mtx_lock(&apple_protect_pager_lock);
1381 
1382 	/*
1383 	 * We have too many pagers, try and trim some unused ones,
1384 	 * starting with the oldest pager at the end of the queue.
1385 	 */
1386 	queue_init(&trim_queue);
1387 	num_trim = 0;
1388 
1389 	for (pager = (apple_protect_pager_t)
1390 	    queue_last(&apple_protect_pager_queue);
1391 	    !queue_end(&apple_protect_pager_queue,
1392 	    (queue_entry_t) pager);
1393 	    pager = prev_pager) {
1394 		/* get prev elt before we dequeue */
1395 		prev_pager = (apple_protect_pager_t)
1396 		    queue_prev(&pager->pager_queue);
1397 
1398 		if (pager->is_cached &&
1399 		    os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
1400 		    pager->is_ready &&
1401 		    !pager->is_mapped) {
1402 			/* this pager can be trimmed */
1403 			num_trim++;
1404 			/* remove this pager from the main list ... */
1405 			apple_protect_pager_dequeue(pager);
1406 			/* ... and add it to our trim queue */
1407 			queue_enter_first(&trim_queue,
1408 			    pager,
1409 			    apple_protect_pager_t,
1410 			    pager_queue);
1411 
1412 			count_unmapped = (apple_protect_pager_count -
1413 			    apple_protect_pager_count_mapped);
1414 			if (count_unmapped <= apple_protect_pager_cache_limit) {
1415 				/* we have enough pagers to trim */
1416 				/* we've trimmed enough pagers */
1417 			}
1418 		}
1419 	}
1420 	if (num_trim > apple_protect_pager_num_trim_max) {
1421 		apple_protect_pager_num_trim_max = num_trim;
1422 	}
1423 	apple_protect_pager_num_trim_total += num_trim;
1424 
1425 	lck_mtx_unlock(&apple_protect_pager_lock);
1426 
1427 	/* terminate the trimmed pagers */
1428 	while (!queue_empty(&trim_queue)) {
1429 		queue_remove_first(&trim_queue,
1430 		    pager,
1431 		    apple_protect_pager_t,
1432 		    pager_queue);
1433 		assert(pager->is_cached);
1434 		pager->is_cached = false;
1435 		pager->pager_queue.next = NULL;
1436 		pager->pager_queue.prev = NULL;
1437 		/*
1438 		 * We can't call deallocate_internal() because the pager
1439 		 * has already been dequeued, but we still need to remove
1440 		 * a reference.
1441 		 */
1442 		os_ref_count_t __assert_only count;
1443 		count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
1444 		assert(count == 1);
1445 		apple_protect_pager_terminate_internal(pager);
1446 	}
1447 }
1448 
1449 
1450 void
1451 crypt_info_reference(
1452 	struct pager_crypt_info *crypt_info)
1453 {
1454 	assert(crypt_info->crypt_refcnt != 0);
1455 #if CRYPT_INFO_DEBUG
1456 	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1457 	    __FUNCTION__,
1458 	    crypt_info,
1459 	    crypt_info->crypt_refcnt,
1460 	    crypt_info->crypt_refcnt + 1);
1461 #endif /* CRYPT_INFO_DEBUG */
1462 	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
1463 }
1464 
1465 void
1466 crypt_info_deallocate(
1467 	struct pager_crypt_info *crypt_info)
1468 {
1469 #if CRYPT_INFO_DEBUG
1470 	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1471 	    __FUNCTION__,
1472 	    crypt_info,
1473 	    crypt_info->crypt_refcnt,
1474 	    crypt_info->crypt_refcnt - 1);
1475 #endif /* CRYPT_INFO_DEBUG */
1476 	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
1477 	if (crypt_info->crypt_refcnt == 0) {
1478 		/* deallocate any crypt module data */
1479 		if (crypt_info->crypt_end) {
1480 			crypt_info->crypt_end(crypt_info->crypt_ops);
1481 			crypt_info->crypt_end = NULL;
1482 		}
1483 #if CRYPT_INFO_DEBUG
1484 		printf("CRYPT_INFO %s: freeing %p\n",
1485 		    __FUNCTION__,
1486 		    crypt_info);
1487 #endif /* CRYPT_INFO_DEBUG */
1488 		kfree_type(struct pager_crypt_info, crypt_info);
1489 	}
1490 }
1491 
1492 static uint64_t
1493 apple_protect_pager_purge(
1494 	apple_protect_pager_t pager)
1495 {
1496 	uint64_t pages_purged;
1497 	vm_object_t object;
1498 
1499 	pages_purged = 0;
1500 	object = memory_object_to_vm_object((memory_object_t) pager);
1501 	assert(object != VM_OBJECT_NULL);
1502 	vm_object_lock(object);
1503 	pages_purged = object->resident_page_count;
1504 	vm_object_reap_pages(object, REAP_DATA_FLUSH_CLEAN);
1505 	pages_purged -= object->resident_page_count;
1506 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1507 	vm_object_unlock(object);
1508 	return pages_purged;
1509 }
1510 
1511 uint64_t
1512 apple_protect_pager_purge_all(void)
1513 {
1514 	uint64_t pages_purged;
1515 	apple_protect_pager_t pager;
1516 
1517 	pages_purged = 0;
1518 	lck_mtx_lock(&apple_protect_pager_lock);
1519 	queue_iterate(&apple_protect_pager_queue, pager, apple_protect_pager_t, pager_queue) {
1520 		pages_purged += apple_protect_pager_purge(pager);
1521 	}
1522 	lck_mtx_unlock(&apple_protect_pager_lock);
1523 #if DEVELOPMENT || DEBUG
1524 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1525 #endif /* DEVELOPMENT || DEBUG */
1526 	return pages_purged;
1527 }
1528