xref: /xnu-8020.121.3/osfmk/vm/vm_fourk_pager.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2014-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49 
50 #include <ipc/ipc_port.h>
51 #include <ipc/ipc_space.h>
52 
53 #include <vm/vm_fault.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_pageout.h>
56 #include <vm/memory_object.h>
57 #include <vm/vm_pageout.h>
58 #include <vm/vm_protos.h>
59 #include <vm/vm_kern.h>
60 
61 
62 /*
63  * 4K MEMORY PAGER
64  *
65  * This external memory manager (EMM) handles memory mappings that are
66  * 4K-aligned but not page-aligned and can therefore not be mapped directly.
67  *
68  * It mostly handles page-in requests (from memory_object_data_request()) by
69  * getting the data needed to fill in each 4K-chunk.  That can require
70  * getting data from one or two pages from its backing VM object
71  * (a file or a "apple-protected" pager backed by an encrypted file), and
72  * copies the data to another page so that it is aligned as expected by
73  * the mapping.
74  *
75  * Returned pages can never be dirtied and must always be mapped copy-on-write,
76  * so the memory manager does not need to handle page-out requests (from
77  * memory_object_data_return()).
78  *
79  */
80 
81 /* forward declarations */
82 void fourk_pager_reference(memory_object_t mem_obj);
83 void fourk_pager_deallocate(memory_object_t mem_obj);
84 kern_return_t fourk_pager_init(memory_object_t mem_obj,
85     memory_object_control_t control,
86     memory_object_cluster_size_t pg_size);
87 kern_return_t fourk_pager_terminate(memory_object_t mem_obj);
88 kern_return_t fourk_pager_data_request(memory_object_t mem_obj,
89     memory_object_offset_t offset,
90     memory_object_cluster_size_t length,
91     vm_prot_t protection_required,
92     memory_object_fault_info_t fault_info);
93 kern_return_t fourk_pager_data_return(memory_object_t mem_obj,
94     memory_object_offset_t offset,
95     memory_object_cluster_size_t    data_cnt,
96     memory_object_offset_t *resid_offset,
97     int *io_error,
98     boolean_t dirty,
99     boolean_t kernel_copy,
100     int upl_flags);
101 kern_return_t fourk_pager_data_initialize(memory_object_t mem_obj,
102     memory_object_offset_t offset,
103     memory_object_cluster_size_t data_cnt);
104 kern_return_t fourk_pager_data_unlock(memory_object_t mem_obj,
105     memory_object_offset_t offset,
106     memory_object_size_t size,
107     vm_prot_t desired_access);
108 kern_return_t fourk_pager_synchronize(memory_object_t mem_obj,
109     memory_object_offset_t offset,
110     memory_object_size_t length,
111     vm_sync_t sync_flags);
112 kern_return_t fourk_pager_map(memory_object_t mem_obj,
113     vm_prot_t prot);
114 kern_return_t fourk_pager_last_unmap(memory_object_t mem_obj);
115 
116 /*
117  * Vector of VM operations for this EMM.
118  * These routines are invoked by VM via the memory_object_*() interfaces.
119  */
const struct memory_object_pager_ops fourk_pager_ops = {
	.memory_object_reference = fourk_pager_reference,
	.memory_object_deallocate = fourk_pager_deallocate,
	.memory_object_init = fourk_pager_init,
	.memory_object_terminate = fourk_pager_terminate,
	.memory_object_data_request = fourk_pager_data_request,
	.memory_object_data_return = fourk_pager_data_return,
	.memory_object_data_initialize = fourk_pager_data_initialize,
	.memory_object_data_unlock = fourk_pager_data_unlock,
	.memory_object_synchronize = fourk_pager_synchronize,
	.memory_object_map = fourk_pager_map,
	.memory_object_last_unmap = fourk_pager_last_unmap,
	.memory_object_data_reclaim = NULL,     /* not supported by this pager */
	.memory_object_backing_object = NULL,   /* not supported by this pager */
	.memory_object_pager_name = "fourk_pager"
};
136 
137 /*
138  * The "fourk_pager" describes a memory object backed by
139  * the "4K" EMM.
140  */
#define FOURK_PAGER_SLOTS 4     /* 16K / 4K */
typedef struct fourk_pager_backing {
	vm_object_t             backing_object; /* VM_OBJECT_NULL or (vm_object_t)-1 when slot is empty */
	vm_object_offset_t      backing_offset;
} *fourk_pager_backing_t;
typedef struct fourk_pager {
	/* mandatory generic header */
	struct memory_object fourk_pgr_hdr;

	/* pager-specific data */
	queue_chain_t           pager_queue;    /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define fourk_pgr_hdr_ref       fourk_pgr_hdr.mo_ref
#else
	os_ref_atomic_t         fourk_pgr_hdr_ref;
#endif
	bool    is_ready;       /* is this pager ready ? */
	bool    is_mapped;      /* is this mem_obj mapped ? */
	struct fourk_pager_backing slots[FOURK_PAGER_SLOTS]; /* backing for each
	                                                      *  4K-chunk */
} *fourk_pager_t;
#define FOURK_PAGER_NULL        ((fourk_pager_t) NULL)
163 
164 /*
165  * List of memory objects managed by this EMM.
166  * The list is protected by the "fourk_pager_lock" lock.
167  */
int fourk_pager_count = 0;              /* number of pagers */
int fourk_pager_count_mapped = 0;       /* number of pagers that are mapped */
queue_head_t fourk_pager_queue = QUEUE_HEAD_INITIALIZER(fourk_pager_queue);
LCK_GRP_DECLARE(fourk_pager_lck_grp, "4K-pager");
LCK_MTX_DECLARE(fourk_pager_lock, &fourk_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int fourk_pager_cache_limit = 0;

/*
 * Statistics & counters.
 */
int fourk_pager_count_max = 0;
int fourk_pager_count_unmapped_max = 0;
int fourk_pager_num_trim_max = 0;
int fourk_pager_num_trim_total = 0;
186 
187 /* internal prototypes */
188 fourk_pager_t fourk_pager_lookup(memory_object_t mem_obj);
189 void fourk_pager_dequeue(fourk_pager_t pager);
190 void fourk_pager_deallocate_internal(fourk_pager_t pager,
191     boolean_t locked);
192 void fourk_pager_terminate_internal(fourk_pager_t pager);
193 void fourk_pager_trim(void);
194 
195 
#if DEBUG
/* bitmask of debug traces to enable; set via debugger or boot-arg patching */
int fourk_pagerdebug = 0;
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

/* printf()s the argument list A when all bits in LEVEL are enabled */
#define PAGER_DEBUG(LEVEL, A)                                           \
	MACRO_BEGIN                                                     \
	if ((fourk_pagerdebug & LEVEL)==LEVEL) {                \
	        printf A;                                               \
	}                                                               \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
211 
212 
213 /*
214  * fourk_pager_init()
215  *
216  * Initialize the memory object and makes it ready to be used and mapped.
217  */
kern_return_t
fourk_pager_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	fourk_pager_t   pager;
	kern_return_t           kr;
	memory_object_attr_info_data_t  attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("fourk_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = fourk_pager_lookup(mem_obj);

	/* the pager keeps a reference on its control port */
	memory_object_control_reference(control);

	pager->fourk_pgr_hdr.mo_control = control;

	/*
	 * COPY_DELAY: pages from this pager are always mapped
	 * copy-on-write (see file header), so delayed copies are safe.
	 */
	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("fourk_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
		memory_object_mark_eligible_for_secluded(control, TRUE);
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}
269 
270 /*
271  * fourk_pager_data_return()
272  *
273  * Handles page-out requests from VM.  This should never happen since
274  * the pages provided by this EMM are not supposed to be dirty or dirtied
275  * and VM should simply discard the contents and reclaim the pages if it
276  * needs to.
277  */
kern_return_t
fourk_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t           data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	/* this pager's pages can never be dirtied, so page-outs are a bug */
	panic("fourk_pager_data_return: should never get called");
	return KERN_FAILURE;
}
292 
/* Unsupported operation for this pager: VM should never invoke it. */
kern_return_t
fourk_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t           data_cnt)
{
	panic("fourk_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}
302 
/* Unsupported operation for this pager: always denied. */
kern_return_t
fourk_pager_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t           size,
	__unused vm_prot_t              desired_access)
{
	return KERN_FAILURE;
}
312 
313 /*
314  * fourk_pager_reference()
315  *
316  * Get a reference on this memory object.
317  * For external usage only.  Assumes that the initial reference count is not 0,
318  * i.e one should not "revive" a dead pager this way.
319  */
320 void
fourk_pager_reference(memory_object_t mem_obj)321 fourk_pager_reference(
322 	memory_object_t         mem_obj)
323 {
324 	fourk_pager_t   pager;
325 
326 	pager = fourk_pager_lookup(mem_obj);
327 
328 	lck_mtx_lock(&fourk_pager_lock);
329 	os_ref_retain_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);
330 	lck_mtx_unlock(&fourk_pager_lock);
331 }
332 
333 
334 /*
335  * fourk_pager_dequeue:
336  *
337  * Removes a pager from the list of pagers.
338  *
339  * The caller must hold "fourk_pager_lock".
340  */
void
fourk_pager_dequeue(
	fourk_pager_t pager)
{
	/* a mapped pager holds an extra reference and must not be dequeued */
	assert(!pager->is_mapped);

	queue_remove(&fourk_pager_queue,
	    pager,
	    fourk_pager_t,
	    pager_queue);
	/* poison the links so a stale re-queue/re-remove is detectable */
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	fourk_pager_count--;
}
356 
357 /*
358  * fourk_pager_terminate_internal:
359  *
360  * Trigger the asynchronous termination of the memory object associated
361  * with this pager.
362  * When the memory object is terminated, there will be one more call
363  * to memory_object_deallocate() (i.e. fourk_pager_deallocate())
364  * to finish the clean up.
365  *
366  * "fourk_pager_lock" should not be held by the caller.
367  * We don't need the lock because the pager has already been removed from
368  * the pagers' list and is now ours exclusively.
369  */
void
fourk_pager_terminate_internal(
	fourk_pager_t pager)
{
	int i;

	assert(pager->is_ready);
	assert(!pager->is_mapped);

	/*
	 * Release our references on the backing VM objects and mark each
	 * slot with (vm_object_t)-1, the "no backing" sentinel also used
	 * by fourk_pager_create().
	 */
	for (i = 0; i < FOURK_PAGER_SLOTS; i++) {
		if (pager->slots[i].backing_object != VM_OBJECT_NULL &&
		    pager->slots[i].backing_object != (vm_object_t) -1) {
			vm_object_deallocate(pager->slots[i].backing_object);
			pager->slots[i].backing_object = (vm_object_t) -1;
			pager->slots[i].backing_offset = (vm_object_offset_t) -1;
		}
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->fourk_pgr_hdr.mo_control, 0);
}
391 
392 /*
393  * fourk_pager_deallocate_internal()
394  *
395  * Release a reference on this pager and free it when the last
396  * reference goes away.
397  * Can be called with fourk_pager_lock held or not but always returns
398  * with it unlocked.
399  */
void
fourk_pager_deallocate_internal(
	fourk_pager_t   pager,
	boolean_t               locked)
{
	boolean_t       needs_trimming;
	int             count_unmapped;
	os_ref_count_t  ref_count;

	if (!locked) {
		lck_mtx_lock(&fourk_pager_lock);
	}

	/*
	 * Decide up-front (while we still hold the lock) whether the
	 * unmapped-pager cache has outgrown its limit; the actual trim
	 * happens at the end, after the lock is dropped.
	 */
	count_unmapped = (fourk_pager_count -
	    fourk_pager_count_mapped);
	if (count_unmapped > fourk_pager_cache_limit) {
		/* we have too many unmapped pagers:  trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		fourk_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&fourk_pager_lock);
		fourk_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&fourk_pager_lock);
		if (pager->fourk_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->fourk_pgr_hdr.mo_control);
			pager->fourk_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct fourk_pager, pager);
		pager = FOURK_PAGER_NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&fourk_pager_lock);
	}

	if (needs_trimming) {
		fourk_pager_trim();
	}
	/* caution: lock is not held on return... */
}
458 
459 /*
460  * fourk_pager_deallocate()
461  *
462  * Release a reference on this pager and free it when the last
463  * reference goes away.
464  */
465 void
fourk_pager_deallocate(memory_object_t mem_obj)466 fourk_pager_deallocate(
467 	memory_object_t         mem_obj)
468 {
469 	fourk_pager_t   pager;
470 
471 	PAGER_DEBUG(PAGER_ALL, ("fourk_pager_deallocate: %p\n", mem_obj));
472 	pager = fourk_pager_lookup(mem_obj);
473 	fourk_pager_deallocate_internal(pager, FALSE);
474 }
475 
476 /*
477  *
478  */
/* Nothing to do at terminate time; cleanup happens on final deallocate. */
kern_return_t
fourk_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("fourk_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}
490 
491 /*
492  *
493  */
/* memory_object_synchronize is obsolete; reaching this is a bug. */
kern_return_t
fourk_pager_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              sync_flags)
{
	panic("fourk_pager_synchronize: memory_object_synchronize no longer supported");
	return KERN_FAILURE;
}
504 
505 /*
506  * fourk_pager_map()
507  *
508  * This allows VM to let us, the EMM, know that this memory object
509  * is currently mapped one or more times.  This is called by VM each time
510  * the memory object gets mapped and we take one extra reference on the
511  * memory object to account for all its mappings.
512  */
kern_return_t
fourk_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	fourk_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL, ("fourk_pager_map: %p\n", mem_obj));

	pager = fourk_pager_lookup(mem_obj);

	lck_mtx_lock(&fourk_pager_lock);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager:  take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);
		fourk_pager_count_mapped++;
	}
	lck_mtx_unlock(&fourk_pager_lock);

	return KERN_SUCCESS;
}
541 
542 /*
543  * fourk_pager_last_unmap()
544  *
545  * This is called by VM when this memory object is no longer mapped anywhere.
546  */
kern_return_t
fourk_pager_last_unmap(
	memory_object_t         mem_obj)
{
	fourk_pager_t   pager;
	int                     count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
	    ("fourk_pager_last_unmap: %p\n", mem_obj));

	pager = fourk_pager_lookup(mem_obj);

	lck_mtx_lock(&fourk_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		fourk_pager_count_mapped--;
		/* track the high-water mark of unmapped (cached) pagers */
		count_unmapped = (fourk_pager_count -
		    fourk_pager_count_mapped);
		if (count_unmapped > fourk_pager_count_unmapped_max) {
			fourk_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		fourk_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&fourk_pager_lock);
	}

	return KERN_SUCCESS;
}
580 
581 
582 /*
583  *
584  */
585 fourk_pager_t
fourk_pager_lookup(memory_object_t mem_obj)586 fourk_pager_lookup(
587 	memory_object_t  mem_obj)
588 {
589 	fourk_pager_t   pager;
590 
591 	assert(mem_obj->mo_pager_ops == &fourk_pager_ops);
592 	pager = (fourk_pager_t) mem_obj;
593 	assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0);
594 	return pager;
595 }
596 
void
fourk_pager_trim(void)
{
	fourk_pager_t   pager, prev_pager;
	queue_head_t            trim_queue;
	int                     num_trim;
	int                     count_unmapped;

	lck_mtx_lock(&fourk_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (fourk_pager_t)
	    queue_last(&fourk_pager_queue);
	    !queue_end(&fourk_pager_queue,
	    (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (fourk_pager_t)
		    queue_prev(&pager->pager_queue);

		/* refcount 2 == existence + named references only: unused */
		if (os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			fourk_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    fourk_pager_t,
			    pager_queue);

			count_unmapped = (fourk_pager_count -
			    fourk_pager_count_mapped);
			if (count_unmapped <= fourk_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	if (num_trim > fourk_pager_num_trim_max) {
		fourk_pager_num_trim_max = num_trim;
	}
	fourk_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&fourk_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    fourk_pager_t,
		    pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		(void)os_ref_release_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);
		fourk_pager_terminate_internal(pager);
	}
}
669 
670 
671 
672 
673 
674 
675 vm_object_t
fourk_pager_to_vm_object(memory_object_t mem_obj)676 fourk_pager_to_vm_object(
677 	memory_object_t mem_obj)
678 {
679 	fourk_pager_t   pager;
680 	vm_object_t     object;
681 
682 	pager = fourk_pager_lookup(mem_obj);
683 	if (pager == NULL) {
684 		return VM_OBJECT_NULL;
685 	}
686 
687 	assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0);
688 	assert(pager->fourk_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL);
689 	object = memory_object_control_to_vm_object(pager->fourk_pgr_hdr.mo_control);
690 	assert(object != VM_OBJECT_NULL);
691 	return object;
692 }
693 
/*
 * Allocate and initialize a new "4K" pager, register it on the global
 * pager list and return it as a memory object ready to be mapped.
 * The caller receives the "setup" reference; the pager also holds its
 * own "existence" reference.
 */
memory_object_t
fourk_pager_create(void)
{
	fourk_pager_t           pager;
	memory_object_control_t control;
	kern_return_t           kr;
	int                     i;

#if 00
	if (PAGE_SIZE_64 == FOURK_PAGE_SIZE) {
		panic("fourk_pager_create: page size is 4K !?");
	}
#endif

	pager = kalloc_type(struct fourk_pager, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->fourk_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->fourk_pgr_hdr.mo_pager_ops = &fourk_pager_ops;
	pager->fourk_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	os_ref_init_count_raw(&pager->fourk_pgr_hdr_ref, NULL, 2); /* existence + setup reference */
	pager->is_ready = FALSE; /* not ready until it has a "name" */
	pager->is_mapped = FALSE;

	/* all slots start out empty: (vm_object_t)-1 is the "no backing" sentinel */
	for (i = 0; i < FOURK_PAGER_SLOTS; i++) {
		pager->slots[i].backing_object = (vm_object_t) -1;
		pager->slots[i].backing_offset = (vm_object_offset_t) -1;
	}

	lck_mtx_lock(&fourk_pager_lock);

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&fourk_pager_queue,
	    pager,
	    fourk_pager_t,
	    pager_queue);
	fourk_pager_count++;
	if (fourk_pager_count > fourk_pager_count_max) {
		fourk_pager_count_max = fourk_pager_count;
	}
	lck_mtx_unlock(&fourk_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&fourk_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&fourk_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return (memory_object_t) pager;
}
760 
761 /*
762  * fourk_pager_data_request()
763  *
764  * Handles page-in requests from VM.
765  */
int fourk_pager_data_request_debug = 0; /* set non-zero to printf() page-in details */
767 kern_return_t
fourk_pager_data_request(memory_object_t mem_obj,memory_object_offset_t offset,memory_object_cluster_size_t length,__unused vm_prot_t protection_required,memory_object_fault_info_t mo_fault_info)768 fourk_pager_data_request(
769 	memory_object_t         mem_obj,
770 	memory_object_offset_t  offset,
771 	memory_object_cluster_size_t            length,
772 #if !DEBUG
773 	__unused
774 #endif
775 	vm_prot_t               protection_required,
776 	memory_object_fault_info_t mo_fault_info)
777 {
778 	fourk_pager_t           pager;
779 	memory_object_control_t mo_control;
780 	upl_t                   upl;
781 	int                     upl_flags;
782 	upl_size_t              upl_size;
783 	upl_page_info_t         *upl_pl;
784 	unsigned int            pl_count;
785 	vm_object_t             dst_object;
786 	kern_return_t           kr, retval;
787 	vm_offset_t             kernel_mapping;
788 	vm_offset_t             src_vaddr, dst_vaddr;
789 	vm_offset_t             cur_offset;
790 	int                     sub_page;
791 	int                     sub_page_idx, sub_page_cnt;
792 
793 	pager = fourk_pager_lookup(mem_obj);
794 	assert(pager->is_ready);
795 	assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 1); /* pager is alive and mapped */
796 
797 	PAGER_DEBUG(PAGER_PAGEIN, ("fourk_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
798 
799 	retval = KERN_SUCCESS;
800 	kernel_mapping = 0;
801 
802 	offset = memory_object_trunc_page(offset);
803 
804 	/*
805 	 * Gather in a UPL all the VM pages requested by VM.
806 	 */
807 	mo_control = pager->fourk_pgr_hdr.mo_control;
808 
809 	upl_size = length;
810 	upl_flags =
811 	    UPL_RET_ONLY_ABSENT |
812 	    UPL_SET_LITE |
813 	    UPL_NO_SYNC |
814 	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
815 	    UPL_SET_INTERNAL;
816 	pl_count = 0;
817 	kr = memory_object_upl_request(mo_control,
818 	    offset, upl_size,
819 	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_NONE);
820 	if (kr != KERN_SUCCESS) {
821 		retval = kr;
822 		goto done;
823 	}
824 	dst_object = memory_object_control_to_vm_object(mo_control);
825 	assert(dst_object != VM_OBJECT_NULL);
826 
827 #if __x86_64__ || __arm__ || __arm64__
828 	/* use the 1-to-1 mapping of physical memory */
829 #else /* __x86_64__ || __arm__ || __arm64__ */
830 	/*
831 	 * Reserve 2 virtual pages in the kernel address space to map the
832 	 * source and destination physical pages when it's their turn to
833 	 * be processed.
834 	 */
835 
836 	kr = kmem_alloc(kernel_map, &kernel_mapping, ptoa(2),
837 	    KMA_DATA | KMA_KOBJECT | KMA_PAGEABLE, VM_KERN_MEMORY_NONE);
838 	if (kr != KERN_SUCCESS) {
839 		retval = kr;
840 		goto done;
841 	}
842 	src_vaddr = kernel_mapping;
843 	dst_vaddr = kernel_mapping + PAGE_SIZE;
844 #endif /* __x86_64__ || __arm__ || __arm64__ */
845 
846 	/*
847 	 * Fill in the contents of the pages requested by VM.
848 	 */
849 	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
850 	pl_count = length / PAGE_SIZE;
851 	for (cur_offset = 0;
852 	    retval == KERN_SUCCESS && cur_offset < length;
853 	    cur_offset += PAGE_SIZE) {
854 		ppnum_t dst_pnum;
855 		int num_subpg_signed, num_subpg_validated;
856 		int num_subpg_tainted, num_subpg_nx;
857 
858 		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
859 			/* this page is not in the UPL: skip it */
860 			continue;
861 		}
862 
863 		/*
864 		 * Establish an explicit pmap mapping of the destination
865 		 * physical page.
866 		 * We can't do a regular VM mapping because the VM page
867 		 * is "busy".
868 		 */
869 		dst_pnum = (ppnum_t)
870 		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
871 		assert(dst_pnum != 0);
872 		dst_vaddr = (vm_map_offset_t)
873 		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
874 
875 		/* retrieve appropriate data for each 4K-page in this page */
876 		if (PAGE_SHIFT == FOURK_PAGE_SHIFT &&
877 		    page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
878 			/*
879 			 * Find the slot for the requested 4KB page in
880 			 * the 16K page...
881 			 */
882 			assert(PAGE_SHIFT == FOURK_PAGE_SHIFT);
883 			assert(page_shift_user32 == SIXTEENK_PAGE_SHIFT);
884 			sub_page_idx = ((offset & SIXTEENK_PAGE_MASK) /
885 			    PAGE_SIZE);
886 			/*
887 			 * ... and provide only that one 4KB page.
888 			 */
889 			sub_page_cnt = 1;
890 		} else {
891 			/*
892 			 * Iterate over all slots, i.e. retrieve all four 4KB
893 			 * pages in the requested 16KB page.
894 			 */
895 			assert(PAGE_SHIFT == SIXTEENK_PAGE_SHIFT);
896 			sub_page_idx = 0;
897 			sub_page_cnt = FOURK_PAGER_SLOTS;
898 		}
899 
900 		num_subpg_signed = 0;
901 		num_subpg_validated = 0;
902 		num_subpg_tainted = 0;
903 		num_subpg_nx = 0;
904 
905 		/* retrieve appropriate data for each 4K-page in this page */
906 		for (sub_page = sub_page_idx;
907 		    sub_page < sub_page_idx + sub_page_cnt;
908 		    sub_page++) {
909 			vm_object_t             src_object;
910 			memory_object_offset_t  src_offset;
911 			vm_offset_t             offset_in_src_page;
912 			kern_return_t           error_code;
913 			vm_object_t             src_page_object;
914 			vm_page_t               src_page;
915 			vm_page_t               top_page;
916 			vm_prot_t               prot;
917 			int                     interruptible;
918 			struct vm_object_fault_info     fault_info;
919 			boolean_t       subpg_validated;
920 			unsigned        subpg_tainted;
921 
922 
923 			if (offset < SIXTEENK_PAGE_SIZE) {
924 				/*
925 				 * The 1st 16K-page can cover multiple
926 				 * sub-mappings, as described in the
927 				 * pager->slots[] array.
928 				 */
929 				src_object =
930 				    pager->slots[sub_page].backing_object;
931 				src_offset =
932 				    pager->slots[sub_page].backing_offset;
933 			} else {
934 				fourk_pager_backing_t slot;
935 
936 				/*
937 				 * Beyond the 1st 16K-page in the pager is
938 				 * an extension of the last "sub page" in
939 				 * the pager->slots[] array.
940 				 */
941 				slot = &pager->slots[FOURK_PAGER_SLOTS - 1];
942 				src_object = slot->backing_object;
943 				src_offset = slot->backing_offset;
944 				src_offset += FOURK_PAGE_SIZE;
945 				src_offset +=
946 				    (vm_map_trunc_page(offset,
947 				    SIXTEENK_PAGE_MASK)
948 				    - SIXTEENK_PAGE_SIZE);
949 				src_offset += sub_page * FOURK_PAGE_SIZE;
950 			}
951 			offset_in_src_page = src_offset & PAGE_MASK_64;
952 			src_offset = vm_object_trunc_page(src_offset);
953 
954 			if (src_object == VM_OBJECT_NULL ||
955 			    src_object == (vm_object_t) -1) {
956 				/* zero-fill */
957 				bzero((char *)(dst_vaddr +
958 				    ((sub_page - sub_page_idx)
959 				    * FOURK_PAGE_SIZE)),
960 				    FOURK_PAGE_SIZE);
961 				if (fourk_pager_data_request_debug) {
962 					printf("fourk_pager_data_request"
963 					    "(%p,0x%llx+0x%lx+0x%04x): "
964 					    "ZERO\n",
965 					    pager,
966 					    offset,
967 					    cur_offset,
968 					    ((sub_page - sub_page_idx)
969 					    * FOURK_PAGE_SIZE));
970 				}
971 				continue;
972 			}
973 
974 			/* fault in the source page from src_object */
975 retry_src_fault:
976 			src_page = VM_PAGE_NULL;
977 			top_page = VM_PAGE_NULL;
978 			fault_info = *((struct vm_object_fault_info *)
979 			    (uintptr_t)mo_fault_info);
980 			fault_info.stealth = TRUE;
981 			fault_info.io_sync = FALSE;
982 			fault_info.mark_zf_absent = FALSE;
983 			fault_info.batch_pmap_op = FALSE;
984 			interruptible = fault_info.interruptible;
985 			prot = VM_PROT_READ;
986 			error_code = 0;
987 
988 			vm_object_lock(src_object);
989 			vm_object_paging_begin(src_object);
990 			kr = vm_fault_page(src_object,
991 			    src_offset,
992 			    VM_PROT_READ,
993 			    FALSE,
994 			    FALSE,                /* src_page not looked up */
995 			    &prot,
996 			    &src_page,
997 			    &top_page,
998 			    NULL,
999 			    &error_code,
1000 			    FALSE,
1001 			    &fault_info);
1002 			switch (kr) {
1003 			case VM_FAULT_SUCCESS:
1004 				break;
1005 			case VM_FAULT_RETRY:
1006 				goto retry_src_fault;
1007 			case VM_FAULT_MEMORY_SHORTAGE:
1008 				if (vm_page_wait(interruptible)) {
1009 					goto retry_src_fault;
1010 				}
1011 				OS_FALLTHROUGH;
1012 			case VM_FAULT_INTERRUPTED:
1013 				retval = MACH_SEND_INTERRUPTED;
1014 				goto src_fault_done;
1015 			case VM_FAULT_SUCCESS_NO_VM_PAGE:
1016 				/* success but no VM page: fail */
1017 				vm_object_paging_end(src_object);
1018 				vm_object_unlock(src_object);
1019 				OS_FALLTHROUGH;
1020 			case VM_FAULT_MEMORY_ERROR:
1021 				/* the page is not there! */
1022 				if (error_code) {
1023 					retval = error_code;
1024 				} else {
1025 					retval = KERN_MEMORY_ERROR;
1026 				}
1027 				goto src_fault_done;
1028 			default:
1029 				panic("fourk_pager_data_request: "
1030 				    "vm_fault_page() unexpected error 0x%x\n",
1031 				    kr);
1032 			}
1033 			assert(src_page != VM_PAGE_NULL);
1034 			assert(src_page->vmp_busy);
1035 
1036 			src_page_object = VM_PAGE_OBJECT(src_page);
1037 
1038 			if ((!VM_PAGE_PAGEABLE(src_page)) &&
1039 			    !VM_PAGE_WIRED(src_page)) {
1040 				vm_page_lockspin_queues();
1041 				if ((!VM_PAGE_PAGEABLE(src_page)) &&
1042 				    !VM_PAGE_WIRED(src_page)) {
1043 					vm_page_deactivate(src_page);
1044 				}
1045 				vm_page_unlock_queues();
1046 			}
1047 
1048 			src_vaddr = (vm_map_offset_t)
1049 			    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
1050 			        << PAGE_SHIFT);
1051 
1052 			/*
1053 			 * Validate the 4K page we want from
1054 			 * this source page...
1055 			 */
1056 			subpg_validated = FALSE;
1057 			subpg_tainted = 0;
1058 			if (src_page_object->code_signed) {
1059 				vm_page_validate_cs_mapped_chunk(
1060 					src_page,
1061 					(const void *) src_vaddr,
1062 					offset_in_src_page,
1063 					FOURK_PAGE_SIZE,
1064 					&subpg_validated,
1065 					&subpg_tainted);
1066 				num_subpg_signed++;
1067 				if (subpg_validated) {
1068 					num_subpg_validated++;
1069 				}
1070 				if (subpg_tainted & CS_VALIDATE_TAINTED) {
1071 					num_subpg_tainted++;
1072 				}
1073 				if (subpg_tainted & CS_VALIDATE_NX) {
1074 					/* subpg should not be executable */
1075 					if (sub_page_cnt > 1) {
1076 						/*
1077 						 * The destination page has
1078 						 * more than 1 subpage and its
1079 						 * other subpages might need
1080 						 * EXEC, so we do not propagate
1081 						 * CS_VALIDATE_NX to the
1082 						 * destination page...
1083 						 */
1084 					} else {
1085 						num_subpg_nx++;
1086 					}
1087 				}
1088 			}
1089 
1090 			/*
1091 			 * Copy the relevant portion of the source page
1092 			 * into the appropriate part of the destination page.
1093 			 */
1094 			bcopy((const char *)(src_vaddr + offset_in_src_page),
1095 			    (char *)(dst_vaddr +
1096 			    ((sub_page - sub_page_idx) *
1097 			    FOURK_PAGE_SIZE)),
1098 			    FOURK_PAGE_SIZE);
1099 			if (fourk_pager_data_request_debug) {
1100 				printf("fourk_data_request"
1101 				    "(%p,0x%llx+0x%lx+0x%04x): "
1102 				    "backed by [%p:0x%llx]: "
1103 				    "[0x%016llx 0x%016llx] "
1104 				    "code_signed=%d "
1105 				    "cs_valid=%d cs_tainted=%d cs_nx=%d\n",
1106 				    pager,
1107 				    offset, cur_offset,
1108 				    (sub_page - sub_page_idx) * FOURK_PAGE_SIZE,
1109 				    src_page_object,
1110 				    src_page->vmp_offset + offset_in_src_page,
1111 				    *(uint64_t *)(dst_vaddr +
1112 				    ((sub_page - sub_page_idx) *
1113 				    FOURK_PAGE_SIZE)),
1114 				    *(uint64_t *)(dst_vaddr +
1115 				    ((sub_page - sub_page_idx) *
1116 				    FOURK_PAGE_SIZE) +
1117 				    8),
1118 				    src_page_object->code_signed,
1119 				    subpg_validated,
1120 				    !!(subpg_tainted & CS_VALIDATE_TAINTED),
1121 				    !!(subpg_tainted & CS_VALIDATE_NX));
1122 			}
1123 
1124 #if __x86_64__ || __arm__ || __arm64__
1125 			/* we used the 1-to-1 mapping of physical memory */
1126 			src_vaddr = 0;
1127 #else /* __x86_64__ || __arm__ || __arm64__ */
1128 			/*
1129 			 * Remove the pmap mapping of the source page
1130 			 * in the kernel.
1131 			 */
1132 			pmap_remove(kernel_pmap,
1133 			    (addr64_t) src_vaddr,
1134 			    (addr64_t) src_vaddr + PAGE_SIZE_64);
1135 #endif /* __x86_64__ || __arm__ || __arm64__ */
1136 
1137 src_fault_done:
1138 			/*
1139 			 * Cleanup the result of vm_fault_page().
1140 			 */
1141 			if (src_page) {
1142 				assert(VM_PAGE_OBJECT(src_page) == src_page_object);
1143 
1144 				PAGE_WAKEUP_DONE(src_page);
1145 				src_page = VM_PAGE_NULL;
1146 				vm_object_paging_end(src_page_object);
1147 				vm_object_unlock(src_page_object);
1148 				if (top_page) {
1149 					vm_object_t     top_object;
1150 
1151 					top_object = VM_PAGE_OBJECT(top_page);
1152 					vm_object_lock(top_object);
1153 					VM_PAGE_FREE(top_page);
1154 					top_page = VM_PAGE_NULL;
1155 					vm_object_paging_end(top_object);
1156 					vm_object_unlock(top_object);
1157 				}
1158 			}
1159 		}
1160 		if (num_subpg_signed > 0) {
1161 			/* some code-signing involved with this 16K page */
1162 			if (num_subpg_tainted > 0) {
1163 				/* a tainted subpage taints entire 16K page */
1164 				UPL_SET_CS_TAINTED(upl_pl,
1165 				    cur_offset / PAGE_SIZE,
1166 				    VMP_CS_ALL_TRUE);
1167 				/* also mark as "validated" for consisteny */
1168 				UPL_SET_CS_VALIDATED(upl_pl,
1169 				    cur_offset / PAGE_SIZE,
1170 				    VMP_CS_ALL_TRUE);
1171 			} else if (num_subpg_validated == num_subpg_signed) {
1172 				/*
1173 				 * All the code-signed 4K subpages of this
1174 				 * 16K page are validated:  our 16K page is
1175 				 * considered validated.
1176 				 */
1177 				UPL_SET_CS_VALIDATED(upl_pl,
1178 				    cur_offset / PAGE_SIZE,
1179 				    VMP_CS_ALL_TRUE);
1180 			}
1181 			if (num_subpg_nx > 0) {
1182 				UPL_SET_CS_NX(upl_pl,
1183 				    cur_offset / PAGE_SIZE,
1184 				    VMP_CS_ALL_TRUE);
1185 			}
1186 		}
1187 	}
1188 
1189 done:
1190 	if (upl != NULL) {
1191 		/* clean up the UPL */
1192 
1193 		/*
1194 		 * The pages are currently dirty because we've just been
1195 		 * writing on them, but as far as we're concerned, they're
1196 		 * clean since they contain their "original" contents as
1197 		 * provided by us, the pager.
1198 		 * Tell the UPL to mark them "clean".
1199 		 */
1200 		upl_clear_dirty(upl, TRUE);
1201 
1202 		/* abort or commit the UPL */
1203 		if (retval != KERN_SUCCESS) {
1204 			upl_abort(upl, 0);
1205 			if (retval == KERN_ABORTED) {
1206 				wait_result_t   wait_result;
1207 
1208 				/*
1209 				 * We aborted the fault and did not provide
1210 				 * any contents for the requested pages but
1211 				 * the pages themselves are not invalid, so
1212 				 * let's return success and let the caller
1213 				 * retry the fault, in case it might succeed
1214 				 * later (when the decryption code is up and
1215 				 * running in the kernel, for example).
1216 				 */
1217 				retval = KERN_SUCCESS;
1218 				/*
1219 				 * Wait a little bit first to avoid using
1220 				 * too much CPU time retrying and failing
1221 				 * the same fault over and over again.
1222 				 */
1223 				wait_result = assert_wait_timeout(
1224 					(event_t) fourk_pager_data_request,
1225 					THREAD_UNINT,
1226 					10000,  /* 10ms */
1227 					NSEC_PER_USEC);
1228 				assert(wait_result == THREAD_WAITING);
1229 				wait_result = thread_block(THREAD_CONTINUE_NULL);
1230 				assert(wait_result == THREAD_TIMED_OUT);
1231 			}
1232 		} else {
1233 			boolean_t empty;
1234 			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
1235 			    "upl %p offset 0x%llx size 0x%x",
1236 			    upl, upl->u_offset, upl->u_size);
1237 			upl_commit_range(upl, 0, upl->u_size,
1238 			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
1239 			    upl_pl, pl_count, &empty);
1240 		}
1241 
1242 		/* and deallocate the UPL */
1243 		upl_deallocate(upl);
1244 		upl = NULL;
1245 	}
1246 	if (kernel_mapping != 0) {
1247 		/* clean up the mapping of the source and destination pages */
1248 		kmem_free(kernel_map, kernel_mapping, ptoa(2));
1249 		kernel_mapping = 0;
1250 		src_vaddr = 0;
1251 		dst_vaddr = 0;
1252 	}
1253 
1254 	return retval;
1255 }
1256 
1257 
1258 
1259 kern_return_t
fourk_pager_populate(memory_object_t mem_obj,boolean_t overwrite,int index,vm_object_t new_backing_object,vm_object_offset_t new_backing_offset,vm_object_t * old_backing_object,vm_object_offset_t * old_backing_offset)1260 fourk_pager_populate(
1261 	memory_object_t         mem_obj,
1262 	boolean_t               overwrite,
1263 	int                     index,
1264 	vm_object_t             new_backing_object,
1265 	vm_object_offset_t      new_backing_offset,
1266 	vm_object_t             *old_backing_object,
1267 	vm_object_offset_t      *old_backing_offset)
1268 {
1269 	fourk_pager_t   pager;
1270 
1271 	pager = fourk_pager_lookup(mem_obj);
1272 	if (pager == NULL) {
1273 		return KERN_INVALID_ARGUMENT;
1274 	}
1275 
1276 	assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0);
1277 	assert(pager->fourk_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL);
1278 
1279 	if (index < 0 || index > FOURK_PAGER_SLOTS) {
1280 		return KERN_INVALID_ARGUMENT;
1281 	}
1282 
1283 	if (!overwrite &&
1284 	    (pager->slots[index].backing_object != (vm_object_t) -1 ||
1285 	    pager->slots[index].backing_offset != (vm_object_offset_t) -1)) {
1286 		return KERN_INVALID_ADDRESS;
1287 	}
1288 
1289 	*old_backing_object = pager->slots[index].backing_object;
1290 	*old_backing_offset = pager->slots[index].backing_offset;
1291 
1292 	pager->slots[index].backing_object = new_backing_object;
1293 	pager->slots[index].backing_offset = new_backing_offset;
1294 
1295 	return KERN_SUCCESS;
1296 }
1297