1 /*
2 * Copyright (c) 2014-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49
50 #include <ipc/ipc_port.h>
51 #include <ipc/ipc_space.h>
52
53 #include <vm/vm_fault.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_pageout.h>
56 #include <vm/memory_object.h>
57 #include <vm/vm_pageout.h>
58 #include <vm/vm_protos.h>
59 #include <vm/vm_kern.h>
60
61
62 /*
63 * 4K MEMORY PAGER
64 *
65 * This external memory manager (EMM) handles memory mappings that are
66 * 4K-aligned but not page-aligned and can therefore not be mapped directly.
67 *
68 * It mostly handles page-in requests (from memory_object_data_request()) by
69 * getting the data needed to fill in each 4K-chunk. That can require
70 * getting data from one or two pages from its backing VM object
71 * (a file or a "apple-protected" pager backed by an encrypted file), and
72 * copies the data to another page so that it is aligned as expected by
73 * the mapping.
74 *
75 * Returned pages can never be dirtied and must always be mapped copy-on-write,
76 * so the memory manager does not need to handle page-out requests (from
77 * memory_object_data_return()).
78 *
79 */
80
81 /* forward declarations */
82 void fourk_pager_reference(memory_object_t mem_obj);
83 void fourk_pager_deallocate(memory_object_t mem_obj);
84 kern_return_t fourk_pager_init(memory_object_t mem_obj,
85 memory_object_control_t control,
86 memory_object_cluster_size_t pg_size);
87 kern_return_t fourk_pager_terminate(memory_object_t mem_obj);
88 kern_return_t fourk_pager_data_request(memory_object_t mem_obj,
89 memory_object_offset_t offset,
90 memory_object_cluster_size_t length,
91 vm_prot_t protection_required,
92 memory_object_fault_info_t fault_info);
93 kern_return_t fourk_pager_data_return(memory_object_t mem_obj,
94 memory_object_offset_t offset,
95 memory_object_cluster_size_t data_cnt,
96 memory_object_offset_t *resid_offset,
97 int *io_error,
98 boolean_t dirty,
99 boolean_t kernel_copy,
100 int upl_flags);
101 kern_return_t fourk_pager_data_initialize(memory_object_t mem_obj,
102 memory_object_offset_t offset,
103 memory_object_cluster_size_t data_cnt);
104 kern_return_t fourk_pager_data_unlock(memory_object_t mem_obj,
105 memory_object_offset_t offset,
106 memory_object_size_t size,
107 vm_prot_t desired_access);
108 kern_return_t fourk_pager_synchronize(memory_object_t mem_obj,
109 memory_object_offset_t offset,
110 memory_object_size_t length,
111 vm_sync_t sync_flags);
112 kern_return_t fourk_pager_map(memory_object_t mem_obj,
113 vm_prot_t prot);
114 kern_return_t fourk_pager_last_unmap(memory_object_t mem_obj);
115
116 /*
117 * Vector of VM operations for this EMM.
118 * These routines are invoked by VM via the memory_object_*() interfaces.
119 */
120 const struct memory_object_pager_ops fourk_pager_ops = {
121 .memory_object_reference = fourk_pager_reference,
122 .memory_object_deallocate = fourk_pager_deallocate,
123 .memory_object_init = fourk_pager_init,
124 .memory_object_terminate = fourk_pager_terminate,
125 .memory_object_data_request = fourk_pager_data_request,
126 .memory_object_data_return = fourk_pager_data_return,
127 .memory_object_data_initialize = fourk_pager_data_initialize,
128 .memory_object_data_unlock = fourk_pager_data_unlock,
129 .memory_object_synchronize = fourk_pager_synchronize,
130 .memory_object_map = fourk_pager_map,
131 .memory_object_last_unmap = fourk_pager_last_unmap,
132 .memory_object_data_reclaim = NULL,
133 .memory_object_backing_object = NULL,
134 .memory_object_pager_name = "fourk_pager"
135 };
136
137 /*
138 * The "fourk_pager" describes a memory object backed by
139 * the "4K" EMM.
140 */
141 #define FOURK_PAGER_SLOTS 4 /* 16K / 4K */
142 typedef struct fourk_pager_backing {
143 vm_object_t backing_object;
144 vm_object_offset_t backing_offset;
145 } *fourk_pager_backing_t;
146 typedef struct fourk_pager {
147 /* mandatory generic header */
148 struct memory_object fourk_pgr_hdr;
149
150 /* pager-specific data */
151 queue_chain_t pager_queue; /* next & prev pagers */
152 #if MEMORY_OBJECT_HAS_REFCOUNT
153 #define fourk_pgr_hdr_ref fourk_pgr_hdr.mo_ref
154 #else
155 os_ref_atomic_t fourk_pgr_hdr_ref;
156 #endif
157 bool is_ready; /* is this pager ready ? */
158 bool is_mapped; /* is this mem_obj mapped ? */
159 struct fourk_pager_backing slots[FOURK_PAGER_SLOTS]; /* backing for each
160 * 4K-chunk */
161 } *fourk_pager_t;
162 #define FOURK_PAGER_NULL ((fourk_pager_t) NULL)
163
164 /*
165 * List of memory objects managed by this EMM.
166 * The list is protected by the "fourk_pager_lock" lock.
167 */
168 int fourk_pager_count = 0; /* number of pagers */
169 int fourk_pager_count_mapped = 0; /* number of unmapped pagers */
170 queue_head_t fourk_pager_queue = QUEUE_HEAD_INITIALIZER(fourk_pager_queue);
171 LCK_GRP_DECLARE(fourk_pager_lck_grp, "4K-pager");
172 LCK_MTX_DECLARE(fourk_pager_lock, &fourk_pager_lck_grp);
173
174 /*
175 * Maximum number of unmapped pagers we're willing to keep around.
176 */
177 int fourk_pager_cache_limit = 0;
178
179 /*
180 * Statistics & counters.
181 */
182 int fourk_pager_count_max = 0;
183 int fourk_pager_count_unmapped_max = 0;
184 int fourk_pager_num_trim_max = 0;
185 int fourk_pager_num_trim_total = 0;
186
187 /* internal prototypes */
188 fourk_pager_t fourk_pager_lookup(memory_object_t mem_obj);
189 void fourk_pager_dequeue(fourk_pager_t pager);
190 void fourk_pager_deallocate_internal(fourk_pager_t pager,
191 boolean_t locked);
192 void fourk_pager_terminate_internal(fourk_pager_t pager);
193 void fourk_pager_trim(void);
194
195
#if DEBUG
int fourk_pagerdebug = 0;       /* bitmask selecting which PAGER_* classes to log */
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

/* Log printf-argument-list A when every bit of LEVEL is set in fourk_pagerdebug. */
#define PAGER_DEBUG(LEVEL, A)                                           \
	MACRO_BEGIN                                                     \
	if ((fourk_pagerdebug & LEVEL)==LEVEL) {                        \
	        printf A;                                               \
	}                                                               \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
211
212
213 /*
214 * fourk_pager_init()
215 *
216 * Initialize the memory object and makes it ready to be used and mapped.
217 */
218 kern_return_t
fourk_pager_init(memory_object_t mem_obj,memory_object_control_t control,__unused memory_object_cluster_size_t pg_size)219 fourk_pager_init(
220 memory_object_t mem_obj,
221 memory_object_control_t control,
222 #if !DEBUG
223 __unused
224 #endif
225 memory_object_cluster_size_t pg_size)
226 {
227 fourk_pager_t pager;
228 kern_return_t kr;
229 memory_object_attr_info_data_t attributes;
230
231 PAGER_DEBUG(PAGER_ALL,
232 ("fourk_pager_init: %p, %p, %x\n",
233 mem_obj, control, pg_size));
234
235 if (control == MEMORY_OBJECT_CONTROL_NULL) {
236 return KERN_INVALID_ARGUMENT;
237 }
238
239 pager = fourk_pager_lookup(mem_obj);
240
241 memory_object_control_reference(control);
242
243 pager->fourk_pgr_hdr.mo_control = control;
244
245 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
246 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
247 attributes.cluster_size = (1 << (PAGE_SHIFT));
248 attributes.may_cache_object = FALSE;
249 attributes.temporary = TRUE;
250
251 kr = memory_object_change_attributes(
252 control,
253 MEMORY_OBJECT_ATTRIBUTE_INFO,
254 (memory_object_info_t) &attributes,
255 MEMORY_OBJECT_ATTR_INFO_COUNT);
256 if (kr != KERN_SUCCESS) {
257 panic("fourk_pager_init: "
258 "memory_object_change_attributes() failed");
259 }
260
261 #if CONFIG_SECLUDED_MEMORY
262 if (secluded_for_filecache) {
263 memory_object_mark_eligible_for_secluded(control, TRUE);
264 }
265 #endif /* CONFIG_SECLUDED_MEMORY */
266
267 return KERN_SUCCESS;
268 }
269
270 /*
271 * fourk_pager_data_return()
272 *
273 * Handles page-out requests from VM. This should never happen since
274 * the pages provided by this EMM are not supposed to be dirty or dirtied
275 * and VM should simply discard the contents and reclaim the pages if it
276 * needs to.
277 */
278 kern_return_t
fourk_pager_data_return(__unused memory_object_t mem_obj,__unused memory_object_offset_t offset,__unused memory_object_cluster_size_t data_cnt,__unused memory_object_offset_t * resid_offset,__unused int * io_error,__unused boolean_t dirty,__unused boolean_t kernel_copy,__unused int upl_flags)279 fourk_pager_data_return(
280 __unused memory_object_t mem_obj,
281 __unused memory_object_offset_t offset,
282 __unused memory_object_cluster_size_t data_cnt,
283 __unused memory_object_offset_t *resid_offset,
284 __unused int *io_error,
285 __unused boolean_t dirty,
286 __unused boolean_t kernel_copy,
287 __unused int upl_flags)
288 {
289 panic("fourk_pager_data_return: should never get called");
290 return KERN_FAILURE;
291 }
292
293 kern_return_t
fourk_pager_data_initialize(__unused memory_object_t mem_obj,__unused memory_object_offset_t offset,__unused memory_object_cluster_size_t data_cnt)294 fourk_pager_data_initialize(
295 __unused memory_object_t mem_obj,
296 __unused memory_object_offset_t offset,
297 __unused memory_object_cluster_size_t data_cnt)
298 {
299 panic("fourk_pager_data_initialize: should never get called");
300 return KERN_FAILURE;
301 }
302
303 kern_return_t
fourk_pager_data_unlock(__unused memory_object_t mem_obj,__unused memory_object_offset_t offset,__unused memory_object_size_t size,__unused vm_prot_t desired_access)304 fourk_pager_data_unlock(
305 __unused memory_object_t mem_obj,
306 __unused memory_object_offset_t offset,
307 __unused memory_object_size_t size,
308 __unused vm_prot_t desired_access)
309 {
310 return KERN_FAILURE;
311 }
312
313 /*
314 * fourk_pager_reference()
315 *
316 * Get a reference on this memory object.
317 * For external usage only. Assumes that the initial reference count is not 0,
318 * i.e one should not "revive" a dead pager this way.
319 */
320 void
fourk_pager_reference(memory_object_t mem_obj)321 fourk_pager_reference(
322 memory_object_t mem_obj)
323 {
324 fourk_pager_t pager;
325
326 pager = fourk_pager_lookup(mem_obj);
327
328 lck_mtx_lock(&fourk_pager_lock);
329 os_ref_retain_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);
330 lck_mtx_unlock(&fourk_pager_lock);
331 }
332
333
334 /*
335 * fourk_pager_dequeue:
336 *
337 * Removes a pager from the list of pagers.
338 *
339 * The caller must hold "fourk_pager_lock".
340 */
341 void
fourk_pager_dequeue(fourk_pager_t pager)342 fourk_pager_dequeue(
343 fourk_pager_t pager)
344 {
345 assert(!pager->is_mapped);
346
347 queue_remove(&fourk_pager_queue,
348 pager,
349 fourk_pager_t,
350 pager_queue);
351 pager->pager_queue.next = NULL;
352 pager->pager_queue.prev = NULL;
353
354 fourk_pager_count--;
355 }
356
357 /*
358 * fourk_pager_terminate_internal:
359 *
360 * Trigger the asynchronous termination of the memory object associated
361 * with this pager.
362 * When the memory object is terminated, there will be one more call
363 * to memory_object_deallocate() (i.e. fourk_pager_deallocate())
364 * to finish the clean up.
365 *
366 * "fourk_pager_lock" should not be held by the caller.
367 * We don't need the lock because the pager has already been removed from
368 * the pagers' list and is now ours exclusively.
369 */
370 void
fourk_pager_terminate_internal(fourk_pager_t pager)371 fourk_pager_terminate_internal(
372 fourk_pager_t pager)
373 {
374 int i;
375
376 assert(pager->is_ready);
377 assert(!pager->is_mapped);
378
379 for (i = 0; i < FOURK_PAGER_SLOTS; i++) {
380 if (pager->slots[i].backing_object != VM_OBJECT_NULL &&
381 pager->slots[i].backing_object != (vm_object_t) -1) {
382 vm_object_deallocate(pager->slots[i].backing_object);
383 pager->slots[i].backing_object = (vm_object_t) -1;
384 pager->slots[i].backing_offset = (vm_object_offset_t) -1;
385 }
386 }
387
388 /* trigger the destruction of the memory object */
389 memory_object_destroy(pager->fourk_pgr_hdr.mo_control, 0);
390 }
391
392 /*
393 * fourk_pager_deallocate_internal()
394 *
395 * Release a reference on this pager and free it when the last
396 * reference goes away.
397 * Can be called with fourk_pager_lock held or not but always returns
398 * with it unlocked.
399 */
400 void
fourk_pager_deallocate_internal(fourk_pager_t pager,boolean_t locked)401 fourk_pager_deallocate_internal(
402 fourk_pager_t pager,
403 boolean_t locked)
404 {
405 boolean_t needs_trimming;
406 int count_unmapped;
407 os_ref_count_t ref_count;
408
409 if (!locked) {
410 lck_mtx_lock(&fourk_pager_lock);
411 }
412
413 count_unmapped = (fourk_pager_count -
414 fourk_pager_count_mapped);
415 if (count_unmapped > fourk_pager_cache_limit) {
416 /* we have too many unmapped pagers: trim some */
417 needs_trimming = TRUE;
418 } else {
419 needs_trimming = FALSE;
420 }
421
422 /* drop a reference on this pager */
423 ref_count = os_ref_release_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);
424
425 if (ref_count == 1) {
426 /*
427 * Only the "named" reference is left, which means that
428 * no one is really holding on to this pager anymore.
429 * Terminate it.
430 */
431 fourk_pager_dequeue(pager);
432 /* the pager is all ours: no need for the lock now */
433 lck_mtx_unlock(&fourk_pager_lock);
434 fourk_pager_terminate_internal(pager);
435 } else if (ref_count == 0) {
436 /*
437 * Dropped the existence reference; the memory object has
438 * been terminated. Do some final cleanup and release the
439 * pager structure.
440 */
441 lck_mtx_unlock(&fourk_pager_lock);
442 if (pager->fourk_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
443 memory_object_control_deallocate(pager->fourk_pgr_hdr.mo_control);
444 pager->fourk_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
445 }
446 kfree_type(struct fourk_pager, pager);
447 pager = FOURK_PAGER_NULL;
448 } else {
449 /* there are still plenty of references: keep going... */
450 lck_mtx_unlock(&fourk_pager_lock);
451 }
452
453 if (needs_trimming) {
454 fourk_pager_trim();
455 }
456 /* caution: lock is not held on return... */
457 }
458
459 /*
460 * fourk_pager_deallocate()
461 *
462 * Release a reference on this pager and free it when the last
463 * reference goes away.
464 */
465 void
fourk_pager_deallocate(memory_object_t mem_obj)466 fourk_pager_deallocate(
467 memory_object_t mem_obj)
468 {
469 fourk_pager_t pager;
470
471 PAGER_DEBUG(PAGER_ALL, ("fourk_pager_deallocate: %p\n", mem_obj));
472 pager = fourk_pager_lookup(mem_obj);
473 fourk_pager_deallocate_internal(pager, FALSE);
474 }
475
476 /*
477 *
478 */
479 kern_return_t
fourk_pager_terminate(__unused memory_object_t mem_obj)480 fourk_pager_terminate(
481 #if !DEBUG
482 __unused
483 #endif
484 memory_object_t mem_obj)
485 {
486 PAGER_DEBUG(PAGER_ALL, ("fourk_pager_terminate: %p\n", mem_obj));
487
488 return KERN_SUCCESS;
489 }
490
491 /*
492 *
493 */
494 kern_return_t
fourk_pager_synchronize(__unused memory_object_t mem_obj,__unused memory_object_offset_t offset,__unused memory_object_size_t length,__unused vm_sync_t sync_flags)495 fourk_pager_synchronize(
496 __unused memory_object_t mem_obj,
497 __unused memory_object_offset_t offset,
498 __unused memory_object_size_t length,
499 __unused vm_sync_t sync_flags)
500 {
501 panic("fourk_pager_synchronize: memory_object_synchronize no longer supported");
502 return KERN_FAILURE;
503 }
504
505 /*
506 * fourk_pager_map()
507 *
508 * This allows VM to let us, the EMM, know that this memory object
509 * is currently mapped one or more times. This is called by VM each time
510 * the memory object gets mapped and we take one extra reference on the
511 * memory object to account for all its mappings.
512 */
513 kern_return_t
fourk_pager_map(memory_object_t mem_obj,__unused vm_prot_t prot)514 fourk_pager_map(
515 memory_object_t mem_obj,
516 __unused vm_prot_t prot)
517 {
518 fourk_pager_t pager;
519
520 PAGER_DEBUG(PAGER_ALL, ("fourk_pager_map: %p\n", mem_obj));
521
522 pager = fourk_pager_lookup(mem_obj);
523
524 lck_mtx_lock(&fourk_pager_lock);
525 assert(pager->is_ready);
526 assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0); /* pager is alive */
527 if (pager->is_mapped == FALSE) {
528 /*
529 * First mapping of this pager: take an extra reference
530 * that will remain until all the mappings of this pager
531 * are removed.
532 */
533 pager->is_mapped = TRUE;
534 os_ref_retain_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);
535 fourk_pager_count_mapped++;
536 }
537 lck_mtx_unlock(&fourk_pager_lock);
538
539 return KERN_SUCCESS;
540 }
541
542 /*
543 * fourk_pager_last_unmap()
544 *
545 * This is called by VM when this memory object is no longer mapped anywhere.
546 */
547 kern_return_t
fourk_pager_last_unmap(memory_object_t mem_obj)548 fourk_pager_last_unmap(
549 memory_object_t mem_obj)
550 {
551 fourk_pager_t pager;
552 int count_unmapped;
553
554 PAGER_DEBUG(PAGER_ALL,
555 ("fourk_pager_last_unmap: %p\n", mem_obj));
556
557 pager = fourk_pager_lookup(mem_obj);
558
559 lck_mtx_lock(&fourk_pager_lock);
560 if (pager->is_mapped) {
561 /*
562 * All the mappings are gone, so let go of the one extra
563 * reference that represents all the mappings of this pager.
564 */
565 fourk_pager_count_mapped--;
566 count_unmapped = (fourk_pager_count -
567 fourk_pager_count_mapped);
568 if (count_unmapped > fourk_pager_count_unmapped_max) {
569 fourk_pager_count_unmapped_max = count_unmapped;
570 }
571 pager->is_mapped = FALSE;
572 fourk_pager_deallocate_internal(pager, TRUE);
573 /* caution: deallocate_internal() released the lock ! */
574 } else {
575 lck_mtx_unlock(&fourk_pager_lock);
576 }
577
578 return KERN_SUCCESS;
579 }
580
581
582 /*
583 *
584 */
585 fourk_pager_t
fourk_pager_lookup(memory_object_t mem_obj)586 fourk_pager_lookup(
587 memory_object_t mem_obj)
588 {
589 fourk_pager_t pager;
590
591 assert(mem_obj->mo_pager_ops == &fourk_pager_ops);
592 pager = (fourk_pager_t) mem_obj;
593 assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0);
594 return pager;
595 }
596
/*
 * fourk_pager_trim()
 *
 * Reclaim unmapped pagers until the number of unmapped pagers is back
 * under "fourk_pager_cache_limit".  Candidates are moved to a private
 * queue under the lock, then terminated with the lock dropped.
 */
void
fourk_pager_trim(void)
{
	fourk_pager_t   pager, prev_pager;
	queue_head_t    trim_queue;
	int             num_trim;
	int             count_unmapped;

	lck_mtx_lock(&fourk_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (fourk_pager_t)
	    queue_last(&fourk_pager_queue);
	    !queue_end(&fourk_pager_queue,
	    (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (fourk_pager_t)
		    queue_prev(&pager->pager_queue);

		/* ref count 2 == only the existence + setup references left */
		if (os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			fourk_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    fourk_pager_t,
			    pager_queue);

			count_unmapped = (fourk_pager_count -
			    fourk_pager_count_mapped);
			if (count_unmapped <= fourk_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	if (num_trim > fourk_pager_num_trim_max) {
		fourk_pager_num_trim_max = num_trim;
	}
	fourk_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&fourk_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    fourk_pager_t,
		    pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		(void)os_ref_release_locked_raw(&pager->fourk_pgr_hdr_ref, NULL);
		fourk_pager_terminate_internal(pager);
	}
}
669
670
671
672
673
674
675 vm_object_t
fourk_pager_to_vm_object(memory_object_t mem_obj)676 fourk_pager_to_vm_object(
677 memory_object_t mem_obj)
678 {
679 fourk_pager_t pager;
680 vm_object_t object;
681
682 pager = fourk_pager_lookup(mem_obj);
683 if (pager == NULL) {
684 return VM_OBJECT_NULL;
685 }
686
687 assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0);
688 assert(pager->fourk_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL);
689 object = memory_object_control_to_vm_object(pager->fourk_pgr_hdr.mo_control);
690 assert(object != VM_OBJECT_NULL);
691 return object;
692 }
693
/*
 * fourk_pager_create()
 *
 * Allocate and initialize a new fourk pager, enter it on the global
 * pager list, and obtain a "named" memory object for it from VM.
 * The pager starts with 2 references (existence + setup) and all
 * backing slots empty ((vm_object_t)-1).
 */
memory_object_t
fourk_pager_create(void)
{
	fourk_pager_t           pager;
	memory_object_control_t control;
	kern_return_t           kr;
	int                     i;

#if 00
	if (PAGE_SIZE_64 == FOURK_PAGE_SIZE) {
		panic("fourk_pager_create: page size is 4K !?");
	}
#endif

	pager = kalloc_type(struct fourk_pager, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->fourk_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->fourk_pgr_hdr.mo_pager_ops = &fourk_pager_ops;
	pager->fourk_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	os_ref_init_count_raw(&pager->fourk_pgr_hdr_ref, NULL, 2); /* existence + setup reference */
	pager->is_ready = FALSE;/* not ready until it has a "name" */
	pager->is_mapped = FALSE;

	/* mark all slots empty: no backing object yet */
	for (i = 0; i < FOURK_PAGER_SLOTS; i++) {
		pager->slots[i].backing_object = (vm_object_t) -1;
		pager->slots[i].backing_offset = (vm_object_offset_t) -1;
	}

	lck_mtx_lock(&fourk_pager_lock);

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&fourk_pager_queue,
	    pager,
	    fourk_pager_t,
	    pager_queue);
	fourk_pager_count++;
	if (fourk_pager_count > fourk_pager_count_max) {
		fourk_pager_count_max = fourk_pager_count;
	}
	lck_mtx_unlock(&fourk_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&fourk_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&fourk_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return (memory_object_t) pager;
}
760
761 /*
762 * fourk_pager_data_request()
763 *
764 * Handles page-in requests from VM.
765 */
766 int fourk_pager_data_request_debug = 0;
767 kern_return_t
fourk_pager_data_request(memory_object_t mem_obj,memory_object_offset_t offset,memory_object_cluster_size_t length,__unused vm_prot_t protection_required,memory_object_fault_info_t mo_fault_info)768 fourk_pager_data_request(
769 memory_object_t mem_obj,
770 memory_object_offset_t offset,
771 memory_object_cluster_size_t length,
772 #if !DEBUG
773 __unused
774 #endif
775 vm_prot_t protection_required,
776 memory_object_fault_info_t mo_fault_info)
777 {
778 fourk_pager_t pager;
779 memory_object_control_t mo_control;
780 upl_t upl;
781 int upl_flags;
782 upl_size_t upl_size;
783 upl_page_info_t *upl_pl;
784 unsigned int pl_count;
785 vm_object_t dst_object;
786 kern_return_t kr, retval;
787 vm_map_offset_t kernel_mapping;
788 vm_offset_t src_vaddr, dst_vaddr;
789 vm_offset_t cur_offset;
790 int sub_page;
791 int sub_page_idx, sub_page_cnt;
792
793 pager = fourk_pager_lookup(mem_obj);
794 assert(pager->is_ready);
795 assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 1); /* pager is alive and mapped */
796
797 PAGER_DEBUG(PAGER_PAGEIN, ("fourk_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
798
799 retval = KERN_SUCCESS;
800 kernel_mapping = 0;
801
802 offset = memory_object_trunc_page(offset);
803
804 /*
805 * Gather in a UPL all the VM pages requested by VM.
806 */
807 mo_control = pager->fourk_pgr_hdr.mo_control;
808
809 upl_size = length;
810 upl_flags =
811 UPL_RET_ONLY_ABSENT |
812 UPL_SET_LITE |
813 UPL_NO_SYNC |
814 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
815 UPL_SET_INTERNAL;
816 pl_count = 0;
817 kr = memory_object_upl_request(mo_control,
818 offset, upl_size,
819 &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_NONE);
820 if (kr != KERN_SUCCESS) {
821 retval = kr;
822 goto done;
823 }
824 dst_object = memory_object_control_to_vm_object(mo_control);
825 assert(dst_object != VM_OBJECT_NULL);
826
827 #if __x86_64__ || __arm__ || __arm64__
828 /* use the 1-to-1 mapping of physical memory */
829 #else /* __x86_64__ || __arm__ || __arm64__ */
830 /*
831 * Reserve 2 virtual pages in the kernel address space to map the
832 * source and destination physical pages when it's their turn to
833 * be processed.
834 */
835 vm_map_entry_t map_entry;
836
837 vm_object_reference(kernel_object); /* ref. for mapping */
838 kr = vm_map_find_space(kernel_map,
839 &kernel_mapping,
840 2 * PAGE_SIZE_64,
841 0,
842 0,
843 VM_MAP_KERNEL_FLAGS_NONE,
844 &map_entry);
845 if (kr != KERN_SUCCESS) {
846 vm_object_deallocate(kernel_object);
847 retval = kr;
848 goto done;
849 }
850 map_entry->object.vm_object = kernel_object;
851 map_entry->offset = kernel_mapping;
852 vm_map_unlock(kernel_map);
853 src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
854 dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
855 #endif /* __x86_64__ || __arm__ || __arm64__ */
856
857 /*
858 * Fill in the contents of the pages requested by VM.
859 */
860 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
861 pl_count = length / PAGE_SIZE;
862 for (cur_offset = 0;
863 retval == KERN_SUCCESS && cur_offset < length;
864 cur_offset += PAGE_SIZE) {
865 ppnum_t dst_pnum;
866 int num_subpg_signed, num_subpg_validated;
867 int num_subpg_tainted, num_subpg_nx;
868
869 if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
870 /* this page is not in the UPL: skip it */
871 continue;
872 }
873
874 /*
875 * Establish an explicit pmap mapping of the destination
876 * physical page.
877 * We can't do a regular VM mapping because the VM page
878 * is "busy".
879 */
880 dst_pnum = (ppnum_t)
881 upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
882 assert(dst_pnum != 0);
883 dst_vaddr = (vm_map_offset_t)
884 phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
885
886 /* retrieve appropriate data for each 4K-page in this page */
887 if (PAGE_SHIFT == FOURK_PAGE_SHIFT &&
888 page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
889 /*
890 * Find the slot for the requested 4KB page in
891 * the 16K page...
892 */
893 assert(PAGE_SHIFT == FOURK_PAGE_SHIFT);
894 assert(page_shift_user32 == SIXTEENK_PAGE_SHIFT);
895 sub_page_idx = ((offset & SIXTEENK_PAGE_MASK) /
896 PAGE_SIZE);
897 /*
898 * ... and provide only that one 4KB page.
899 */
900 sub_page_cnt = 1;
901 } else {
902 /*
903 * Iterate over all slots, i.e. retrieve all four 4KB
904 * pages in the requested 16KB page.
905 */
906 assert(PAGE_SHIFT == SIXTEENK_PAGE_SHIFT);
907 sub_page_idx = 0;
908 sub_page_cnt = FOURK_PAGER_SLOTS;
909 }
910
911 num_subpg_signed = 0;
912 num_subpg_validated = 0;
913 num_subpg_tainted = 0;
914 num_subpg_nx = 0;
915
916 /* retrieve appropriate data for each 4K-page in this page */
917 for (sub_page = sub_page_idx;
918 sub_page < sub_page_idx + sub_page_cnt;
919 sub_page++) {
920 vm_object_t src_object;
921 memory_object_offset_t src_offset;
922 vm_offset_t offset_in_src_page;
923 kern_return_t error_code;
924 vm_object_t src_page_object;
925 vm_page_t src_page;
926 vm_page_t top_page;
927 vm_prot_t prot;
928 int interruptible;
929 struct vm_object_fault_info fault_info;
930 boolean_t subpg_validated;
931 unsigned subpg_tainted;
932
933
934 if (offset < SIXTEENK_PAGE_SIZE) {
935 /*
936 * The 1st 16K-page can cover multiple
937 * sub-mappings, as described in the
938 * pager->slots[] array.
939 */
940 src_object =
941 pager->slots[sub_page].backing_object;
942 src_offset =
943 pager->slots[sub_page].backing_offset;
944 } else {
945 fourk_pager_backing_t slot;
946
947 /*
948 * Beyond the 1st 16K-page in the pager is
949 * an extension of the last "sub page" in
950 * the pager->slots[] array.
951 */
952 slot = &pager->slots[FOURK_PAGER_SLOTS - 1];
953 src_object = slot->backing_object;
954 src_offset = slot->backing_offset;
955 src_offset += FOURK_PAGE_SIZE;
956 src_offset +=
957 (vm_map_trunc_page(offset,
958 SIXTEENK_PAGE_MASK)
959 - SIXTEENK_PAGE_SIZE);
960 src_offset += sub_page * FOURK_PAGE_SIZE;
961 }
962 offset_in_src_page = src_offset & PAGE_MASK_64;
963 src_offset = vm_object_trunc_page(src_offset);
964
965 if (src_object == VM_OBJECT_NULL ||
966 src_object == (vm_object_t) -1) {
967 /* zero-fill */
968 bzero((char *)(dst_vaddr +
969 ((sub_page - sub_page_idx)
970 * FOURK_PAGE_SIZE)),
971 FOURK_PAGE_SIZE);
972 if (fourk_pager_data_request_debug) {
973 printf("fourk_pager_data_request"
974 "(%p,0x%llx+0x%lx+0x%04x): "
975 "ZERO\n",
976 pager,
977 offset,
978 cur_offset,
979 ((sub_page - sub_page_idx)
980 * FOURK_PAGE_SIZE));
981 }
982 continue;
983 }
984
985 /* fault in the source page from src_object */
986 retry_src_fault:
987 src_page = VM_PAGE_NULL;
988 top_page = VM_PAGE_NULL;
989 fault_info = *((struct vm_object_fault_info *)
990 (uintptr_t)mo_fault_info);
991 fault_info.stealth = TRUE;
992 fault_info.io_sync = FALSE;
993 fault_info.mark_zf_absent = FALSE;
994 fault_info.batch_pmap_op = FALSE;
995 interruptible = fault_info.interruptible;
996 prot = VM_PROT_READ;
997 error_code = 0;
998
999 vm_object_lock(src_object);
1000 vm_object_paging_begin(src_object);
1001 kr = vm_fault_page(src_object,
1002 src_offset,
1003 VM_PROT_READ,
1004 FALSE,
1005 FALSE, /* src_page not looked up */
1006 &prot,
1007 &src_page,
1008 &top_page,
1009 NULL,
1010 &error_code,
1011 FALSE,
1012 FALSE,
1013 &fault_info);
1014 switch (kr) {
1015 case VM_FAULT_SUCCESS:
1016 break;
1017 case VM_FAULT_RETRY:
1018 goto retry_src_fault;
1019 case VM_FAULT_MEMORY_SHORTAGE:
1020 if (vm_page_wait(interruptible)) {
1021 goto retry_src_fault;
1022 }
1023 OS_FALLTHROUGH;
1024 case VM_FAULT_INTERRUPTED:
1025 retval = MACH_SEND_INTERRUPTED;
1026 goto src_fault_done;
1027 case VM_FAULT_SUCCESS_NO_VM_PAGE:
1028 /* success but no VM page: fail */
1029 vm_object_paging_end(src_object);
1030 vm_object_unlock(src_object);
1031 OS_FALLTHROUGH;
1032 case VM_FAULT_MEMORY_ERROR:
1033 /* the page is not there! */
1034 if (error_code) {
1035 retval = error_code;
1036 } else {
1037 retval = KERN_MEMORY_ERROR;
1038 }
1039 goto src_fault_done;
1040 default:
1041 panic("fourk_pager_data_request: "
1042 "vm_fault_page() unexpected error 0x%x\n",
1043 kr);
1044 }
1045 assert(src_page != VM_PAGE_NULL);
1046 assert(src_page->vmp_busy);
1047
1048 src_page_object = VM_PAGE_OBJECT(src_page);
1049
1050 if ((!VM_PAGE_PAGEABLE(src_page)) &&
1051 !VM_PAGE_WIRED(src_page)) {
1052 vm_page_lockspin_queues();
1053 if ((!VM_PAGE_PAGEABLE(src_page)) &&
1054 !VM_PAGE_WIRED(src_page)) {
1055 vm_page_deactivate(src_page);
1056 }
1057 vm_page_unlock_queues();
1058 }
1059
1060 src_vaddr = (vm_map_offset_t)
1061 phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
1062 << PAGE_SHIFT);
1063
1064 /*
1065 * Validate the 4K page we want from
1066 * this source page...
1067 */
1068 subpg_validated = FALSE;
1069 subpg_tainted = 0;
1070 if (src_page_object->code_signed) {
1071 vm_page_validate_cs_mapped_chunk(
1072 src_page,
1073 (const void *) src_vaddr,
1074 offset_in_src_page,
1075 FOURK_PAGE_SIZE,
1076 &subpg_validated,
1077 &subpg_tainted);
1078 num_subpg_signed++;
1079 if (subpg_validated) {
1080 num_subpg_validated++;
1081 }
1082 if (subpg_tainted & CS_VALIDATE_TAINTED) {
1083 num_subpg_tainted++;
1084 }
1085 if (subpg_tainted & CS_VALIDATE_NX) {
1086 /* subpg should not be executable */
1087 if (sub_page_cnt > 1) {
1088 /*
1089 * The destination page has
1090 * more than 1 subpage and its
1091 * other subpages might need
1092 * EXEC, so we do not propagate
1093 * CS_VALIDATE_NX to the
1094 * destination page...
1095 */
1096 } else {
1097 num_subpg_nx++;
1098 }
1099 }
1100 }
1101
1102 /*
1103 * Copy the relevant portion of the source page
1104 * into the appropriate part of the destination page.
1105 */
1106 bcopy((const char *)(src_vaddr + offset_in_src_page),
1107 (char *)(dst_vaddr +
1108 ((sub_page - sub_page_idx) *
1109 FOURK_PAGE_SIZE)),
1110 FOURK_PAGE_SIZE);
1111 if (fourk_pager_data_request_debug) {
1112 printf("fourk_data_request"
1113 "(%p,0x%llx+0x%lx+0x%04x): "
1114 "backed by [%p:0x%llx]: "
1115 "[0x%016llx 0x%016llx] "
1116 "code_signed=%d "
1117 "cs_valid=%d cs_tainted=%d cs_nx=%d\n",
1118 pager,
1119 offset, cur_offset,
1120 (sub_page - sub_page_idx) * FOURK_PAGE_SIZE,
1121 src_page_object,
1122 src_page->vmp_offset + offset_in_src_page,
1123 *(uint64_t *)(dst_vaddr +
1124 ((sub_page - sub_page_idx) *
1125 FOURK_PAGE_SIZE)),
1126 *(uint64_t *)(dst_vaddr +
1127 ((sub_page - sub_page_idx) *
1128 FOURK_PAGE_SIZE) +
1129 8),
1130 src_page_object->code_signed,
1131 subpg_validated,
1132 !!(subpg_tainted & CS_VALIDATE_TAINTED),
1133 !!(subpg_tainted & CS_VALIDATE_NX));
1134 }
1135
1136 #if __x86_64__ || __arm__ || __arm64__
1137 /* we used the 1-to-1 mapping of physical memory */
1138 src_vaddr = 0;
1139 #else /* __x86_64__ || __arm__ || __arm64__ */
1140 /*
1141 * Remove the pmap mapping of the source page
1142 * in the kernel.
1143 */
1144 pmap_remove(kernel_pmap,
1145 (addr64_t) src_vaddr,
1146 (addr64_t) src_vaddr + PAGE_SIZE_64);
1147 #endif /* __x86_64__ || __arm__ || __arm64__ */
1148
1149 src_fault_done:
1150 /*
1151 * Cleanup the result of vm_fault_page().
1152 */
1153 if (src_page) {
1154 assert(VM_PAGE_OBJECT(src_page) == src_page_object);
1155
1156 PAGE_WAKEUP_DONE(src_page);
1157 src_page = VM_PAGE_NULL;
1158 vm_object_paging_end(src_page_object);
1159 vm_object_unlock(src_page_object);
1160 if (top_page) {
1161 vm_object_t top_object;
1162
1163 top_object = VM_PAGE_OBJECT(top_page);
1164 vm_object_lock(top_object);
1165 VM_PAGE_FREE(top_page);
1166 top_page = VM_PAGE_NULL;
1167 vm_object_paging_end(top_object);
1168 vm_object_unlock(top_object);
1169 }
1170 }
1171 }
1172 if (num_subpg_signed > 0) {
1173 /* some code-signing involved with this 16K page */
1174 if (num_subpg_tainted > 0) {
1175 /* a tainted subpage taints entire 16K page */
1176 UPL_SET_CS_TAINTED(upl_pl,
1177 cur_offset / PAGE_SIZE,
1178 VMP_CS_ALL_TRUE);
1179 /* also mark as "validated" for consisteny */
1180 UPL_SET_CS_VALIDATED(upl_pl,
1181 cur_offset / PAGE_SIZE,
1182 VMP_CS_ALL_TRUE);
1183 } else if (num_subpg_validated == num_subpg_signed) {
1184 /*
1185 * All the code-signed 4K subpages of this
1186 * 16K page are validated: our 16K page is
1187 * considered validated.
1188 */
1189 UPL_SET_CS_VALIDATED(upl_pl,
1190 cur_offset / PAGE_SIZE,
1191 VMP_CS_ALL_TRUE);
1192 }
1193 if (num_subpg_nx > 0) {
1194 UPL_SET_CS_NX(upl_pl,
1195 cur_offset / PAGE_SIZE,
1196 VMP_CS_ALL_TRUE);
1197 }
1198 }
1199 }
1200
1201 done:
1202 if (upl != NULL) {
1203 /* clean up the UPL */
1204
1205 /*
1206 * The pages are currently dirty because we've just been
1207 * writing on them, but as far as we're concerned, they're
1208 * clean since they contain their "original" contents as
1209 * provided by us, the pager.
1210 * Tell the UPL to mark them "clean".
1211 */
1212 upl_clear_dirty(upl, TRUE);
1213
1214 /* abort or commit the UPL */
1215 if (retval != KERN_SUCCESS) {
1216 upl_abort(upl, 0);
1217 if (retval == KERN_ABORTED) {
1218 wait_result_t wait_result;
1219
1220 /*
1221 * We aborted the fault and did not provide
1222 * any contents for the requested pages but
1223 * the pages themselves are not invalid, so
1224 * let's return success and let the caller
1225 * retry the fault, in case it might succeed
1226 * later (when the decryption code is up and
1227 * running in the kernel, for example).
1228 */
1229 retval = KERN_SUCCESS;
1230 /*
1231 * Wait a little bit first to avoid using
1232 * too much CPU time retrying and failing
1233 * the same fault over and over again.
1234 */
1235 wait_result = assert_wait_timeout(
1236 (event_t) fourk_pager_data_request,
1237 THREAD_UNINT,
1238 10000, /* 10ms */
1239 NSEC_PER_USEC);
1240 assert(wait_result == THREAD_WAITING);
1241 wait_result = thread_block(THREAD_CONTINUE_NULL);
1242 assert(wait_result == THREAD_TIMED_OUT);
1243 }
1244 } else {
1245 boolean_t empty;
1246 assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
1247 "upl %p offset 0x%llx size 0x%x",
1248 upl, upl->u_offset, upl->u_size);
1249 upl_commit_range(upl, 0, upl->u_size,
1250 UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
1251 upl_pl, pl_count, &empty);
1252 }
1253
1254 /* and deallocate the UPL */
1255 upl_deallocate(upl);
1256 upl = NULL;
1257 }
1258 if (kernel_mapping != 0) {
1259 /* clean up the mapping of the source and destination pages */
1260 kr = vm_map_remove(kernel_map,
1261 kernel_mapping,
1262 kernel_mapping + (2 * PAGE_SIZE_64),
1263 VM_MAP_REMOVE_NO_FLAGS);
1264 assert(kr == KERN_SUCCESS);
1265 kernel_mapping = 0;
1266 src_vaddr = 0;
1267 dst_vaddr = 0;
1268 }
1269
1270 return retval;
1271 }
1272
1273
1274
1275 kern_return_t
fourk_pager_populate(memory_object_t mem_obj,boolean_t overwrite,int index,vm_object_t new_backing_object,vm_object_offset_t new_backing_offset,vm_object_t * old_backing_object,vm_object_offset_t * old_backing_offset)1276 fourk_pager_populate(
1277 memory_object_t mem_obj,
1278 boolean_t overwrite,
1279 int index,
1280 vm_object_t new_backing_object,
1281 vm_object_offset_t new_backing_offset,
1282 vm_object_t *old_backing_object,
1283 vm_object_offset_t *old_backing_offset)
1284 {
1285 fourk_pager_t pager;
1286
1287 pager = fourk_pager_lookup(mem_obj);
1288 if (pager == NULL) {
1289 return KERN_INVALID_ARGUMENT;
1290 }
1291
1292 assert(os_ref_get_count_raw(&pager->fourk_pgr_hdr_ref) > 0);
1293 assert(pager->fourk_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL);
1294
1295 if (index < 0 || index > FOURK_PAGER_SLOTS) {
1296 return KERN_INVALID_ARGUMENT;
1297 }
1298
1299 if (!overwrite &&
1300 (pager->slots[index].backing_object != (vm_object_t) -1 ||
1301 pager->slots[index].backing_offset != (vm_object_offset_t) -1)) {
1302 return KERN_INVALID_ADDRESS;
1303 }
1304
1305 *old_backing_object = pager->slots[index].backing_object;
1306 *old_backing_offset = pager->slots[index].backing_offset;
1307
1308 pager->slots[index].backing_object = new_backing_object;
1309 pager->slots[index].backing_offset = new_backing_offset;
1310
1311 return KERN_SUCCESS;
1312 }
1313