/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_map.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory mapping module.
 */

#include <mach/vm_types.h>
#include <mach_assert.h>

#include <vm/vm_options.h>

#include <libkern/OSAtomic.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/vm_statistics.h>
#include <mach/memory_object.h>
#include <mach/mach_vm.h>
#include <machine/cpu_capabilities.h>
#include <mach/sdt.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/counter.h>
#include <kern/exc_guard.h>
#include <kern/kalloc.h>
#include <kern/zalloc_internal.h>

#include <vm/cpm.h>
#include <vm/vm_compressor.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
#include <vm/vm_memtag.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_reclaim_internal.h>

#include <vm/vm_shared_region.h>
#include <vm/vm_map_store.h>

#include <san/kasan.h>

#include <sys/resource.h>
#include <sys/random.h>
#include <sys/codesign.h>
#include <sys/code_signing.h>
#include <sys/mman.h>
#include <sys/reboot.h>
#include <sys/kdebug_triage.h>

#include <libkern/section_keywords.h>

#if DEVELOPMENT || DEBUG
extern int proc_selfcsflags(void);
int vm_log_xnu_user_debug = 0;
int panic_on_unsigned_execute = 0;
int panic_on_mlock_failure = 0;
#endif /* DEVELOPMENT || DEBUG */

#if MACH_ASSERT
int debug4k_filter = 0;
char debug4k_proc_name[1024] = "";
int debug4k_proc_filter = (int)-1 & ~(1 << __DEBUG4K_FAULT);
int debug4k_panic_on_misaligned_sharing = 0;
const char *debug4k_category_name[] = {
	"error",        /* 0 */
	"life",         /* 1 */
	"load",         /* 2 */
	"fault",        /* 3 */
	"copy",         /* 4 */
	"share",        /* 5 */
	"adjust",       /* 6 */
	"pmap",         /* 7 */
	"mementry",     /* 8 */
	"iokit",        /* 9 */
	"upl",          /* 10 */
	"exc",          /* 11 */
	"vfs"           /* 12 */
};
#endif /* MACH_ASSERT */
int debug4k_no_cow_copyin = 0;


#if __arm64__
extern const int fourk_binary_compatibility_unsafe;
extern const int fourk_binary_compatibility_allow_wx;
#endif /* __arm64__ */
extern void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);
extern char *proc_best_name(struct proc *p);

#if VM_MAP_DEBUG_APPLE_PROTECT
int vm_map_debug_apple_protect = 0;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
#if VM_MAP_DEBUG_FOURK
int vm_map_debug_fourk = 0;
#endif /* VM_MAP_DEBUG_FOURK */

#if DEBUG || DEVELOPMENT
static TUNABLE(bool, vm_map_executable_immutable,
    "vm_map_executable_immutable", true);
#else
#define vm_map_executable_immutable true
#endif

os_refgrp_decl(static, map_refgrp, "vm_map", NULL);

extern u_int32_t random(void);  /* from <libkern/libkern.h> */

/*
 * Internal prototypes
 */

typedef struct vm_map_zap {
	vm_map_entry_t          vmz_head;
	vm_map_entry_t         *vmz_tail;
} *vm_map_zap_t;

#define VM_MAP_ZAP_DECLARE(zap) \
	struct vm_map_zap zap = { .vmz_tail = &zap.vmz_head }
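
/*
 * Minimal usage sketch of the zap list above (illustrative only, not
 * compiled; assumes a map entry "entry" in scope).  The helper routines
 * it uses are defined later in this file.
 */
#if 0 /* example */
	VM_MAP_ZAP_DECLARE(zap);

	/* entries unlinked from a map are queued at the tail... */
	vm_map_zap_append(&zap, entry);

	/* ...and walked head-first once it is safe to dispose of them */
	for (vm_map_entry_t e = vm_map_zap_first_entry(&zap);
	    e != VM_MAP_ENTRY_NULL;
	    e = e->vme_next) {
		/* dispose of e */
	}
#endif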

static vm_map_entry_t   vm_map_entry_insert(
	vm_map_t                map,
	vm_map_entry_t          insp_entry,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_map_kernel_flags_t   vmk_flags,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance,
	boolean_t               clear_map_aligned);

static void vm_map_simplify_range(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end);   /* forward */

static boolean_t        vm_map_range_check(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_entry_t  *entry);

static void vm_map_submap_pmap_clean(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        sub_map,
	vm_map_offset_t offset);

static void             vm_map_pmap_enter(
	vm_map_t                map,
	vm_map_offset_t         addr,
	vm_map_offset_t         end_addr,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection);

static void             _vm_map_clip_end(
	struct vm_map_header    *map_header,
	vm_map_entry_t          entry,
	vm_map_offset_t         end);

static void             _vm_map_clip_start(
	struct vm_map_header    *map_header,
	vm_map_entry_t          entry,
	vm_map_offset_t         start);

static kmem_return_t vm_map_delete(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vmr_flags_t     flags,
	kmem_guard_t    guard,
	vm_map_zap_t    zap);

static void             vm_map_copy_insert(
	vm_map_t        map,
	vm_map_entry_t  after_where,
	vm_map_copy_t   copy);

static kern_return_t    vm_map_copy_overwrite_unaligned(
	vm_map_t        dst_map,
	vm_map_entry_t  entry,
	vm_map_copy_t   copy,
	vm_map_address_t start,
	boolean_t       discard_on_success);

static kern_return_t    vm_map_copy_overwrite_aligned(
	vm_map_t        dst_map,
	vm_map_entry_t  tmp_entry,
	vm_map_copy_t   copy,
	vm_map_offset_t start,
	pmap_t          pmap);

static kern_return_t    vm_map_copyin_kernel_buffer(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       src_destroy,
	vm_map_copy_t   *copy_result);  /* OUT */

static kern_return_t    vm_map_copyout_kernel_buffer(
	vm_map_t        map,
	vm_map_address_t *addr, /* IN/OUT */
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size,
	boolean_t       overwrite,
	boolean_t       consume_on_success);

static void             vm_map_fork_share(
	vm_map_t        old_map,
	vm_map_entry_t  old_entry,
	vm_map_t        new_map);

static boolean_t        vm_map_fork_copy(
	vm_map_t        old_map,
	vm_map_entry_t  *old_entry_p,
	vm_map_t        new_map,
	int             vm_map_copyin_flags);

static kern_return_t    vm_map_wire_nested(
	vm_map_t                   map,
	vm_map_offset_t            start,
	vm_map_offset_t            end,
	vm_prot_t                  caller_prot,
	vm_tag_t                   tag,
	boolean_t                  user_wire,
	pmap_t                     map_pmap,
	vm_map_offset_t            pmap_addr,
	ppnum_t                    *physpage_p);

static kern_return_t    vm_map_unwire_nested(
	vm_map_t                   map,
	vm_map_offset_t            start,
	vm_map_offset_t            end,
	boolean_t                  user_wire,
	pmap_t                     map_pmap,
	vm_map_offset_t            pmap_addr);

static kern_return_t    vm_map_overwrite_submap_recurse(
	vm_map_t                   dst_map,
	vm_map_offset_t            dst_addr,
	vm_map_size_t              dst_size);

static kern_return_t    vm_map_copy_overwrite_nested(
	vm_map_t                   dst_map,
	vm_map_offset_t            dst_addr,
	vm_map_copy_t              copy,
	boolean_t                  interruptible,
	pmap_t                     pmap,
	boolean_t                  discard_on_success);

static kern_return_t    vm_map_remap_extract(
	vm_map_t                map,
	vm_map_offset_t         addr,
	vm_map_size_t           size,
	boolean_t               copy,
	vm_map_copy_t           map_copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);

static kern_return_t    vm_map_remap_range_allocate(
	vm_map_t                map,
	vm_map_address_t        *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_entry_t          *map_entry,
	vm_map_zap_t            zap_list);

static void             vm_map_region_look_for_page(
	vm_map_t                   map,
	vm_map_offset_t            va,
	vm_object_t                object,
	vm_object_offset_t         offset,
	int                        max_refcnt,
	unsigned short             depth,
	vm_region_extended_info_t  extended,
	mach_msg_type_number_t     count);

static int              vm_map_region_count_obj_refs(
	vm_map_entry_t             entry,
	vm_object_t                object);


static kern_return_t    vm_map_willneed(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end);

static kern_return_t    vm_map_reuse_pages(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end);

static kern_return_t    vm_map_reusable_pages(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end);

static kern_return_t    vm_map_can_reuse(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end);

static kern_return_t    vm_map_random_address_for_size(
	vm_map_t                map,
	vm_map_offset_t        *address,
	vm_map_size_t           size,
	vm_map_kernel_flags_t   vmk_flags);


#if CONFIG_MAP_RANGES

static vm_map_range_id_t vm_map_user_range_resolve(
	vm_map_t                map,
	mach_vm_address_t       addr,
	mach_vm_address_t       size,
	mach_vm_range_t         range);

#endif /* CONFIG_MAP_RANGES */
#if MACH_ASSERT
static kern_return_t    vm_map_pageout(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end);
#endif /* MACH_ASSERT */

kern_return_t vm_map_corpse_footprint_collect(
	vm_map_t        old_map,
	vm_map_entry_t  old_entry,
	vm_map_t        new_map);
void vm_map_corpse_footprint_collect_done(
	vm_map_t        new_map);
void vm_map_corpse_footprint_destroy(
	vm_map_t        map);
kern_return_t vm_map_corpse_footprint_query_page_info(
	vm_map_t        map,
	vm_map_offset_t va,
	int             *disposition_p);
void vm_map_footprint_query_page_info(
	vm_map_t        map,
	vm_map_entry_t  map_entry,
	vm_map_offset_t curr_s_offset,
	int             *disposition_p);

#if CONFIG_MAP_RANGES
static void vm_map_range_map_init(void);
#endif /* CONFIG_MAP_RANGES */

pid_t find_largest_process_vm_map_entries(void);

extern int exit_with_guard_exception(void *p, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode);

/*
 * Macros to copy a vm_map_entry. We must be careful to correctly
 * manage the wired page count. vm_map_entry_copy() creates a new
 * map entry to the same memory - the wired count in the new entry
 * must be set to zero. vm_map_entry_copy_full() creates a new
 * entry that is identical to the old entry.  This preserves the
 * wire count; it's used for map splitting and zone changing in
 * vm_map_copyout.
 */

static inline void
vm_map_entry_copy_csm_assoc(
	vm_map_t map __unused,
	vm_map_entry_t new __unused,
	vm_map_entry_t old __unused)
{
#if CODE_SIGNING_MONITOR
	/* when code signing monitor is enabled, we want to reset on copy */
	new->csm_associated = FALSE;
#else
	/* when code signing monitor is not enabled, assert as a sanity check */
	assert(new->csm_associated == FALSE);
#endif
#if DEVELOPMENT || DEBUG
	if (new->vme_xnu_user_debug && vm_log_xnu_user_debug) {
		printf("FBDP %d[%s] %s:%d map %p entry %p [ 0x%llx 0x%llx ] resetting vme_xnu_user_debug\n",
		    proc_selfpid(),
		    (get_bsdtask_info(current_task())
		    ? proc_name_address(get_bsdtask_info(current_task()))
		    : "?"),
		    __FUNCTION__, __LINE__,
		    map, new, new->vme_start, new->vme_end);
	}
#endif /* DEVELOPMENT || DEBUG */
	new->vme_xnu_user_debug = FALSE;
}

/*
 * The "used_for_jit" flag was copied from OLD to NEW in vm_map_entry_copy().
 * But for security reasons on some platforms, we don't want the
 * new mapping to be "used for jit", so we reset the flag here.
 */
static inline void
vm_map_entry_copy_code_signing(
	vm_map_t map,
	vm_map_entry_t new,
	vm_map_entry_t old __unused)
{
	if (VM_MAP_POLICY_ALLOW_JIT_COPY(map)) {
		assert(new->used_for_jit == old->used_for_jit);
	} else {
		new->used_for_jit = FALSE;
	}
}

static inline void
vm_map_entry_copy_full(
	vm_map_entry_t new,
	vm_map_entry_t old)
{
#if MAP_ENTRY_CREATION_DEBUG
	btref_put(new->vme_creation_bt);
	btref_retain(old->vme_creation_bt);
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	btref_put(new->vme_insertion_bt);
	btref_retain(old->vme_insertion_bt);
#endif
#if VM_BTLOG_TAGS
	/* Discard the btref that might be in the new entry */
	if (new->vme_kernel_object) {
		btref_put(new->vme_tag_btref);
	}
	/* Retain the btref in the old entry to account for its copy */
	if (old->vme_kernel_object) {
		btref_retain(old->vme_tag_btref);
	}
#endif /* VM_BTLOG_TAGS */
	*new = *old;
}

static inline void
vm_map_entry_copy(
	vm_map_t map,
	vm_map_entry_t new,
	vm_map_entry_t old)
{
	vm_map_entry_copy_full(new, old);

	new->is_shared = FALSE;
	new->needs_wakeup = FALSE;
	new->in_transition = FALSE;
	new->wired_count = 0;
	new->user_wired_count = 0;
	new->vme_permanent = FALSE;
	vm_map_entry_copy_code_signing(map, new, old);
	vm_map_entry_copy_csm_assoc(map, new, old);
	if (new->iokit_acct) {
		assertf(!new->use_pmap, "old %p new %p\n", old, new);
		new->iokit_acct = FALSE;
		new->use_pmap = TRUE;
	}
	new->vme_resilient_codesign = FALSE;
	new->vme_resilient_media = FALSE;
	new->vme_atomic = FALSE;
	new->vme_no_copy_on_read = FALSE;
}

/*
 * Normal lock_read_to_write() returns FALSE/0 on failure.
 * These functions evaluate to zero on success and non-zero value on failure.
 */
__attribute__((always_inline))
int
vm_map_lock_read_to_write(vm_map_t map)
{
	if (lck_rw_lock_shared_to_exclusive(&(map)->lock)) {
		DTRACE_VM(vm_map_lock_upgrade);
		return 0;
	}
	return 1;
}
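
/*
 * Usage sketch (illustrative; not a caller from this file): because
 * vm_map_lock_read_to_write() returns zero on success, a failed
 * upgrade (which drops the read lock) is typically retried by taking
 * the write lock outright.
 */
#if 0 /* example */
	vm_map_lock_read(map);
	/* ... read-only inspection of the map ... */
	if (vm_map_lock_read_to_write(map)) {
		/* upgrade failed: the read lock was dropped, start over */
		vm_map_lock(map);
	}
	/* ... the map is now locked exclusively ... */
	vm_map_unlock(map);
#endif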

__attribute__((always_inline))
boolean_t
vm_map_try_lock(vm_map_t map)
{
	if (lck_rw_try_lock_exclusive(&(map)->lock)) {
		DTRACE_VM(vm_map_lock_w);
		return TRUE;
	}
	return FALSE;
}

__attribute__((always_inline))
boolean_t
vm_map_try_lock_read(vm_map_t map)
{
	if (lck_rw_try_lock_shared(&(map)->lock)) {
		DTRACE_VM(vm_map_lock_r);
		return TRUE;
	}
	return FALSE;
}

/*!
 * @function kdp_vm_map_is_acquired_exclusive
 *
 * @abstract
 * Checks if vm map is acquired exclusive.
 *
 * @discussion
 * NOT SAFE: To be used only by kernel debugger.
 *
 * @param map map to check
 *
 * @returns TRUE if the map is acquired exclusively.
 */
boolean_t
kdp_vm_map_is_acquired_exclusive(vm_map_t map)
{
	return kdp_lck_rw_lock_is_acquired_exclusive(&map->lock);
}

/*
 * Routines to get the page size the caller should
 * use while inspecting the target address space.
 * Use the "_safely" variant if the caller is dealing with a user-provided
 * array whose size depends on the page size, to avoid any overflow or
 * underflow of a user-allocated buffer.
 */
int
vm_self_region_page_shift_safely(
	vm_map_t target_map)
{
	int effective_page_shift = 0;

	if (PAGE_SIZE == (4096)) {
		/* x86_64 and 4k watches: always use 4k */
		return PAGE_SHIFT;
	}
	/* did caller provide an explicit page size for this thread to use? */
	effective_page_shift = thread_self_region_page_shift();
	if (effective_page_shift) {
		/* use the explicitly-provided page size */
		return effective_page_shift;
	}
	/* no explicit page size: use the caller's page size... */
	effective_page_shift = VM_MAP_PAGE_SHIFT(current_map());
	if (effective_page_shift == VM_MAP_PAGE_SHIFT(target_map)) {
		/* page size match: safe to use */
		return effective_page_shift;
	}
	/* page size mismatch */
	return -1;
}
int
vm_self_region_page_shift(
	vm_map_t target_map)
{
	int effective_page_shift;

	effective_page_shift = vm_self_region_page_shift_safely(target_map);
	if (effective_page_shift == -1) {
		/* no safe value but OK to guess for caller */
		effective_page_shift = MIN(VM_MAP_PAGE_SHIFT(current_map()),
		    VM_MAP_PAGE_SHIFT(target_map));
	}
	return effective_page_shift;
}
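
/*
 * Usage sketch (hypothetical caller; "region_size" and "page_count" are
 * illustrative names): sizing a per-page output array with the "_safely"
 * variant, so a page-size mismatch is reported instead of guessed at.
 */
#if 0 /* example */
	int shift = vm_self_region_page_shift_safely(target_map);
	if (shift == -1) {
		return KERN_INVALID_ARGUMENT; /* page size mismatch */
	}
	page_count = (mach_msg_type_number_t)(region_size >> shift);
#endif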


/*
 *	Decide if we want to allow processes to execute from their data or stack areas.
 *	override_nx() returns true if we do.  Data/stack execution can be enabled independently
 *	for 32 and 64 bit processes.  Set the VM_ABI_32 or VM_ABI_64 flags in allow_data_exec
 *	or allow_stack_exec to enable data execution for that type of data area for that particular
 *	ABI (or both by or'ing the flags together).  These are initialized in the architecture
 *	specific pmap files since the default behavior varies according to architecture.  The
 *	main reason it varies is because of the need to provide binary compatibility with old
 *	applications that were written before these restrictions came into being.  In the old
 *	days, an app could execute anything it could read, but this has slowly been tightened
 *	up over time.  The default behavior is:
 *
 *	32-bit PPC apps		may execute from both stack and data areas
 *	32-bit Intel apps	may execute from data areas but not stack
 *	64-bit PPC/Intel apps	may not execute from either data or stack
 *
 *	An application on any architecture may override these defaults by explicitly
 *	adding PROT_EXEC permission to the page in question with the mprotect(2)
 *	system call.  This code here just determines what happens when an app tries to
 *	execute from a page that lacks execute permission.
 *
 *	Note that allow_data_exec or allow_stack_exec may also be modified by sysctl to change the
 *	default behavior for both 32 and 64 bit apps on a system-wide basis. Furthermore,
 *	a Mach-O header flag bit (MH_NO_HEAP_EXECUTION) can be used to forcibly disallow
 *	execution from data areas for a particular binary even if the arch normally permits it. As
 *	a final wrinkle, a posix_spawn attribute flag can be used to negate this opt-in header bit
 *	to support some complicated use cases, notably browsers with out-of-process plugins that
 *	are not all NX-safe.
 */

extern int allow_data_exec, allow_stack_exec;

int
override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */
{
	int current_abi;

	if (map->pmap == kernel_pmap) {
		return FALSE;
	}

	/*
	 * Determine if the app is running in 32 or 64 bit mode.
	 */

	if (vm_map_is_64bit(map)) {
		current_abi = VM_ABI_64;
	} else {
		current_abi = VM_ABI_32;
	}

	/*
	 * Determine if we should allow the execution based on whether it's a
	 * stack or data area and the current architecture.
	 */

	if (user_tag == VM_MEMORY_STACK) {
		return allow_stack_exec & current_abi;
	}

	return (allow_data_exec & current_abi) && (map->map_disallow_data_exec == FALSE);
}
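
/*
 * Illustration (values are hypothetical; the real defaults live in the
 * architecture-specific pmap files): enabling data execution for 32-bit
 * processes only would be expressed as
 *
 *	allow_data_exec  = VM_ABI_32;
 *	allow_stack_exec = 0;
 *
 * so that override_nx() above returns non-zero only when a 32-bit map
 * faults on a data (non-stack) page.
 */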


/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	abutting entries.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a vm object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_COPY, struct vm_map_copy);

#define VM_MAP_ZONE_NAME        "maps"
#define VM_MAP_ZFLAGS           (ZC_NOENCRYPT | ZC_VM)

#define VM_MAP_ENTRY_ZONE_NAME  "VM map entries"
#define VM_MAP_ENTRY_ZFLAGS     (ZC_NOENCRYPT | ZC_VM)

#define VM_MAP_HOLES_ZONE_NAME  "VM map holes"
#define VM_MAP_HOLES_ZFLAGS     (ZC_NOENCRYPT | ZC_VM)

/*
 * Asserts that a vm_map_copy object is coming from the
 * vm_map_copy_zone to ensure that it isn't a fake constructed
 * anywhere else.
 */
void
vm_map_copy_require(struct vm_map_copy *copy)
{
	zone_id_require(ZONE_ID_VM_MAP_COPY, sizeof(struct vm_map_copy), copy);
}

/*
 *	vm_map_require:
 *
 *	Ensures that the argument is memory allocated from the genuine
 *	vm map zone. (See zone_id_require_allow_foreign).
 */
void
vm_map_require(vm_map_t map)
{
	zone_id_require(ZONE_ID_VM_MAP, sizeof(struct _vm_map), map);
}

#define VM_MAP_EARLY_COUNT_MAX         16
static __startup_data vm_offset_t      map_data;
static __startup_data vm_size_t        map_data_size;
static __startup_data vm_offset_t      kentry_data;
static __startup_data vm_size_t        kentry_data_size;
static __startup_data vm_offset_t      map_holes_data;
static __startup_data vm_size_t        map_holes_data_size;
static __startup_data vm_map_t        *early_map_owners[VM_MAP_EARLY_COUNT_MAX];
static __startup_data uint32_t         early_map_count;

#if XNU_TARGET_OS_OSX
#define         NO_COALESCE_LIMIT  ((1024 * 128) - 1)
#else /* XNU_TARGET_OS_OSX */
#define         NO_COALESCE_LIMIT  0
#endif /* XNU_TARGET_OS_OSX */

/* Skip acquiring locks if we're in the midst of a kernel core dump */
unsigned int not_in_kdp = 1;

unsigned int vm_map_set_cache_attr_count = 0;

kern_return_t
vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va)
{
	vm_map_entry_t  map_entry;
	vm_object_t     object;
	kern_return_t   kr = KERN_SUCCESS;

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry(map, va, &map_entry) ||
	    map_entry->is_sub_map) {
		/*
		 * that memory is not properly mapped
		 */
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
	object = VME_OBJECT(map_entry);

	if (object == VM_OBJECT_NULL) {
		/*
		 * there should be a VM object here at this point
		 */
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
	vm_object_lock(object);
	object->set_cache_attr = TRUE;
	vm_object_unlock(object);

	vm_map_set_cache_attr_count++;
done:
	vm_map_unlock_read(map);

	return kr;
}


#if CONFIG_CODE_DECRYPTION
/*
 * vm_map_apple_protected:
 * This remaps the requested part of the object with an object backed by
 * the decrypting pager.
 * crypt_info contains entry points and session data for the crypt module.
 * The crypt_info block will be copied by vm_map_apple_protected. The data structures
 * referenced in crypt_info must remain valid until crypt_info->crypt_end() is called.
 */
kern_return_t
vm_map_apple_protected(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_offset_t      crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	uint32_t                cryptid)
{
	boolean_t       map_locked;
	kern_return_t   kr;
	vm_map_entry_t  map_entry;
	struct vm_map_entry tmp_entry;
	memory_object_t unprotected_mem_obj;
	vm_object_t     protected_object;
	vm_map_offset_t map_addr;
	vm_map_offset_t start_aligned, end_aligned;
	vm_object_offset_t      crypto_start, crypto_end;
	boolean_t       cache_pager;

	map_locked = FALSE;
	unprotected_mem_obj = MEMORY_OBJECT_NULL;

	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
		return KERN_INVALID_ADDRESS;
	}
	start_aligned = vm_map_trunc_page(start, PAGE_MASK_64);
	end_aligned = vm_map_round_page(end, PAGE_MASK_64);
	start_aligned = vm_map_trunc_page(start_aligned, VM_MAP_PAGE_MASK(map));
	end_aligned = vm_map_round_page(end_aligned, VM_MAP_PAGE_MASK(map));

#if __arm64__
	/*
	 * "start" and "end" might be 4K-aligned but not 16K-aligned,
	 * so we might have to loop and establish up to 3 mappings:
	 *
	 * + the first 16K-page, which might overlap with the previous
	 *   4K-aligned mapping,
	 * + the center,
	 * + the last 16K-page, which might overlap with the next
	 *   4K-aligned mapping.
	 * Each of these mappings might be backed by a vnode pager (if
	 * properly page-aligned) or a "fourk_pager", itself backed by a
	 * vnode pager (if 4K-aligned but not page-aligned).
	 */
#endif /* __arm64__ */

	for (map_addr = start_aligned;
	    map_addr < end;
	    map_addr = tmp_entry.vme_end) {
		vm_map_lock(map);
		map_locked = TRUE;

		/* lookup the protected VM object */
		if (!vm_map_lookup_entry(map,
		    map_addr,
		    &map_entry) ||
		    map_entry->is_sub_map ||
		    VME_OBJECT(map_entry) == VM_OBJECT_NULL) {
			/* that memory is not properly mapped */
			kr = KERN_INVALID_ARGUMENT;
			goto done;
		}

		/* ensure mapped memory is mapped as executable,
		 * except for the model decryption flow */
		if ((cryptid != CRYPTID_MODEL_ENCRYPTION) &&
		    !(map_entry->protection & VM_PROT_EXECUTE)) {
			kr = KERN_INVALID_ARGUMENT;
			goto done;
		}

		/* get the protected object to be decrypted */
		protected_object = VME_OBJECT(map_entry);
		if (protected_object == VM_OBJECT_NULL) {
			/* there should be a VM object here at this point */
			kr = KERN_INVALID_ARGUMENT;
			goto done;
		}
		/* ensure protected object stays alive while map is unlocked */
		vm_object_reference(protected_object);

		/* limit the map entry to the area we want to cover */
		vm_map_clip_start(map, map_entry, start_aligned);
		vm_map_clip_end(map, map_entry, end_aligned);

		tmp_entry = *map_entry;
		map_entry = VM_MAP_ENTRY_NULL; /* not valid after unlocking map */
		vm_map_unlock(map);
		map_locked = FALSE;

		/*
		 * This map entry might be only partially encrypted
		 * (if not fully "page-aligned").
		 */
		crypto_start = 0;
		crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;
		if (tmp_entry.vme_start < start) {
			if (tmp_entry.vme_start != start_aligned) {
				kr = KERN_INVALID_ADDRESS;
			}
			crypto_start += (start - tmp_entry.vme_start);
		}
		if (tmp_entry.vme_end > end) {
			if (tmp_entry.vme_end != end_aligned) {
				kr = KERN_INVALID_ADDRESS;
			}
			crypto_end -= (tmp_entry.vme_end - end);
		}

		/*
		 * This "extra backing offset" is needed to get the decryption
		 * routine to use the right key.  It adjusts for the possibly
		 * relative offset of an interposed "4K" pager...
		 */
		if (crypto_backing_offset == (vm_object_offset_t) -1) {
			crypto_backing_offset = VME_OFFSET(&tmp_entry);
		}

		cache_pager = TRUE;
#if XNU_TARGET_OS_OSX
		if (vm_map_is_alien(map)) {
			cache_pager = FALSE;
		}
#endif /* XNU_TARGET_OS_OSX */

		/*
		 * Lookup (and create if necessary) the protected memory object
		 * matching that VM object.
		 * If successful, this also grabs a reference on the memory object,
		 * to guarantee that it doesn't go away before we get a chance to map
		 * it.
		 */
		unprotected_mem_obj = apple_protect_pager_setup(
			protected_object,
			VME_OFFSET(&tmp_entry),
			crypto_backing_offset,
			crypt_info,
			crypto_start,
			crypto_end,
			cache_pager);

		/* release extra ref on protected object */
		vm_object_deallocate(protected_object);

		if (unprotected_mem_obj == NULL) {
			kr = KERN_FAILURE;
			goto done;
		}

		/* can overwrite an immutable mapping */
		vm_map_kernel_flags_t vmk_flags = {
			.vmf_fixed = true,
			.vmf_overwrite = true,
			.vmkf_overwrite_immutable = true,
		};
#if __arm64__
		if (tmp_entry.used_for_jit &&
		    (VM_MAP_PAGE_SHIFT(map) != FOURK_PAGE_SHIFT ||
		    PAGE_SHIFT != FOURK_PAGE_SHIFT) &&
		    fourk_binary_compatibility_unsafe &&
		    fourk_binary_compatibility_allow_wx) {
			printf("** FOURK_COMPAT [%d]: "
			    "allowing write+execute at 0x%llx\n",
			    proc_selfpid(), tmp_entry.vme_start);
			vmk_flags.vmkf_map_jit = TRUE;
		}
#endif /* __arm64__ */

		/* map this memory object in place of the current one */
		map_addr = tmp_entry.vme_start;
		kr = vm_map_enter_mem_object(map,
		    &map_addr,
		    (tmp_entry.vme_end -
		    tmp_entry.vme_start),
		    (mach_vm_offset_t) 0,
		    vmk_flags,
		    (ipc_port_t)(uintptr_t) unprotected_mem_obj,
		    0,
		    TRUE,
		    tmp_entry.protection,
		    tmp_entry.max_protection,
		    tmp_entry.inheritance);
		assertf(kr == KERN_SUCCESS,
		    "kr = 0x%x\n", kr);
		assertf(map_addr == tmp_entry.vme_start,
		    "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
		    (uint64_t)map_addr,
		    (uint64_t) tmp_entry.vme_start,
		    &tmp_entry);

#if VM_MAP_DEBUG_APPLE_PROTECT
		if (vm_map_debug_apple_protect) {
			printf("APPLE_PROTECT: map %p [0x%llx:0x%llx] pager %p:"
			    " backing:[object:%p,offset:0x%llx,"
			    "crypto_backing_offset:0x%llx,"
			    "crypto_start:0x%llx,crypto_end:0x%llx]\n",
			    map,
			    (uint64_t) map_addr,
			    (uint64_t) (map_addr + (tmp_entry.vme_end -
			    tmp_entry.vme_start)),
			    unprotected_mem_obj,
			    protected_object,
			    VME_OFFSET(&tmp_entry),
			    crypto_backing_offset,
			    crypto_start,
			    crypto_end);
		}
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */

		/*
		 * Release the reference obtained by
		 * apple_protect_pager_setup().
		 * The mapping (if it succeeded) is now holding a reference on
		 * the memory object.
		 */
		memory_object_deallocate(unprotected_mem_obj);
		unprotected_mem_obj = MEMORY_OBJECT_NULL;

		/* continue with next map entry */
		crypto_backing_offset += (tmp_entry.vme_end -
		    tmp_entry.vme_start);
		crypto_backing_offset -= crypto_start;
	}
	kr = KERN_SUCCESS;

done:
	if (map_locked) {
		vm_map_unlock(map);
	}
	return kr;
}
#endif  /* CONFIG_CODE_DECRYPTION */


LCK_GRP_DECLARE(vm_map_lck_grp, "vm_map");
LCK_ATTR_DECLARE(vm_map_lck_attr, 0, 0);
LCK_ATTR_DECLARE(vm_map_lck_rw_attr, 0, LCK_ATTR_DEBUG);

#if XNU_TARGET_OS_OSX
#define MALLOC_NO_COW_DEFAULT 1
#define MALLOC_NO_COW_EXCEPT_FORK_DEFAULT 1
#else /* XNU_TARGET_OS_OSX */
#define MALLOC_NO_COW_DEFAULT 1
#define MALLOC_NO_COW_EXCEPT_FORK_DEFAULT 0
#endif /* XNU_TARGET_OS_OSX */
TUNABLE(int, malloc_no_cow, "malloc_no_cow", MALLOC_NO_COW_DEFAULT);
TUNABLE(int, malloc_no_cow_except_fork, "malloc_no_cow_except_fork", MALLOC_NO_COW_EXCEPT_FORK_DEFAULT);
uint64_t vm_memory_malloc_no_cow_mask = 0ULL;
#if DEBUG
int vm_check_map_sanity = 0;
#endif

/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *
 *	LP32:
 *	vm_map_entry_reserved_zone:     fallback zone for kernel map entries
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */
__startup_func
void
vm_map_init(void)
{

#if MACH_ASSERT
	PE_parse_boot_argn("debug4k_filter", &debug4k_filter,
	    sizeof(debug4k_filter));
#endif /* MACH_ASSERT */

	zone_create_ext(VM_MAP_ZONE_NAME, sizeof(struct _vm_map),
	    VM_MAP_ZFLAGS, ZONE_ID_VM_MAP, NULL);

	/*
	 * Don't quarantine because we always need elements available
	 * Disallow GC on this zone... to aid the GC.
	 */
	zone_create_ext(VM_MAP_ENTRY_ZONE_NAME,
	    sizeof(struct vm_map_entry), VM_MAP_ENTRY_ZFLAGS,
	    ZONE_ID_VM_MAP_ENTRY, ^(zone_t z) {
		z->z_elems_rsv = (uint16_t)(32 *
		(ml_early_cpu_max_number() + 1));
	});

	zone_create_ext(VM_MAP_HOLES_ZONE_NAME,
	    sizeof(struct vm_map_links), VM_MAP_HOLES_ZFLAGS,
	    ZONE_ID_VM_MAP_HOLES, ^(zone_t z) {
		z->z_elems_rsv = (uint16_t)(16 * 1024 / zone_elem_outer_size(z));
	});

	zone_create_ext("VM map copies", sizeof(struct vm_map_copy),
	    ZC_NOENCRYPT, ZONE_ID_VM_MAP_COPY, NULL);

	/*
	 * Add the stolen memory to zones, adjust zone size and stolen counts.
	 */
	zone_cram_early(vm_map_zone, map_data, map_data_size);
	zone_cram_early(vm_map_entry_zone, kentry_data, kentry_data_size);
	zone_cram_early(vm_map_holes_zone, map_holes_data, map_holes_data_size);
1176 	printf("VM boostrap: %d maps, %d entries and %d holes available\n",
	    zone_count_free(vm_map_zone),
	    zone_count_free(vm_map_entry_zone),
	    zone_count_free(vm_map_holes_zone));

	/*
	 * Since these are covered by zones, remove them from stolen page accounting.
	 */
	VM_PAGE_MOVE_STOLEN(atop_64(map_data_size) + atop_64(kentry_data_size) + atop_64(map_holes_data_size));

#if VM_MAP_DEBUG_APPLE_PROTECT
	PE_parse_boot_argn("vm_map_debug_apple_protect",
	    &vm_map_debug_apple_protect,
	    sizeof(vm_map_debug_apple_protect));
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
#if VM_MAP_DEBUG_FOURK
	PE_parse_boot_argn("vm_map_debug_fourk",
	    &vm_map_debug_fourk,
	    sizeof(vm_map_debug_fourk));
#endif /* VM_MAP_DEBUG_FOURK */

	if (malloc_no_cow) {
		vm_memory_malloc_no_cow_mask = 0ULL;
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC;
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_SMALL;
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_MEDIUM;
#if XNU_TARGET_OS_OSX
		/*
		 * On macOS, keep copy-on-write for MALLOC_LARGE because
		 * realloc() may use vm_copy() to transfer the old contents
		 * to the new location.
		 */
#else /* XNU_TARGET_OS_OSX */
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_LARGE;
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_LARGE_REUSABLE;
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_LARGE_REUSED;
#endif /* XNU_TARGET_OS_OSX */
//		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_HUGE;
//		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_REALLOC;
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_TINY;
		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_NANO;
//		vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_TCMALLOC;
		PE_parse_boot_argn("vm_memory_malloc_no_cow_mask",
		    &vm_memory_malloc_no_cow_mask,
		    sizeof(vm_memory_malloc_no_cow_mask));
	}

#if CONFIG_MAP_RANGES
	vm_map_range_map_init();
#endif /* CONFIG_MAP_RANGES */

#if DEBUG
	PE_parse_boot_argn("vm_check_map_sanity", &vm_check_map_sanity, sizeof(vm_check_map_sanity));
	if (vm_check_map_sanity) {
		kprintf("VM sanity checking enabled\n");
	} else {
		kprintf("VM sanity checking disabled. Set bootarg vm_check_map_sanity=1 to enable\n");
	}
#endif /* DEBUG */

#if DEVELOPMENT || DEBUG
	PE_parse_boot_argn("panic_on_unsigned_execute",
	    &panic_on_unsigned_execute,
	    sizeof(panic_on_unsigned_execute));
	PE_parse_boot_argn("panic_on_mlock_failure",
	    &panic_on_mlock_failure,
	    sizeof(panic_on_mlock_failure));
#endif /* DEVELOPMENT || DEBUG */
}

__startup_func
static void
vm_map_steal_memory(void)
{
	/*
	 * We need to reserve enough memory to support bootstrapping VM maps
	 * and the zone subsystem.
	 *
	 * The VM Maps that need to function before zones can support them
	 * are the ones registered with vm_map_will_allocate_early_map(),
	 * which are:
	 * - the kernel map
	 * - the various submaps used by zones (pgz, meta, ...)
	 *
	 * We also need enough entries and holes to support them
	 * until zone_metadata_init() is called, which is when
	 * the zone allocator becomes capable of expanding dynamically.
	 *
	 * We need:
	 * - VM_MAP_EARLY_COUNT_MAX worth of VM Maps.
	 * - To allow for 3-4 entries per map, but the kernel map
	 *   needs a multiple of VM_MAP_EARLY_COUNT_MAX entries
	 *   to describe the submaps, so double it (and make it 8x too)
	 * - To allow for holes between entries,
	 *   hence needs the same budget as entries
	 */
	map_data_size = zone_get_early_alloc_size(VM_MAP_ZONE_NAME,
	    sizeof(struct _vm_map), VM_MAP_ZFLAGS,
	    VM_MAP_EARLY_COUNT_MAX);

	kentry_data_size = zone_get_early_alloc_size(VM_MAP_ENTRY_ZONE_NAME,
	    sizeof(struct vm_map_entry), VM_MAP_ENTRY_ZFLAGS,
	    8 * VM_MAP_EARLY_COUNT_MAX);

	map_holes_data_size = zone_get_early_alloc_size(VM_MAP_HOLES_ZONE_NAME,
	    sizeof(struct vm_map_links), VM_MAP_HOLES_ZFLAGS,
	    8 * VM_MAP_EARLY_COUNT_MAX);

	/*
	 * Steal a contiguous range of memory so that a simple range check
	 * can validate early addresses being freed/crammed to these
	 * zones
	 */
	map_data       = zone_early_mem_init(map_data_size + kentry_data_size +
	    map_holes_data_size);
	kentry_data    = map_data + map_data_size;
	map_holes_data = kentry_data + kentry_data_size;
}
STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, vm_map_steal_memory);
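
/*
 * Worked example of the budget above (the numbers follow directly from
 * the constants, not from measurement): with VM_MAP_EARLY_COUNT_MAX == 16,
 * vm_map_steal_memory() reserves room for 16 early maps, 8 * 16 == 128
 * map entries and 128 map holes, each rounded up to whole pages by
 * zone_get_early_alloc_size().
 */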

__startup_func
static void
vm_kernel_bootstrapped(void)
{
	zone_enable_caching(&zone_array[ZONE_ID_VM_MAP_ENTRY]);
	zone_enable_caching(&zone_array[ZONE_ID_VM_MAP_HOLES]);
	zone_enable_caching(&zone_array[ZONE_ID_VM_MAP_COPY]);

	printf("VM bootstrap done: %d maps, %d entries and %d holes left\n",
	    zone_count_free(vm_map_zone),
	    zone_count_free(vm_map_entry_zone),
	    zone_count_free(vm_map_holes_zone));
}
STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_kernel_bootstrapped);

void
vm_map_disable_hole_optimization(vm_map_t map)
{
	vm_map_entry_t  head_entry, hole_entry, next_hole_entry;

	if (map->holelistenabled) {
		head_entry = hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);

		while (hole_entry != NULL) {
			next_hole_entry = hole_entry->vme_next;

			hole_entry->vme_next = NULL;
			hole_entry->vme_prev = NULL;
			zfree_id(ZONE_ID_VM_MAP_HOLES, hole_entry);

			if (next_hole_entry == head_entry) {
				hole_entry = NULL;
			} else {
				hole_entry = next_hole_entry;
			}
		}

		map->holes_list = NULL;
		map->holelistenabled = FALSE;

		map->first_free = vm_map_first_entry(map);
		SAVE_HINT_HOLE_WRITE(map, NULL);
	}
}

boolean_t
vm_kernel_map_is_kernel(vm_map_t map)
{
	return map->pmap == kernel_pmap;
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */

extern vm_map_t vm_map_create_external(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);

vm_map_t
vm_map_create_external(
	pmap_t                  pmap,
	vm_map_offset_t         min,
	vm_map_offset_t         max,
	boolean_t               pageable)
{
	vm_map_create_options_t options = VM_MAP_CREATE_DEFAULT;

	if (pageable) {
		options |= VM_MAP_CREATE_PAGEABLE;
	}
	return vm_map_create_options(pmap, min, max, options);
}

__startup_func
void
vm_map_will_allocate_early_map(vm_map_t *owner)
{
	if (early_map_count >= VM_MAP_EARLY_COUNT_MAX) {
		panic("VM_MAP_EARLY_COUNT_MAX is too low");
	}

	early_map_owners[early_map_count++] = owner;
}

__startup_func
void
vm_map_relocate_early_maps(vm_offset_t delta)
{
	for (uint32_t i = 0; i < early_map_count; i++) {
		vm_address_t addr = (vm_address_t)*early_map_owners[i];

		*early_map_owners[i] = (vm_map_t)(addr + delta);
	}

	early_map_count = ~0u;
}

/*
 *	Routine:	vm_map_relocate_early_elem
 *
 *	Purpose:
 *		Early zone elements are allocated in a temporary part
 *		of the address space.
 *
 *		Once the zones live in their final place, the early
 *		VM maps, map entries and map holes need to be relocated.
 *
 *		It involves rewriting any vm_map_t, vm_map_entry_t or
 *		pointers to vm_map_links. Other pointers to other types
 *		are fine.
 *
 *		Fortunately, pointers to those types are self-contained
 *		in those zones, _except_ for pointers to VM maps,
 *		which are tracked during early boot and fixed with
 *		vm_map_relocate_early_maps().
 */
__startup_func
void
vm_map_relocate_early_elem(
	uint32_t                zone_id,
	vm_offset_t             new_addr,
	vm_offset_t             delta)
{
#define relocate(type_t, field)  ({ \
	typeof(((type_t)NULL)->field) *__field = &((type_t)new_addr)->field;   \
	if (*__field) {                                                        \
	        *__field = (typeof(*__field))((vm_offset_t)*__field + delta);  \
	}                                                                      \
})
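
/*
 * For illustration, relocate(vm_map_t, hint) above expands (roughly) to:
 *
 *	vm_map_entry_t *__field = &((vm_map_t)new_addr)->hint;
 *	if (*__field) {
 *	        *__field = (vm_map_entry_t)((vm_offset_t)*__field + delta);
 *	}
 *
 * i.e. each embedded pointer is slid by "delta", but only if it was
 * non-NULL.
 */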

	switch (zone_id) {
	case ZONE_ID_VM_MAP:
	case ZONE_ID_VM_MAP_ENTRY:
	case ZONE_ID_VM_MAP_HOLES:
		break;

	default:
		panic("Unexpected zone ID %d", zone_id);
	}

	if (zone_id == ZONE_ID_VM_MAP) {
		relocate(vm_map_t, hdr.links.prev);
		relocate(vm_map_t, hdr.links.next);
		((vm_map_t)new_addr)->pmap = kernel_pmap;
#ifdef VM_MAP_STORE_USE_RB
		relocate(vm_map_t, hdr.rb_head_store.rbh_root);
#endif /* VM_MAP_STORE_USE_RB */
		relocate(vm_map_t, hint);
		relocate(vm_map_t, hole_hint);
		relocate(vm_map_t, first_free);
		return;
	}

	relocate(struct vm_map_links *, prev);
	relocate(struct vm_map_links *, next);

	if (zone_id == ZONE_ID_VM_MAP_ENTRY) {
#ifdef VM_MAP_STORE_USE_RB
		relocate(vm_map_entry_t, store.entry.rbe_left);
		relocate(vm_map_entry_t, store.entry.rbe_right);
		relocate(vm_map_entry_t, store.entry.rbe_parent);
#endif /* VM_MAP_STORE_USE_RB */
		if (((vm_map_entry_t)new_addr)->is_sub_map) {
			/* no object to relocate because we haven't made any */
			((vm_map_entry_t)new_addr)->vme_submap +=
			    delta >> VME_SUBMAP_SHIFT;
		}
#if MAP_ENTRY_CREATION_DEBUG
		relocate(vm_map_entry_t, vme_creation_maphdr);
#endif /* MAP_ENTRY_CREATION_DEBUG */
	}

#undef relocate
}

vm_map_t
vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min,
	vm_map_offset_t         max,
	vm_map_create_options_t options)
{
	vm_map_t result;

#if DEBUG || DEVELOPMENT
	if (__improbable(startup_phase < STARTUP_SUB_ZALLOC)) {
		if (early_map_count != ~0u && early_map_count !=
		    zone_count_allocated(vm_map_zone) + 1) {
			panic("allocating %dth early map, owner not known",
			    zone_count_allocated(vm_map_zone) + 1);
		}
		if (early_map_count != ~0u && pmap && pmap != kernel_pmap) {
			panic("allocating %dth early map for non kernel pmap",
			    early_map_count);
		}
	}
#endif /* DEBUG || DEVELOPMENT */

	result = zalloc_id(ZONE_ID_VM_MAP, Z_WAITOK | Z_NOFAIL | Z_ZERO);

	vm_map_store_init(&result->hdr);
	result->hdr.entries_pageable = (bool)(options & VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(result, PAGE_SHIFT);

	result->size_limit      = RLIM_INFINITY;        /* default unlimited */
	result->data_limit      = RLIM_INFINITY;        /* default unlimited */
	result->user_wire_limit = MACH_VM_MAX_ADDRESS;  /* default limit is unlimited */
	os_ref_init_count_raw(&result->map_refcnt, &map_refgrp, 1);
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);

	if (options & VM_MAP_CREATE_NEVER_FAULTS) {
		assert(pmap == kernel_pmap);
		result->never_faults = true;
	}

	/* "has_corpse_footprint" and "holelistenabled" are mutually exclusive */
	if (options & VM_MAP_CREATE_CORPSE_FOOTPRINT) {
		result->has_corpse_footprint = true;
	} else if (!(options & VM_MAP_CREATE_DISABLE_HOLELIST)) {
		struct vm_map_links *hole_entry;

		hole_entry = zalloc_id(ZONE_ID_VM_MAP_HOLES, Z_WAITOK | Z_NOFAIL);
		hole_entry->start = min;
#if defined(__arm64__)
		hole_entry->end = result->max_offset;
#else
		hole_entry->end = MAX(max, (vm_map_offset_t)MACH_VM_MAX_ADDRESS);
#endif
		result->holes_list = result->hole_hint = hole_entry;
		hole_entry->prev = hole_entry->next = CAST_TO_VM_MAP_ENTRY(hole_entry);
		result->holelistenabled = true;
	}

	vm_map_lock_init(result);

	return result;
}
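
/*
 * Usage sketch (hypothetical caller; assumes a "pmap" in scope):
 * creating a pageable map covering the standard user address range.
 */
#if 0 /* example */
	vm_map_t map = vm_map_create_options(pmap,
	    MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS,
	    VM_MAP_CREATE_PAGEABLE);
#endif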
1544 
1545 /*
1546  * Adjusts a submap that was made by kmem_suballoc()
1547  * before it knew where it would be mapped,
1548  * so that it has the right min/max offsets.
1549  *
1550  * We do not need to hold any locks:
1551  * only the caller knows about this map,
1552  * and it is not published on any entry yet.
1553  */
1554 static void
vm_map_adjust_offsets(vm_map_t map,vm_map_offset_t min_off,vm_map_offset_t max_off)1555 vm_map_adjust_offsets(
1556 	vm_map_t                map,
1557 	vm_map_offset_t         min_off,
1558 	vm_map_offset_t         max_off)
1559 {
1560 	assert(map->min_offset == 0);
1561 	assert(map->max_offset == max_off - min_off);
1562 	assert(map->hdr.nentries == 0);
1563 	assert(os_ref_get_count_raw(&map->map_refcnt) == 2);
1564 
1565 	map->min_offset = min_off;
1566 	map->max_offset = max_off;
1567 
1568 	if (map->holelistenabled) {
1569 		struct vm_map_links *hole = map->holes_list;
1570 
1571 		hole->start = min_off;
1572 #if defined(__arm64__)
1573 		hole->end = max_off;
1574 #else
1575 		hole->end = MAX(max_off, (vm_map_offset_t)MACH_VM_MAX_ADDRESS);
1576 #endif
1577 	}
1578 }
1579 
1580 
1581 vm_map_size_t
vm_map_adjusted_size(vm_map_t map)1582 vm_map_adjusted_size(vm_map_t map)
1583 {
1584 	const struct vm_reserved_region *regions = NULL;
1585 	size_t num_regions = 0;
1586 	mach_vm_size_t  reserved_size = 0, map_size = 0;
1587 
1588 	if (map == NULL || (map->size == 0)) {
1589 		return 0;
1590 	}
1591 
1592 	map_size = map->size;
1593 
1594 	if (map->reserved_regions == FALSE || !vm_map_is_exotic(map) || map->terminated) {
1595 		/*
1596 		 * No special reserved regions or not an exotic map or the task
1597 		 * is terminating and these special regions might have already
1598 		 * been deallocated.
1599 		 */
1600 		return map_size;
1601 	}
1602 
1603 	num_regions = ml_get_vm_reserved_regions(vm_map_is_64bit(map), &regions);
1604 	assert((num_regions == 0) || (num_regions > 0 && regions != NULL));
1605 
1606 	while (num_regions) {
1607 		reserved_size += regions[--num_regions].vmrr_size;
1608 	}
1609 
1610 	/*
1611 	 * There are a few places where the map is being switched out due to
1612 	 * 'termination' without that bit being set (e.g. exec and corpse purging).
1613 	 * In those cases, we could have the map's regions being deallocated on
1614 	 * a core while some accounting process is trying to get the map's size.
1615 	 * So this assert can't be enabled till all those places are uniform in
1616 	 * So this assert can't be enabled until all those places are uniform in
1617 	 *
1618 	 * assert(map_size >= reserved_size);
1619 	 */
1620 
1621 	return (map_size >= reserved_size) ? (map_size - reserved_size) : map_size;
1622 }
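/*
 * Worked example (illustrative): for an exotic map with map->size of
 * 0x200000 and a single reserved region with vmrr_size of 0x80000, the
 * adjusted size is 0x200000 - 0x80000 = 0x180000.  If accounting races
 * with termination and reserved_size exceeds map->size, the raw
 * map->size is returned instead of underflowing.
 */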
1623 
1624 /*
1625  *	vm_map_entry_create:	[ internal use only ]
1626  *
1627  *	Allocates a VM map entry for insertion in the
1628  *	given map (or map copy).  No fields are filled.
1629  *
1630  *	The VM entry will be zero initialized, except for:
1631  *	- behavior set to VM_BEHAVIOR_DEFAULT
1632  *	- inheritance set to VM_INHERIT_DEFAULT
1633  */
1634 #define vm_map_entry_create(map)    _vm_map_entry_create(&(map)->hdr)
1635 
1636 #define vm_map_copy_entry_create(copy) _vm_map_entry_create(&(copy)->cpy_hdr)
1637 
1638 static vm_map_entry_t
1639 _vm_map_entry_create(
1640 	struct vm_map_header    *map_header __unused)
1641 {
1642 	vm_map_entry_t entry = NULL;
1643 
1644 	entry = zalloc_id(ZONE_ID_VM_MAP_ENTRY, Z_WAITOK | Z_ZERO);
1645 
1646 	/*
1647 	 * Help the compiler with what we know to be true,
1648 	 * so that the subsequent bitfield initializations have good codegen.
1649 	 *
1650 	 * See rdar://87041299
1651 	 */
1652 	__builtin_assume(entry->vme_object_value == 0);
1653 	__builtin_assume(*(uint64_t *)(&entry->vme_object_value + 1) == 0);
1654 	__builtin_assume(*(uint64_t *)(&entry->vme_object_value + 2) == 0);
1655 
1656 	static_assert(VM_MAX_TAG_VALUE <= VME_ALIAS_MASK,
1657 	    "VME_ALIAS_MASK covers tags");
1658 
1659 	static_assert(VM_BEHAVIOR_DEFAULT == 0,
1660 	    "can skip zeroing of the behavior field");
1661 	entry->inheritance = VM_INHERIT_DEFAULT;
1662 
1663 #if MAP_ENTRY_CREATION_DEBUG
1664 	entry->vme_creation_maphdr = map_header;
1665 	entry->vme_creation_bt = btref_get(__builtin_frame_address(0),
1666 	    BTREF_GET_NOWAIT);
1667 #endif
1668 	return entry;
1669 }
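/*
 * Illustrative pairing (not part of the original source): entries
 * created here are returned to the zone via vm_map_entry_dispose()
 * on failure paths:
 *
 *	vm_map_entry_t e = vm_map_entry_create(map);
 *	e->vme_start = start;
 *	e->vme_end = end;
 *	// ... bail out before linking ...
 *	vm_map_entry_dispose(e);
 *
 * Every field other than "inheritance" (and the debug backtrace fields)
 * starts out zeroed, which is what the __builtin_assume() hints encode.
 */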
1670 
1671 /*
1672  *	vm_map_entry_dispose:	[ internal use only ]
1673  *
1674  *	Inverse of vm_map_entry_create.
1675  *
1676  *	The map's write lock is held, so nothing
1677  *	special is needed to ensure the correctness
1678  *	of the stores.
1679  */
1680 static void
1681 vm_map_entry_dispose(
1682 	vm_map_entry_t          entry)
1683 {
1684 #if VM_BTLOG_TAGS
1685 	if (entry->vme_kernel_object) {
1686 		btref_put(entry->vme_tag_btref);
1687 	}
1688 #endif /* VM_BTLOG_TAGS */
1689 #if MAP_ENTRY_CREATION_DEBUG
1690 	btref_put(entry->vme_creation_bt);
1691 #endif
1692 #if MAP_ENTRY_INSERTION_DEBUG
1693 	btref_put(entry->vme_insertion_bt);
1694 #endif
1695 	zfree(vm_map_entry_zone, entry);
1696 }
1697 
1698 #define vm_map_copy_entry_dispose(copy_entry) \
1699 	vm_map_entry_dispose(copy_entry)
1700 
1701 static vm_map_entry_t
1702 vm_map_zap_first_entry(
1703 	vm_map_zap_t            list)
1704 {
1705 	return list->vmz_head;
1706 }
1707 
1708 static vm_map_entry_t
1709 vm_map_zap_last_entry(
1710 	vm_map_zap_t            list)
1711 {
1712 	assert(vm_map_zap_first_entry(list));
1713 	return __container_of(list->vmz_tail, struct vm_map_entry, vme_next);
1714 }
1715 
1716 static void
1717 vm_map_zap_append(
1718 	vm_map_zap_t            list,
1719 	vm_map_entry_t          entry)
1720 {
1721 	entry->vme_next = VM_MAP_ENTRY_NULL;
1722 	*list->vmz_tail = entry;
1723 	list->vmz_tail = &entry->vme_next;
1724 }
1725 
1726 static vm_map_entry_t
1727 vm_map_zap_pop(
1728 	vm_map_zap_t            list)
1729 {
1730 	vm_map_entry_t head = list->vmz_head;
1731 
1732 	if (head != VM_MAP_ENTRY_NULL &&
1733 	    (list->vmz_head = head->vme_next) == VM_MAP_ENTRY_NULL) {
1734 		list->vmz_tail = &list->vmz_head;
1735 	}
1736 
1737 	return head;
1738 }
1739 
1740 static void
1741 vm_map_zap_dispose(
1742 	vm_map_zap_t            list)
1743 {
1744 	vm_map_entry_t          entry;
1745 
1746 	while ((entry = vm_map_zap_pop(list))) {
1747 		if (entry->is_sub_map) {
1748 			vm_map_deallocate(VME_SUBMAP(entry));
1749 		} else {
1750 			vm_object_deallocate(VME_OBJECT(entry));
1751 		}
1752 
1753 		vm_map_entry_dispose(entry);
1754 	}
1755 }
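/*
 * Typical zap-list flow (illustrative; vm_map_destroy() below is the
 * canonical caller): entries are unlinked into the list while the map
 * lock is held, and their object/submap references are only dropped
 * once the lock is released:
 *
 *	VM_MAP_ZAP_DECLARE(zap);
 *	vm_map_lock(map);
 *	(void)vm_map_delete(map, start, end, flags, KMEM_GUARD_NONE, &zap);
 *	vm_map_unlock(map);
 *	vm_map_zap_dispose(&zap);
 */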
1756 
1757 #if MACH_ASSERT
1758 static boolean_t first_free_check = FALSE;
1759 boolean_t
1760 first_free_is_valid(
1761 	vm_map_t        map)
1762 {
1763 	if (!first_free_check) {
1764 		return TRUE;
1765 	}
1766 
1767 	return first_free_is_valid_store( map );
1768 }
1769 #endif /* MACH_ASSERT */
1770 
1771 
1772 #define vm_map_copy_entry_link(copy, after_where, entry)                \
1773 	_vm_map_store_entry_link(&(copy)->cpy_hdr, after_where, (entry))
1774 
1775 #define vm_map_copy_entry_unlink(copy, entry)                           \
1776 	_vm_map_store_entry_unlink(&(copy)->cpy_hdr, (entry), false)
1777 
1778 /*
1779  *	vm_map_destroy:
1780  *
1781  *	Actually destroy a map.
1782  */
1783 void
1784 vm_map_destroy(
1785 	vm_map_t        map)
1786 {
1787 	/* final cleanup: this is not allowed to fail */
1788 	vmr_flags_t flags = VM_MAP_REMOVE_NO_FLAGS;
1789 
1790 	VM_MAP_ZAP_DECLARE(zap);
1791 
1792 	vm_map_lock(map);
1793 
1794 	map->terminated = true;
1795 	/* clean up regular map entries */
1796 	(void)vm_map_delete(map, map->min_offset, map->max_offset, flags,
1797 	    KMEM_GUARD_NONE, &zap);
1798 	/* clean up leftover special mappings (commpage, GPU carveout, etc...) */
1799 	(void)vm_map_delete(map, 0x0, 0xFFFFFFFFFFFFF000ULL, flags,
1800 	    KMEM_GUARD_NONE, &zap);
1801 
1802 	vm_map_disable_hole_optimization(map);
1803 	vm_map_corpse_footprint_destroy(map);
1804 
1805 	vm_map_unlock(map);
1806 
1807 	vm_map_zap_dispose(&zap);
1808 
1809 	assert(map->hdr.nentries == 0);
1810 
1811 	if (map->pmap) {
1812 		pmap_destroy(map->pmap);
1813 	}
1814 
1815 	lck_rw_destroy(&map->lock, &vm_map_lck_grp);
1816 
1817 #if CONFIG_MAP_RANGES
1818 	kfree_data(map->extra_ranges,
1819 	    map->extra_ranges_count * sizeof(struct vm_map_user_range));
1820 #endif
1821 
1822 	zfree_id(ZONE_ID_VM_MAP, map);
1823 }
1824 
1825 /*
1826  * Returns pid of the task with the largest number of VM map entries.
1827  * Used in the zone-map-exhaustion jetsam path.
1828  */
1829 pid_t
1830 find_largest_process_vm_map_entries(void)
1831 {
1832 	pid_t victim_pid = -1;
1833 	int max_vm_map_entries = 0;
1834 	task_t task = TASK_NULL;
1835 	queue_head_t *task_list = &tasks;
1836 
1837 	lck_mtx_lock(&tasks_threads_lock);
1838 	queue_iterate(task_list, task, task_t, tasks) {
1839 		if (task == kernel_task || !task->active) {
1840 			continue;
1841 		}
1842 
1843 		vm_map_t task_map = task->map;
1844 		if (task_map != VM_MAP_NULL) {
1845 			int task_vm_map_entries = task_map->hdr.nentries;
1846 			if (task_vm_map_entries > max_vm_map_entries) {
1847 				max_vm_map_entries = task_vm_map_entries;
1848 				victim_pid = pid_from_task(task);
1849 			}
1850 		}
1851 	}
1852 	lck_mtx_unlock(&tasks_threads_lock);
1853 
1854 	printf("zone_map_exhaustion: victim pid %d, vm region count: %d\n", victim_pid, max_vm_map_entries);
1855 	return victim_pid;
1856 }
1857 
1858 
1859 /*
1860  *	vm_map_lookup_entry:	[ internal use only ]
1861  *
1862  *	Calls into the vm map store layer to find the map
1863  *	entry containing (or immediately preceding) the
1864  *	specified address in the given map; the entry is returned
1865  *	in the "entry" parameter.  The boolean
1866  *	result indicates whether the address is
1867  *	actually contained in the map.
1868  */
1869 boolean_t
1870 vm_map_lookup_entry(
1871 	vm_map_t        map,
1872 	vm_map_offset_t address,
1873 	vm_map_entry_t  *entry)         /* OUT */
1874 {
1875 	if (VM_KERNEL_ADDRESS(address)) {
1876 		address = VM_KERNEL_STRIP_UPTR(address);
1877 	}
1878 #if CONFIG_PROB_GZALLOC
1879 	if (map->pmap == kernel_pmap) {
1880 		assertf(!pgz_owned(address),
1881 		    "it is the responsibility of callers to unguard PGZ addresses");
1882 	}
1883 #endif /* CONFIG_PROB_GZALLOC */
1884 	return vm_map_store_lookup_entry( map, address, entry );
1885 }
1886 
1887 boolean_t
1888 vm_map_lookup_entry_or_next(
1889 	vm_map_t        map,
1890 	vm_map_offset_t address,
1891 	vm_map_entry_t  *entry)         /* OUT */
1892 {
1893 	if (vm_map_lookup_entry(map, address, entry)) {
1894 		return true;
1895 	}
1896 
1897 	*entry = (*entry)->vme_next;
1898 	return false;
1899 }
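/*
 * Illustrative semantics (not part of the original source): with two
 * entries [A, B) and [C, D) in the map, where B < C:
 *
 *	vm_map_lookup_entry(map, A, &e);          // TRUE,  e -> [A, B)
 *	vm_map_lookup_entry(map, B, &e);          // FALSE, e -> [A, B) (preceding)
 *	vm_map_lookup_entry_or_next(map, B, &e);  // FALSE, e -> [C, D) (next)
 */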
1900 
1901 #if CONFIG_PROB_GZALLOC
1902 boolean_t
1903 vm_map_lookup_entry_allow_pgz(
1904 	vm_map_t        map,
1905 	vm_map_offset_t address,
1906 	vm_map_entry_t  *entry)         /* OUT */
1907 {
1908 	if (VM_KERNEL_ADDRESS(address)) {
1909 		address = VM_KERNEL_STRIP_UPTR(address);
1910 	}
1911 	return vm_map_store_lookup_entry( map, address, entry );
1912 }
1913 #endif /* CONFIG_PROB_GZALLOC */
1914 
1915 /*
1916  *	Routine:	vm_map_range_invalid_panic
1917  *	Purpose:
1918  *			Panic on detection of an invalid range id.
1919  */
1920 __abortlike
1921 static void
1922 vm_map_range_invalid_panic(
1923 	vm_map_t                map,
1924 	vm_map_range_id_t       range_id)
1925 {
1926 	panic("invalid range ID (%u) for map %p", range_id, map);
1927 }
1928 
1929 /*
1930  *	Routine:	vm_map_get_range
1931  *	Purpose:
1932  *			Adjust bounds based on security policy.
1933  */
1934 static struct mach_vm_range
1935 vm_map_get_range(
1936 	vm_map_t                map,
1937 	vm_map_address_t       *address,
1938 	vm_map_kernel_flags_t  *vmk_flags,
1939 	vm_map_size_t           size,
1940 	bool                   *is_ptr)
1941 {
1942 	struct mach_vm_range effective_range = {};
1943 	vm_map_range_id_t range_id = vmk_flags->vmkf_range_id;
1944 
1945 	if (map == kernel_map) {
1946 		effective_range = kmem_ranges[range_id];
1947 
1948 		if (startup_phase >= STARTUP_SUB_KMEM) {
1949 			/*
1950 			 * The hint provided by the caller is zeroed because the range is
1951 			 * restricted to a subset of the entire kernel_map VA; the hint could
1952 			 * fall outside that range, causing vm_map_store_find_space to fail.
1953 			 */
1954 			*address = 0ull;
1955 			/*
1956 			 * Ensure that range_id passed in by the caller is within meaningful
1957 			 * bounds. Range id of KMEM_RANGE_ID_NONE will cause vm_map_locate_space
1958 			 * to fail as the corresponding range is invalid. Range id larger than
1959 			 * KMEM_RANGE_ID_MAX will lead to an OOB access.
1960 			 */
1961 			if ((range_id == KMEM_RANGE_ID_NONE) ||
1962 			    (range_id > KMEM_RANGE_ID_MAX)) {
1963 				vm_map_range_invalid_panic(map, range_id);
1964 			}
1965 
1966 			/*
1967 			 * Pointer ranges use kmem_locate_space to do allocations.
1968 			 *
1969 			 * Non pointer fronts look like [ Small | Large | Permanent ]
1970 			 * Non-pointer fronts look like [ Small | Large | Permanent ].
1971 			 * Allocations smaller than KMEM_SMALLMAP_THRESHOLD are allowed to
1972 			 * use the entire range.
1973 			 */
1974 			if (range_id < KMEM_RANGE_ID_SPRAYQTN) {
1975 				*is_ptr = true;
1976 			} else if (size >= KMEM_SMALLMAP_THRESHOLD) {
1977 				effective_range = kmem_large_ranges[range_id];
1978 			}
1979 		}
1980 #if CONFIG_MAP_RANGES
1981 	} else if (map->uses_user_ranges) {
1982 		switch (range_id) {
1983 		case UMEM_RANGE_ID_DEFAULT:
1984 			effective_range = map->default_range;
1985 			break;
1986 		case UMEM_RANGE_ID_HEAP:
1987 			effective_range = map->data_range;
1988 			break;
1989 		case UMEM_RANGE_ID_FIXED:
1990 			/*
1991 			 * anywhere allocations with an address in "FIXED"
1992 			 * make no sense; leave the range empty.
1993 			 */
1994 			break;
1995 
1996 		default:
1997 			vm_map_range_invalid_panic(map, range_id);
1998 		}
1999 #endif /* CONFIG_MAP_RANGES */
2000 	} else {
2001 		/*
2002 		 * If the minimum is 0, bump it up by PAGE_SIZE.  We want to
2003 		 * limit allocations in PAGEZERO to explicit requests: its
2004 		 * normal use is to catch dereferences of NULL, and many
2005 		 * applications also treat pointers with a value of 0 as
2006 		 * special, so suddenly having address 0 contain usable
2007 		 * memory would tend to confuse those applications.
2008 		 */
2009 		effective_range.min_address = MAX(map->min_offset, VM_MAP_PAGE_SIZE(map));
2010 		effective_range.max_address = map->max_offset;
2011 	}
2012 
2013 	return effective_range;
2014 }
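/*
 * Example (illustrative): for a regular user map with min_offset 0, the
 * fallback case above yields
 *
 *	effective_range.min_address == VM_MAP_PAGE_SIZE(map)
 *	effective_range.max_address == map->max_offset
 *
 * so "anywhere" allocations never land on page 0 unless a fixed address
 * there is explicitly requested.
 */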
2015 
2016 /*
2017  *	Routine:	vm_map_locate_space
2018  *	Purpose:
2019  *		Finds a range in the specified virtual address map,
2020  *		returning the start of that range,
2021  *		as well as the entry right before it.
2022  */
2023 kern_return_t
2024 vm_map_locate_space(
2025 	vm_map_t                map,
2026 	vm_map_size_t           size,
2027 	vm_map_offset_t         mask,
2028 	vm_map_kernel_flags_t   vmk_flags,
2029 	vm_map_offset_t        *start_inout,
2030 	vm_map_entry_t         *entry_out)
2031 {
2032 	struct mach_vm_range effective_range = {};
2033 	vm_map_size_t   guard_offset;
2034 	vm_map_offset_t hint, limit;
2035 	vm_map_entry_t  entry;
2036 	bool            is_kmem_ptr_range = false;
2037 
2038 	/*
2039 	 * Only supported by vm_map_enter() with a fixed address.
2040 	 */
2041 	assert(!vmk_flags.vmkf_beyond_max);
2042 
2043 	if (__improbable(map->wait_for_space)) {
2044 		/*
2045 		 * support for "wait_for_space" is minimal;
2046 		 * its only consumer is the ipc_kernel_copy_map.
2047 		 */
2048 		assert(!map->holelistenabled &&
2049 		    !vmk_flags.vmkf_last_free &&
2050 		    !vmk_flags.vmkf_keep_map_locked &&
2051 		    !vmk_flags.vmkf_map_jit &&
2052 		    !vmk_flags.vmf_random_addr &&
2053 		    *start_inout <= map->min_offset);
2054 	} else if (vmk_flags.vmkf_last_free) {
2055 		assert(!vmk_flags.vmkf_map_jit &&
2056 		    !vmk_flags.vmf_random_addr);
2057 	}
2058 
2059 	if (vmk_flags.vmkf_guard_before) {
2060 		guard_offset = VM_MAP_PAGE_SIZE(map);
2061 		assert(size > guard_offset);
2062 		size -= guard_offset;
2063 	} else {
2064 		assert(size != 0);
2065 		guard_offset = 0;
2066 	}
2067 
2068 	/*
2069 	 * Validate range_id from flags and get associated range
2070 	 */
2071 	effective_range = vm_map_get_range(map, start_inout, &vmk_flags, size,
2072 	    &is_kmem_ptr_range);
2073 
2074 	if (is_kmem_ptr_range) {
2075 		return kmem_locate_space(size + guard_offset, vmk_flags.vmkf_range_id,
2076 		           vmk_flags.vmkf_last_free, start_inout, entry_out);
2077 	}
2078 
2079 #if XNU_TARGET_OS_OSX
2080 	if (__improbable(vmk_flags.vmkf_32bit_map_va)) {
2081 		assert(map != kernel_map);
2082 		effective_range.max_address = MIN(map->max_offset, 0x00000000FFFFF000ULL);
2083 	}
2084 #endif /* XNU_TARGET_OS_OSX */
2085 
2086 again:
2087 	if (vmk_flags.vmkf_last_free) {
2088 		hint = *start_inout;
2089 
2090 		if (hint == 0 || hint > effective_range.max_address) {
2091 			hint = effective_range.max_address;
2092 		}
2093 		if (hint <= effective_range.min_address) {
2094 			return KERN_NO_SPACE;
2095 		}
2096 		limit = effective_range.min_address;
2097 	} else {
2098 		hint = *start_inout;
2099 
2100 		if (vmk_flags.vmkf_map_jit) {
2101 			if (map->jit_entry_exists &&
2102 			    !VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(map)) {
2103 				return KERN_INVALID_ARGUMENT;
2104 			}
2105 			if (VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(map)) {
2106 				vmk_flags.vmf_random_addr = true;
2107 			}
2108 		}
2109 
2110 		if (vmk_flags.vmf_random_addr) {
2111 			kern_return_t kr;
2112 
2113 			kr = vm_map_random_address_for_size(map, &hint, size, vmk_flags);
2114 			if (kr != KERN_SUCCESS) {
2115 				return kr;
2116 			}
2117 		}
2118 #if __x86_64__
2119 		else if ((hint == 0 || hint == vm_map_min(map)) &&
2120 		    !map->disable_vmentry_reuse &&
2121 		    map->vmmap_high_start != 0) {
2122 			hint = map->vmmap_high_start;
2123 		}
2124 #endif /* __x86_64__ */
2125 
2126 		if (hint < effective_range.min_address) {
2127 			hint = effective_range.min_address;
2128 		}
2129 		if (effective_range.max_address <= hint) {
2130 			return KERN_NO_SPACE;
2131 		}
2132 
2133 		limit = effective_range.max_address;
2134 	}
2135 	entry = vm_map_store_find_space(map,
2136 	    hint, limit, vmk_flags.vmkf_last_free,
2137 	    guard_offset, size, mask,
2138 	    start_inout);
2139 
2140 	if (__improbable(entry == NULL)) {
2141 		if (map->wait_for_space &&
2142 		    guard_offset + size <=
2143 		    effective_range.max_address - effective_range.min_address) {
2144 			assert_wait((event_t)map, THREAD_ABORTSAFE);
2145 			vm_map_unlock(map);
2146 			thread_block(THREAD_CONTINUE_NULL);
2147 			vm_map_lock(map);
2148 			goto again;
2149 		}
2150 		return KERN_NO_SPACE;
2151 	}
2152 
2153 	if (entry_out) {
2154 		*entry_out = entry;
2155 	}
2156 	return KERN_SUCCESS;
2157 }
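/*
 * Illustrative caller pattern (vm_map_find_space() below is the
 * canonical example): the map must be locked, and on success the
 * out-parameters describe where a new entry can be linked:
 *
 *	vm_map_lock(map);
 *	kr = vm_map_locate_space(map, size, mask, vmk_flags, &start, &entry);
 *	if (kr == KERN_SUCCESS) {
 *		// link a new entry spanning [start, start + size)
 *		// immediately after "entry"
 *	}
 */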
2158 
2159 
2160 /*
2161  *	Routine:	vm_map_find_space
2162  *	Purpose:
2163  *		Allocate a range in the specified virtual address map,
2164  *		returning the entry allocated for that range.
2165  *		Used by kmem_alloc, etc.
2166  *
2167  *		The map must NOT be locked. It will be returned locked
2168  *		on KERN_SUCCESS, unlocked on failure.
2169  *
2170  *		If an entry is allocated, the object/offset fields
2171  *		are initialized to zero.
2172  */
2173 kern_return_t
2174 vm_map_find_space(
2175 	vm_map_t                map,
2176 	vm_map_offset_t         hint_address,
2177 	vm_map_size_t           size,
2178 	vm_map_offset_t         mask,
2179 	vm_map_kernel_flags_t   vmk_flags,
2180 	vm_map_entry_t          *o_entry)       /* OUT */
2181 {
2182 	vm_map_entry_t          new_entry, entry;
2183 	kern_return_t           kr;
2184 
2185 	if (size == 0) {
2186 		return KERN_INVALID_ARGUMENT;
2187 	}
2188 
2189 	new_entry = vm_map_entry_create(map);
2190 	new_entry->use_pmap = true;
2191 	new_entry->protection = VM_PROT_DEFAULT;
2192 	new_entry->max_protection = VM_PROT_ALL;
2193 
2194 	if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) {
2195 		new_entry->map_aligned = true;
2196 	}
2197 	if (vmk_flags.vmf_permanent) {
2198 		new_entry->vme_permanent = true;
2199 	}
2200 
2201 	vm_map_lock(map);
2202 
2203 	kr = vm_map_locate_space(map, size, mask, vmk_flags,
2204 	    &hint_address, &entry);
2205 	if (kr != KERN_SUCCESS) {
2206 		vm_map_unlock(map);
2207 		vm_map_entry_dispose(new_entry);
2208 		return kr;
2209 	}
2210 	new_entry->vme_start = hint_address;
2211 	new_entry->vme_end = hint_address + size;
2212 
2213 	/*
2214 	 *	At this point,
2215 	 *
2216 	 *	- new_entry's "vme_start" and "vme_end" should define
2217 	 *	  the endpoints of the available new range,
2218 	 *
2219 	 *	- and "entry" should refer to the region before
2220 	 *	  the new range,
2221 	 *
2222 	 *	- and the map should still be locked.
2223 	 */
2224 
2225 	assert(page_aligned(new_entry->vme_start));
2226 	assert(page_aligned(new_entry->vme_end));
2227 	assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, VM_MAP_PAGE_MASK(map)));
2228 	assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, VM_MAP_PAGE_MASK(map)));
2229 
2230 	/*
2231 	 *	Insert the new entry into the list
2232 	 */
2233 
2234 	vm_map_store_entry_link(map, entry, new_entry,
2235 	    VM_MAP_KERNEL_FLAGS_NONE);
2236 	map->size += size;
2237 
2238 	/*
2239 	 *	Update the lookup hint
2240 	 */
2241 	SAVE_HINT_MAP_WRITE(map, new_entry);
2242 
2243 	*o_entry = new_entry;
2244 	return KERN_SUCCESS;
2245 }
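/*
 * Illustrative usage (assumed caller, not a verbatim one): the map is
 * passed in unlocked and comes back locked on success, with the new
 * entry spanning the chosen range (for kernel_map, vmk_flags must carry
 * a valid kmem range id once kmem is up):
 *
 *	vm_map_entry_t entry;
 *	kr = vm_map_find_space(map, 0, size, 0, vmk_flags, &entry);
 *	if (kr == KERN_SUCCESS) {
 *		// [entry->vme_start, entry->vme_end) is reserved;
 *		// the map is still locked here
 *		vm_map_unlock(map);
 *	}
 */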
2246 
2247 int vm_map_pmap_enter_print = FALSE;
2248 int vm_map_pmap_enter_enable = FALSE;
2249 
2250 /*
2251  *	Routine:	vm_map_pmap_enter [internal only]
2252  *
2253  *	Description:
2254  *		Force pages from the specified object to be entered into
2255  *		the pmap at the specified address if they are present.
2256  *		The scan ends as soon as a page is not found in the object.
2257  *
2258  *	Returns:
2259  *		Nothing.
2260  *
2261  *	In/out conditions:
2262  *		The source map should not be locked on entry.
2263  */
2264 __unused static void
2265 vm_map_pmap_enter(
2266 	vm_map_t                map,
2267 	vm_map_offset_t         addr,
2268 	vm_map_offset_t         end_addr,
2269 	vm_object_t             object,
2270 	vm_object_offset_t      offset,
2271 	vm_prot_t               protection)
2272 {
2273 	int                     type_of_fault;
2274 	kern_return_t           kr;
2275 	uint8_t                 object_lock_type = 0;
2276 	struct vm_object_fault_info fault_info = {};
2277 
2278 	if (map->pmap == 0) {
2279 		return;
2280 	}
2281 
2282 	assert(VM_MAP_PAGE_SHIFT(map) == PAGE_SHIFT);
2283 
2284 	while (addr < end_addr) {
2285 		vm_page_t       m;
2286 
2287 
2288 		/*
2289 		 * TODO:
2290 		 * From vm_map_enter(), we come into this function without the map
2291 		 * lock held or the object lock held.
2292 		 * We haven't taken a reference on the object either.
2293 		 * We should do a proper lookup on the map to make sure
2294 		 * that things are sane before we go locking objects that
2295 		 * could have been deallocated from under us.
2296 		 */
2297 
2298 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
2299 		vm_object_lock(object);
2300 
2301 		m = vm_page_lookup(object, offset);
2302 
2303 		if (m == VM_PAGE_NULL || m->vmp_busy || m->vmp_fictitious ||
2304 		    (m->vmp_unusual && (VMP_ERROR_GET(m) || m->vmp_restart || m->vmp_absent))) {
2305 			vm_object_unlock(object);
2306 			return;
2307 		}
2308 
2309 		if (vm_map_pmap_enter_print) {
2310 			printf("vm_map_pmap_enter:");
2311 			printf("map: %p, addr: %llx, object: %p, offset: %llx\n",
2312 			    map, (unsigned long long)addr, object, (unsigned long long)offset);
2313 		}
2314 		type_of_fault = DBG_CACHE_HIT_FAULT;
2315 		kr = vm_fault_enter(m, map->pmap,
2316 		    addr,
2317 		    PAGE_SIZE, 0,
2318 		    protection, protection,
2319 		    VM_PAGE_WIRED(m),
2320 		    FALSE,                 /* change_wiring */
2321 		    VM_KERN_MEMORY_NONE,                 /* tag - not wiring */
2322 		    &fault_info,
2323 		    NULL,                  /* need_retry */
2324 		    &type_of_fault,
2325 		    &object_lock_type); /* Exclusive lock mode. Will remain unchanged.*/
2326 
2327 		vm_object_unlock(object);
2328 
2329 		offset += PAGE_SIZE_64;
2330 		addr += PAGE_SIZE;
2331 	}
2332 }
2333 
2334 #define MAX_TRIES_TO_GET_RANDOM_ADDRESS 1000
2335 static kern_return_t
2336 vm_map_random_address_for_size(
2337 	vm_map_t                map,
2338 	vm_map_offset_t        *address,
2339 	vm_map_size_t           size,
2340 	vm_map_kernel_flags_t   vmk_flags)
2341 {
2342 	kern_return_t   kr = KERN_SUCCESS;
2343 	int             tries = 0;
2344 	vm_map_offset_t random_addr = 0;
2345 	vm_map_offset_t hole_end;
2346 
2347 	vm_map_entry_t  next_entry = VM_MAP_ENTRY_NULL;
2348 	vm_map_entry_t  prev_entry = VM_MAP_ENTRY_NULL;
2349 	vm_map_size_t   vm_hole_size = 0;
2350 	vm_map_size_t   addr_space_size;
2351 	bool            is_kmem_ptr;
2352 	struct mach_vm_range effective_range;
2353 
2354 	effective_range = vm_map_get_range(map, address, &vmk_flags, size,
2355 	    &is_kmem_ptr);
2356 
2357 	addr_space_size = effective_range.max_address - effective_range.min_address;
2358 	if (size >= addr_space_size) {
2359 		return KERN_NO_SPACE;
2360 	}
2361 	addr_space_size -= size;
2362 
2363 	assert(VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map)));
2364 
2365 	while (tries < MAX_TRIES_TO_GET_RANDOM_ADDRESS) {
2366 		if (startup_phase < STARTUP_SUB_ZALLOC) {
2367 			random_addr = (vm_map_offset_t)early_random();
2368 		} else {
2369 			random_addr = (vm_map_offset_t)random();
2370 		}
2371 		random_addr <<= VM_MAP_PAGE_SHIFT(map);
2372 		random_addr = vm_map_trunc_page(
2373 			effective_range.min_address + (random_addr % addr_space_size),
2374 			VM_MAP_PAGE_MASK(map));
2375 
2376 #if CONFIG_PROB_GZALLOC
2377 		if (map->pmap == kernel_pmap && pgz_owned(random_addr)) {
2378 			continue;
2379 		}
2380 #endif /* CONFIG_PROB_GZALLOC */
2381 
2382 		if (vm_map_lookup_entry(map, random_addr, &prev_entry) == FALSE) {
2383 			if (prev_entry == vm_map_to_entry(map)) {
2384 				next_entry = vm_map_first_entry(map);
2385 			} else {
2386 				next_entry = prev_entry->vme_next;
2387 			}
2388 			if (next_entry == vm_map_to_entry(map)) {
2389 				hole_end = vm_map_max(map);
2390 			} else {
2391 				hole_end = next_entry->vme_start;
2392 			}
2393 			vm_hole_size = hole_end - random_addr;
2394 			if (vm_hole_size >= size) {
2395 				*address = random_addr;
2396 				break;
2397 			}
2398 		}
2399 		tries++;
2400 	}
2401 
2402 	if (tries == MAX_TRIES_TO_GET_RANDOM_ADDRESS) {
2403 		kr = KERN_NO_SPACE;
2404 	}
2405 	return kr;
2406 }
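/*
 * Worked example (illustrative): with a 16K map page size, a raw random
 * value r is scaled and bounded as
 *
 *	random_addr = trunc_page(min_address + ((r << 14) % (range_size - size)))
 *
 * so every candidate is map-page aligned and leaves room for "size"
 * below max_address.  Up to MAX_TRIES_TO_GET_RANDOM_ADDRESS candidates
 * are probed for a large-enough hole before giving up with
 * KERN_NO_SPACE.
 */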
2407 
2408 static boolean_t
2409 vm_memory_malloc_no_cow(
2410 	int alias)
2411 {
2412 	uint64_t alias_mask;
2413 
2414 	if (!malloc_no_cow) {
2415 		return FALSE;
2416 	}
2417 	if (alias > 63) {
2418 		return FALSE;
2419 	}
2420 	alias_mask = 1ULL << alias;
2421 	if (alias_mask & vm_memory_malloc_no_cow_mask) {
2422 		return TRUE;
2423 	}
2424 	return FALSE;
2425 }
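/*
 * Example (illustrative): when the global malloc_no_cow policy is
 * enabled and the bit for a malloc tag (e.g. VM_MEMORY_MALLOC_SMALL) is
 * set in vm_memory_malloc_no_cow_mask, vm_map_enter() below gives such
 * mappings their own object with MEMORY_OBJECT_COPY_NONE instead of
 * relying on copy-on-write.
 */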
2426 
2427 uint64_t vm_map_enter_RLIMIT_AS_count = 0;
2428 uint64_t vm_map_enter_RLIMIT_DATA_count = 0;
2429 /*
2430  *	Routine:	vm_map_enter
2431  *
2432  *	Description:
2433  *		Allocate a range in the specified virtual address map.
2434  *		The resulting range will refer to memory defined by
2435  *		the given memory object and offset into that object.
2436  *
2437  *		Arguments are as defined in the vm_map call.
2438  */
2439 static unsigned int vm_map_enter_restore_successes = 0;
2440 static unsigned int vm_map_enter_restore_failures = 0;
2441 kern_return_t
2442 vm_map_enter(
2443 	vm_map_t                map,
2444 	vm_map_offset_t         *address,       /* IN/OUT */
2445 	vm_map_size_t           size,
2446 	vm_map_offset_t         mask,
2447 	vm_map_kernel_flags_t   vmk_flags,
2448 	vm_object_t             object,
2449 	vm_object_offset_t      offset,
2450 	boolean_t               needs_copy,
2451 	vm_prot_t               cur_protection,
2452 	vm_prot_t               max_protection,
2453 	vm_inherit_t            inheritance)
2454 {
2455 	vm_map_entry_t          entry, new_entry;
2456 	vm_map_offset_t         start, tmp_start, tmp_offset;
2457 	vm_map_offset_t         end, tmp_end;
2458 	vm_map_offset_t         tmp2_start, tmp2_end;
2459 	vm_map_offset_t         step;
2460 	kern_return_t           result = KERN_SUCCESS;
2461 	bool                    map_locked = FALSE;
2462 	bool                    pmap_empty = TRUE;
2463 	bool                    new_mapping_established = FALSE;
2464 	const bool              keep_map_locked = vmk_flags.vmkf_keep_map_locked;
2465 	const bool              anywhere = !vmk_flags.vmf_fixed;
2466 	const bool              purgable = vmk_flags.vmf_purgeable;
2467 	const bool              overwrite = vmk_flags.vmf_overwrite;
2468 	const bool              no_cache = vmk_flags.vmf_no_cache;
2469 	const bool              is_submap = vmk_flags.vmkf_submap;
2470 	const bool              permanent = vmk_flags.vmf_permanent;
2471 	const bool              no_copy_on_read = vmk_flags.vmkf_no_copy_on_read;
2472 	const bool              entry_for_jit = vmk_flags.vmkf_map_jit;
2473 	const bool              iokit_acct = vmk_flags.vmkf_iokit_acct;
2474 	const bool              resilient_codesign = vmk_flags.vmf_resilient_codesign;
2475 	const bool              resilient_media = vmk_flags.vmf_resilient_media;
2476 	const bool              entry_for_tpro = vmk_flags.vmf_tpro;
2477 	const unsigned int      superpage_size = vmk_flags.vmf_superpage_size;
2478 	const vm_tag_t          alias = vmk_flags.vm_tag;
2479 	vm_tag_t                user_alias;
2480 	kern_return_t           kr;
2481 	bool                    clear_map_aligned = FALSE;
2482 	vm_map_size_t           chunk_size = 0;
2483 	vm_object_t             caller_object;
2484 	VM_MAP_ZAP_DECLARE(zap_old_list);
2485 	VM_MAP_ZAP_DECLARE(zap_new_list);
2486 
2487 	caller_object = object;
2488 
2489 	assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused);
2490 
2491 	if (vmk_flags.vmf_4gb_chunk) {
2492 #if defined(__LP64__)
2493 		chunk_size = (4ULL * 1024 * 1024 * 1024); /* max. 4GB chunks for the new allocation */
2494 #else /* __LP64__ */
2495 		chunk_size = ANON_CHUNK_SIZE;
2496 #endif /* __LP64__ */
2497 	} else {
2498 		chunk_size = ANON_CHUNK_SIZE;
2499 	}
2500 
2501 
2502 
2503 	if (superpage_size) {
2504 		switch (superpage_size) {
2505 			/*
2506 			 * Note that the current implementation only supports
2507 			 * a single size for superpages, SUPERPAGE_SIZE, per
2508 			 * architecture. Once more sizes are to be supported,
2509 			 * SUPERPAGE_SIZE will have to be replaced with a lookup
2510 			 * of the size based on superpage_size.
2511 			 */
2512 #ifdef __x86_64__
2513 		case SUPERPAGE_SIZE_ANY:
2514 			/* handle it like 2 MB and round up to page size */
2515 			size = (size + 2 * 1024 * 1024 - 1) & ~(2 * 1024 * 1024 - 1);
2516 			OS_FALLTHROUGH;
2517 		case SUPERPAGE_SIZE_2MB:
2518 			break;
2519 #endif
2520 		default:
2521 			return KERN_INVALID_ARGUMENT;
2522 		}
2523 		mask = SUPERPAGE_SIZE - 1;
2524 		if (size & (SUPERPAGE_SIZE - 1)) {
2525 			return KERN_INVALID_ARGUMENT;
2526 		}
2527 		inheritance = VM_INHERIT_NONE;  /* fork() children won't inherit superpages */
2528 	}
2529 
2530 
2531 	if ((cur_protection & VM_PROT_WRITE) &&
2532 	    (cur_protection & VM_PROT_EXECUTE) &&
2533 #if XNU_TARGET_OS_OSX
2534 	    map->pmap != kernel_pmap &&
2535 	    (cs_process_global_enforcement() ||
2536 	    (vmk_flags.vmkf_cs_enforcement_override
2537 	    ? vmk_flags.vmkf_cs_enforcement
2538 	    : (vm_map_cs_enforcement(map)
2539 #if __arm64__
2540 	    || !VM_MAP_IS_EXOTIC(map)
2541 #endif /* __arm64__ */
2542 	    ))) &&
2543 #endif /* XNU_TARGET_OS_OSX */
2544 #if CODE_SIGNING_MONITOR
2545 	    (csm_address_space_exempt(map->pmap) != KERN_SUCCESS) &&
2546 #endif
2547 	    (VM_MAP_POLICY_WX_FAIL(map) ||
2548 	    VM_MAP_POLICY_WX_STRIP_X(map)) &&
2549 	    !entry_for_jit) {
2550 		boolean_t vm_protect_wx_fail = VM_MAP_POLICY_WX_FAIL(map);
2551 
2552 		DTRACE_VM3(cs_wx,
2553 		    uint64_t, 0,
2554 		    uint64_t, 0,
2555 		    vm_prot_t, cur_protection);
2556 		printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. %s\n",
2557 		    proc_selfpid(),
2558 		    (get_bsdtask_info(current_task())
2559 		    ? proc_name_address(get_bsdtask_info(current_task()))
2560 		    : "?"),
2561 		    __FUNCTION__,
2562 		    (vm_protect_wx_fail ? "failing" : "turning off execute"));
2563 		cur_protection &= ~VM_PROT_EXECUTE;
2564 		if (vm_protect_wx_fail) {
2565 			return KERN_PROTECTION_FAILURE;
2566 		}
2567 	}
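	/*
	 * Illustrative outcome (not part of the original source): a
	 * mach_vm_map()-style request for VM_PROT_READ|WRITE|EXECUTE on a
	 * map subject to the policy above either fails outright with
	 * KERN_PROTECTION_FAILURE (VM_MAP_POLICY_WX_FAIL) or proceeds with
	 * execute stripped (VM_MAP_POLICY_WX_STRIP_X), unless the mapping
	 * was requested for JIT.
	 */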
2568 
2569 	/*
2570 	 * If the task has requested executable lockdown,
2571 	 * deny any new executable mapping.
2572 	 */
2573 	if (map->map_disallow_new_exec == TRUE) {
2574 		if (cur_protection & VM_PROT_EXECUTE) {
2575 			return KERN_PROTECTION_FAILURE;
2576 		}
2577 	}
2578 
2579 	if (resilient_codesign) {
2580 		assert(!is_submap);
2581 		int reject_prot = (needs_copy ? VM_PROT_ALLEXEC : (VM_PROT_WRITE | VM_PROT_ALLEXEC));
2582 		if ((cur_protection | max_protection) & reject_prot) {
2583 			return KERN_PROTECTION_FAILURE;
2584 		}
2585 	}
2586 
2587 	if (resilient_media) {
2588 		assert(!is_submap);
2589 //		assert(!needs_copy);
2590 		if (object != VM_OBJECT_NULL &&
2591 		    !object->internal) {
2592 			/*
2593 			 * This mapping is directly backed by an external
2594 			 * memory manager (e.g. a vnode pager for a file):
2595 			 * we would not have any safe place to inject
2596 			 * a zero-filled page if an actual page is not
2597 			 * available, without possibly impacting the actual
2598 			 * contents of the mapped object (e.g. the file),
2599 			 * so we can't provide any media resiliency here.
2600 			 */
2601 			return KERN_INVALID_ARGUMENT;
2602 		}
2603 	}
2604 
2605 	if (entry_for_tpro) {
2606 		/*
2607 		 * TPRO overrides the effective permissions of the region
2608 		 * and explicitly maps as RW. Ensure we have been passed
2609 		 * the expected permissions. We accept `cur_protections`
2610 		 * the expected permissions. We accept a read-only
2611 		 * `cur_protection` as that will be handled on fault.
2612 		if (!(max_protection & VM_PROT_READ) ||
2613 		    !(max_protection & VM_PROT_WRITE) ||
2614 		    !(cur_protection & VM_PROT_READ)) {
2615 			return KERN_PROTECTION_FAILURE;
2616 		}
2617 
2618 		/*
2619 		 * We can now downgrade the cur_protection to RO. This is a mild lie
2620 		 * to the VM layer. But TPRO will be responsible for toggling the
2621 		 * to the VM layer, but TPRO will be responsible for toggling the
2622 		 * protections between RO and RW.
2623 		cur_protection = VM_PROT_READ;
2624 	}
2625 
2626 	if (is_submap) {
2627 		vm_map_t submap;
2628 		if (purgable) {
2629 			/* submaps can not be purgeable */
2630 			return KERN_INVALID_ARGUMENT;
2631 		}
2632 		if (object == VM_OBJECT_NULL) {
2633 			/* submaps can not be created lazily */
2634 			return KERN_INVALID_ARGUMENT;
2635 		}
2636 		submap = (vm_map_t) object;
2637 		if (VM_MAP_PAGE_SHIFT(submap) != VM_MAP_PAGE_SHIFT(map)) {
2638 			/* page size mismatch */
2639 			return KERN_INVALID_ARGUMENT;
2640 		}
2641 	}
2642 	if (vmk_flags.vmkf_already) {
2643 		/*
2644 		 * VM_FLAGS_ALREADY says that it's OK if the same mapping
2645 		 * is already present.  For it to be meaningful, the requested
2646 		 * mapping has to be at a fixed address (!VM_FLAGS_ANYWHERE) and
2647 		 * we shouldn't try and remove what was mapped there first
2648 		 * (!VM_FLAGS_OVERWRITE).
2649 		 */
2650 		if (!vmk_flags.vmf_fixed || vmk_flags.vmf_overwrite) {
2651 			return KERN_INVALID_ARGUMENT;
2652 		}
2653 	}
2654 
2655 	if (size == 0 ||
2656 	    (offset & MIN(VM_MAP_PAGE_MASK(map), PAGE_MASK_64)) != 0) {
2657 		*address = 0;
2658 		return KERN_INVALID_ARGUMENT;
2659 	}
2660 
2661 	if (map->pmap == kernel_pmap) {
2662 		user_alias = VM_KERN_MEMORY_NONE;
2663 	} else {
2664 		user_alias = alias;
2665 	}
2666 
2667 	if (user_alias == VM_MEMORY_MALLOC_MEDIUM) {
2668 		chunk_size = MALLOC_MEDIUM_CHUNK_SIZE;
2669 	}
2670 
2671 #define RETURN(value)   { result = value; goto BailOut; }
2672 
2673 	assertf(VM_MAP_PAGE_ALIGNED(*address, FOURK_PAGE_MASK), "0x%llx", (uint64_t)*address);
2674 	assertf(VM_MAP_PAGE_ALIGNED(size, FOURK_PAGE_MASK), "0x%llx", (uint64_t)size);
2675 	if (VM_MAP_PAGE_MASK(map) >= PAGE_MASK) {
2676 		assertf(page_aligned(*address), "0x%llx", (uint64_t)*address);
2677 		assertf(page_aligned(size), "0x%llx", (uint64_t)size);
2678 	}
2679 
2680 	if (VM_MAP_PAGE_MASK(map) >= PAGE_MASK &&
2681 	    !VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))) {
2682 		/*
2683 		 * In most cases, the caller rounds the size up to the
2684 		 * map's page size.
2685 		 * If we get a size that is explicitly not map-aligned here,
2686 		 * we'll have to respect the caller's wish and mark the
2687 		 * mapping as "not map-aligned" to avoid tripping the
2688 		 * map alignment checks later.
2689 		 */
2690 		clear_map_aligned = TRUE;
2691 	}
2692 	if (!anywhere &&
2693 	    VM_MAP_PAGE_MASK(map) >= PAGE_MASK &&
2694 	    !VM_MAP_PAGE_ALIGNED(*address, VM_MAP_PAGE_MASK(map))) {
2695 		/*
2696 		 * We've been asked to map at a fixed address and that
2697 		 * address is not aligned to the map's specific alignment.
2698 		 * The caller should know what it's doing (i.e. most likely
2699 		 * mapping some fragmented copy map, transferring memory from
2700 		 * a VM map with a different alignment), so clear map_aligned
2701 		 * for this new VM map entry and proceed.
2702 		 */
2703 		clear_map_aligned = TRUE;
2704 	}
2705 
2706 	/*
2707 	 * Only zero-fill objects are allowed to be purgable.
2708 	 * LP64todo - limit purgable objects to 32-bits for now
2709 	 */
2710 	if (purgable &&
2711 	    (offset != 0 ||
2712 	    (object != VM_OBJECT_NULL &&
2713 	    (object->vo_size != size ||
2714 	    object->purgable == VM_PURGABLE_DENY))
2715 #if __LP64__
2716 	    || size > ANON_MAX_SIZE
2717 #endif
2718 	    )) {
2719 		return KERN_INVALID_ARGUMENT;
2720 	}
2721 
2722 	start = *address;
2723 
2724 	if (anywhere) {
2725 		vm_map_lock(map);
2726 		map_locked = TRUE;
2727 
2728 		result = vm_map_locate_space(map, size, mask, vmk_flags,
2729 		    &start, &entry);
2730 		if (result != KERN_SUCCESS) {
2731 			goto BailOut;
2732 		}
2733 
2734 		*address = start;
2735 		end = start + size;
2736 		assert(VM_MAP_PAGE_ALIGNED(*address,
2737 		    VM_MAP_PAGE_MASK(map)));
2738 	} else {
2739 		vm_map_offset_t effective_min_offset, effective_max_offset;
2740 
2741 		effective_min_offset = map->min_offset;
2742 		effective_max_offset = map->max_offset;
2743 
2744 		if (vmk_flags.vmkf_beyond_max) {
2745 			/*
2746 			 * Allow an insertion beyond the map's max offset.
2747 			 */
2748 			effective_max_offset = 0x00000000FFFFF000ULL;
2749 			if (vm_map_is_64bit(map)) {
2750 				effective_max_offset = 0xFFFFFFFFFFFFF000ULL;
2751 			}
2752 #if XNU_TARGET_OS_OSX
2753 		} else if (__improbable(vmk_flags.vmkf_32bit_map_va)) {
2754 			effective_max_offset = MIN(map->max_offset, 0x00000000FFFFF000ULL);
2755 #endif /* XNU_TARGET_OS_OSX */
2756 		}
2757 
2758 		if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT &&
2759 		    !overwrite &&
2760 		    user_alias == VM_MEMORY_REALLOC) {
2761 			/*
2762 			 * Force realloc() to switch to a new allocation,
2763 			 * to prevent 4k-fragmented virtual ranges.
2764 			 */
2765 //			DEBUG4K_ERROR("no realloc in place");
2766 			return KERN_NO_SPACE;
2767 		}
2768 
2769 		/*
2770 		 *	Verify that:
2771 		 *		the address doesn't itself violate
2772 		 *		the mask requirement.
2773 		 */
2774 
2775 		vm_map_lock(map);
2776 		map_locked = TRUE;
2777 		if ((start & mask) != 0) {
2778 			RETURN(KERN_NO_SPACE);
2779 		}
2780 
2781 #if CONFIG_MAP_RANGES
2782 		if (map->uses_user_ranges) {
2783 			struct mach_vm_range r;
2784 
2785 			vm_map_user_range_resolve(map, start, 1, &r);
2786 			if (r.max_address == 0) {
2787 				RETURN(KERN_INVALID_ADDRESS);
2788 			}
2789 			effective_min_offset = r.min_address;
2790 			effective_max_offset = r.max_address;
2791 		}
2792 #endif /* CONFIG_MAP_RANGES */
2793 
2794 		if ((startup_phase >= STARTUP_SUB_KMEM) && !is_submap &&
2795 		    (map == kernel_map)) {
2796 			mach_vm_range_t r = kmem_validate_range_for_overwrite(start, size);
2797 			effective_min_offset = r->min_address;
2798 			effective_max_offset = r->max_address;
2799 		}
2800 
2801 		/*
2802 		 *	...	the address is within bounds
2803 		 */
2804 
2805 		end = start + size;
2806 
2807 		if ((start < effective_min_offset) ||
2808 		    (end > effective_max_offset) ||
2809 		    (start >= end)) {
2810 			RETURN(KERN_INVALID_ADDRESS);
2811 		}
2812 
2813 		if (overwrite) {
2814 			vmr_flags_t remove_flags = VM_MAP_REMOVE_NO_MAP_ALIGN;
2815 			kern_return_t remove_kr;
2816 
2817 			/*
2818 			 * Fixed mapping and "overwrite" flag: attempt to
2819 			 * remove all existing mappings in the specified
2820 			 * address range, saving them in our "zap_old_list".
2821 			 *
2822 			 * This avoids releasing the VM map lock in
2823 			 * vm_map_entry_delete() and allows atomicity
2824 			 * when we want to replace some mappings with a new one.
2825 			 * It also allows us to restore the old VM mappings if the
2826 			 * new mapping fails.
2827 			 */
2828 			remove_flags |= VM_MAP_REMOVE_NO_YIELD;
2829 
2830 			if (vmk_flags.vmkf_overwrite_immutable) {
2831 				/* we can overwrite immutable mappings */
2832 				remove_flags |= VM_MAP_REMOVE_IMMUTABLE;
2833 			}
2834 			if (vmk_flags.vmkf_remap_prot_copy) {
2835 				remove_flags |= VM_MAP_REMOVE_IMMUTABLE_CODE;
2836 			}
2837 			remove_kr = vm_map_delete(map, start, end, remove_flags,
2838 			    KMEM_GUARD_NONE, &zap_old_list).kmr_return;
2839 			if (remove_kr) {
2840 				/* XXX FBDP restore zap_old_list? */
2841 				RETURN(remove_kr);
2842 			}
2843 		}
2844 
2845 		/*
2846 		 *	...	the starting address isn't allocated
2847 		 */
2848 
2849 		if (vm_map_lookup_entry(map, start, &entry)) {
2850 			if (!(vmk_flags.vmkf_already)) {
2851 				RETURN(KERN_NO_SPACE);
2852 			}
2853 			/*
2854 			 * Check if what's already there is what we want.
2855 			 */
2856 			tmp_start = start;
2857 			tmp_offset = offset;
2858 			if (entry->vme_start < start) {
2859 				tmp_start -= start - entry->vme_start;
2860 				tmp_offset -= start - entry->vme_start;
2861 			}
2862 			for (; entry->vme_start < end;
2863 			    entry = entry->vme_next) {
2864 				/*
2865 				 * Check if the mapping's attributes
2866 				 * match the existing map entry.
2867 				 */
2868 				if (entry == vm_map_to_entry(map) ||
2869 				    entry->vme_start != tmp_start ||
2870 				    entry->is_sub_map != is_submap ||
2871 				    VME_OFFSET(entry) != tmp_offset ||
2872 				    entry->needs_copy != needs_copy ||
2873 				    entry->protection != cur_protection ||
2874 				    entry->max_protection != max_protection ||
2875 				    entry->inheritance != inheritance ||
2876 				    entry->iokit_acct != iokit_acct ||
2877 				    VME_ALIAS(entry) != alias) {
2878 					/* not the same mapping ! */
2879 					RETURN(KERN_NO_SPACE);
2880 				}
2881 				/*
2882 				 * Check if the same object is being mapped.
2883 				 */
2884 				if (is_submap) {
2885 					if (VME_SUBMAP(entry) !=
2886 					    (vm_map_t) object) {
2887 						/* not the same submap */
2888 						RETURN(KERN_NO_SPACE);
2889 					}
2890 				} else {
2891 					if (VME_OBJECT(entry) != object) {
2892 						/* not the same VM object... */
2893 						vm_object_t obj2;
2894 
2895 						obj2 = VME_OBJECT(entry);
2896 						if ((obj2 == VM_OBJECT_NULL ||
2897 						    obj2->internal) &&
2898 						    (object == VM_OBJECT_NULL ||
2899 						    object->internal)) {
2900 							/*
2901 							 * ... but both are
2902 							 * anonymous memory,
2903 							 * so equivalent.
2904 							 */
2905 						} else {
2906 							RETURN(KERN_NO_SPACE);
2907 						}
2908 					}
2909 				}
2910 
2911 				tmp_offset += entry->vme_end - entry->vme_start;
2912 				tmp_start += entry->vme_end - entry->vme_start;
2913 				if (entry->vme_end >= end) {
2914 					/* reached the end of our mapping */
2915 					break;
2916 				}
2917 			}
2918 			/* it all matches:  let's use what's already there ! */
2919 			RETURN(KERN_MEMORY_PRESENT);
2920 		}
2921 
2922 		/*
2923 		 *	...	the next region doesn't overlap the
2924 		 *		end point.
2925 		 */
2926 
2927 		if ((entry->vme_next != vm_map_to_entry(map)) &&
2928 		    (entry->vme_next->vme_start < end)) {
2929 			RETURN(KERN_NO_SPACE);
2930 		}
2931 	}
2932 
2933 	/*
2934 	 *	At this point,
2935 	 *		"start" and "end" should define the endpoints of the
2936 	 *			available new range, and
2937 	 *		"entry" should refer to the region before the new
2938 	 *			range, and
2939 	 *
2940 	 *		the map should be locked.
2941 	 */
2942 
2943 	/*
2944 	 *	See whether we can avoid creating a new entry (and object) by
2945 	 *	extending one of our neighbors.  [So far, we only attempt to
2946 	 *	extend from below.]  Note that we can never extend/join
2947 	 *	purgable objects because they need to remain distinct
2948 	 *	entities in order to implement their "volatile object"
2949 	 *	semantics.
2950 	 */
2951 
2952 	if (purgable ||
2953 	    entry_for_jit ||
2954 	    entry_for_tpro ||
2955 	    vm_memory_malloc_no_cow(user_alias)) {
2956 		if (object == VM_OBJECT_NULL) {
2957 			object = vm_object_allocate(size);
2958 			object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
2959 			object->true_share = FALSE;
2960 			if (malloc_no_cow_except_fork &&
2961 			    !purgable &&
2962 			    !entry_for_jit &&
2963 			    !entry_for_tpro &&
2964 			    vm_memory_malloc_no_cow(user_alias)) {
2965 				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY_FORK;
2966 				object->true_share = TRUE;
2967 			}
2968 			if (purgable) {
2969 				task_t owner;
2970 				object->purgable = VM_PURGABLE_NONVOLATILE;
2971 				if (map->pmap == kernel_pmap) {
2972 					/*
2973 					 * Purgeable mappings made in a kernel
2974 					 * map are "owned" by the kernel itself
2975 					 * rather than the current user task
2976 					 * because they're likely to be used by
2977 					 * more than this user task (see
2978 					 * execargs_purgeable_allocate(), for
2979 					 * example).
2980 					 */
2981 					owner = kernel_task;
2982 				} else {
2983 					owner = current_task();
2984 				}
2985 				assert(object->vo_owner == NULL);
2986 				assert(object->resident_page_count == 0);
2987 				assert(object->wired_page_count == 0);
2988 				vm_object_lock(object);
2989 				vm_purgeable_nonvolatile_enqueue(object, owner);
2990 				vm_object_unlock(object);
2991 			}
2992 			offset = (vm_object_offset_t)0;
2993 		}
2994 	} else if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
2995 		/* no coalescing if address space uses sub-pages */
2996 	} else if ((is_submap == FALSE) &&
2997 	    (object == VM_OBJECT_NULL) &&
2998 	    (entry != vm_map_to_entry(map)) &&
2999 	    (entry->vme_end == start) &&
3000 	    (!entry->is_shared) &&
3001 	    (!entry->is_sub_map) &&
3002 	    (!entry->in_transition) &&
3003 	    (!entry->needs_wakeup) &&
3004 	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
3005 	    (entry->protection == cur_protection) &&
3006 	    (entry->max_protection == max_protection) &&
3007 	    (entry->inheritance == inheritance) &&
3008 	    ((user_alias == VM_MEMORY_REALLOC) ||
3009 	    (VME_ALIAS(entry) == alias)) &&
3010 	    (entry->no_cache == no_cache) &&
3011 	    (entry->vme_permanent == permanent) &&
3012 	    /* no coalescing for immutable executable mappings */
3013 	    !((entry->protection & VM_PROT_EXECUTE) &&
3014 	    entry->vme_permanent) &&
3015 	    (!entry->superpage_size && !superpage_size) &&
3016 	    /*
3017 	     * No coalescing if not map-aligned, to avoid propagating
3018 	     * that condition any further than needed:
3019 	     */
3020 	    (!entry->map_aligned || !clear_map_aligned) &&
3021 	    (!entry->zero_wired_pages) &&
3022 	    (!entry->used_for_jit && !entry_for_jit) &&
3023 #if __arm64e__
3024 	    (!entry->used_for_tpro && !entry_for_tpro) &&
3025 #endif
3026 	    (!entry->csm_associated) &&
3027 	    (entry->iokit_acct == iokit_acct) &&
3028 	    (!entry->vme_resilient_codesign) &&
3029 	    (!entry->vme_resilient_media) &&
3030 	    (!entry->vme_atomic) &&
3031 	    (entry->vme_no_copy_on_read == no_copy_on_read) &&
3032 
3033 	    ((entry->vme_end - entry->vme_start) + size <=
3034 	    (user_alias == VM_MEMORY_REALLOC ?
3035 	    ANON_CHUNK_SIZE :
3036 	    NO_COALESCE_LIMIT)) &&
3037 
3038 	    (entry->wired_count == 0)) {        /* implies user_wired_count == 0 */
3039 		if (vm_object_coalesce(VME_OBJECT(entry),
3040 		    VM_OBJECT_NULL,
3041 		    VME_OFFSET(entry),
3042 		    (vm_object_offset_t) 0,
3043 		    (vm_map_size_t)(entry->vme_end - entry->vme_start),
3044 		    (vm_map_size_t)(end - entry->vme_end))) {
3045 			/*
3046 			 *	Coalesced the two objects - can extend
3047 			 *	the previous map entry to include the
3048 			 *	new range.
3049 			 */
3050 			map->size += (end - entry->vme_end);
3051 			assert(entry->vme_start < end);
3052 			assert(VM_MAP_PAGE_ALIGNED(end,
3053 			    VM_MAP_PAGE_MASK(map)));
3054 			if (__improbable(vm_debug_events)) {
3055 				DTRACE_VM5(map_entry_extend, vm_map_t, map, vm_map_entry_t, entry, vm_address_t, entry->vme_start, vm_address_t, entry->vme_end, vm_address_t, end);
3056 			}
3057 			entry->vme_end = end;
3058 			if (map->holelistenabled) {
3059 				vm_map_store_update_first_free(map, entry, TRUE);
3060 			} else {
3061 				vm_map_store_update_first_free(map, map->first_free, TRUE);
3062 			}
3063 			new_mapping_established = TRUE;
3064 			RETURN(KERN_SUCCESS);
3065 		}
3066 	}
3067 
3068 	step = superpage_size ? SUPERPAGE_SIZE : (end - start);
3069 	new_entry = NULL;
3070 
3071 	if (vmk_flags.vmkf_submap_adjust) {
3072 		vm_map_adjust_offsets((vm_map_t)caller_object, start, end);
3073 		offset = start;
3074 	}
3075 
3076 	for (tmp2_start = start; tmp2_start < end; tmp2_start += step) {
3077 		tmp2_end = tmp2_start + step;
3078 		/*
3079 		 *	Create a new entry
3080 		 *
3081 		 * XXX FBDP
3082 		 * The reserved "page zero" in each process's address space can
3083 		 * be arbitrarily large.  Splitting it into separate objects and
3084 		 * therefore different VM map entries serves no purpose and just
3085 		 * slows down operations on the VM map, so let's not split the
3086 		 * allocation into chunks if the max protection is NONE.  That
3087 		 * memory should never be accessible, so it will never get to the
3088 		 * default pager.
3089 		 */
3090 		tmp_start = tmp2_start;
3091 		if (!is_submap &&
3092 		    object == VM_OBJECT_NULL &&
3093 		    size > chunk_size &&
3094 		    max_protection != VM_PROT_NONE &&
3095 		    superpage_size == 0) {
3096 			tmp_end = tmp_start + chunk_size;
3097 		} else {
3098 			tmp_end = tmp2_end;
3099 		}
3100 		do {
3101 			if (!is_submap &&
3102 			    object != VM_OBJECT_NULL &&
3103 			    object->internal &&
3104 			    offset + (tmp_end - tmp_start) > object->vo_size) {
3105 //				printf("FBDP object %p size 0x%llx overmapping offset 0x%llx size 0x%llx\n", object, object->vo_size, offset, (uint64_t)(tmp_end - tmp_start));
3106 				DTRACE_VM5(vm_map_enter_overmap,
3107 				    vm_map_t, map,
3108 				    vm_map_address_t, tmp_start,
3109 				    vm_map_address_t, tmp_end,
3110 				    vm_object_offset_t, offset,
3111 				    vm_object_size_t, object->vo_size);
3112 			}
3113 			new_entry = vm_map_entry_insert(map,
3114 			    entry, tmp_start, tmp_end,
3115 			    object, offset, vmk_flags,
3116 			    needs_copy,
3117 			    cur_protection, max_protection,
3118 			    (entry_for_jit && !VM_MAP_POLICY_ALLOW_JIT_INHERIT(map) ?
3119 			    VM_INHERIT_NONE : inheritance),
3120 			    clear_map_aligned);
3121 
3122 			assert(!is_kernel_object(object) || (VM_KERN_MEMORY_NONE != alias));
3123 
3124 			if (resilient_codesign) {
3125 				int reject_prot = (needs_copy ? VM_PROT_ALLEXEC : (VM_PROT_WRITE | VM_PROT_ALLEXEC));
3126 				if (!((cur_protection | max_protection) & reject_prot)) {
3127 					new_entry->vme_resilient_codesign = TRUE;
3128 				}
3129 			}
3130 
3131 			if (resilient_media &&
3132 			    (object == VM_OBJECT_NULL ||
3133 			    object->internal)) {
3134 				new_entry->vme_resilient_media = TRUE;
3135 			}
3136 
3137 			assert(!new_entry->iokit_acct);
3138 			if (!is_submap &&
3139 			    object != VM_OBJECT_NULL &&
3140 			    (object->purgable != VM_PURGABLE_DENY ||
3141 			    object->vo_ledger_tag)) {
3142 				assert(new_entry->use_pmap);
3143 				assert(!new_entry->iokit_acct);
3144 				/*
3145 				 * Turn off pmap accounting since
3146 				 * purgeable (or tagged) objects have their
3147 				 * own ledgers.
3148 				 */
3149 				new_entry->use_pmap = FALSE;
3150 			} else if (!is_submap &&
3151 			    iokit_acct &&
3152 			    object != VM_OBJECT_NULL &&
3153 			    object->internal) {
3154 				/* alternate accounting */
3155 				assert(!new_entry->iokit_acct);
3156 				assert(new_entry->use_pmap);
3157 				new_entry->iokit_acct = TRUE;
3158 				new_entry->use_pmap = FALSE;
3159 				DTRACE_VM4(
3160 					vm_map_iokit_mapped_region,
3161 					vm_map_t, map,
3162 					vm_map_offset_t, new_entry->vme_start,
3163 					vm_map_offset_t, new_entry->vme_end,
3164 					int, VME_ALIAS(new_entry));
3165 				vm_map_iokit_mapped_region(
3166 					map,
3167 					(new_entry->vme_end -
3168 					new_entry->vme_start));
3169 			} else if (!is_submap) {
3170 				assert(!new_entry->iokit_acct);
3171 				assert(new_entry->use_pmap);
3172 			}
3173 
3174 			if (is_submap) {
3175 				vm_map_t        submap;
3176 				boolean_t       submap_is_64bit;
3177 				boolean_t       use_pmap;
3178 
3179 				assert(new_entry->is_sub_map);
3180 				assert(!new_entry->use_pmap);
3181 				assert(!new_entry->iokit_acct);
3182 				submap = (vm_map_t) object;
3183 				submap_is_64bit = vm_map_is_64bit(submap);
3184 				use_pmap = vmk_flags.vmkf_nested_pmap;
3185 #ifndef NO_NESTED_PMAP
3186 				if (use_pmap && submap->pmap == NULL) {
3187 					ledger_t ledger = map->pmap->ledger;
3188 					/* we need a sub pmap to nest... */
3189 					submap->pmap = pmap_create_options(ledger, 0,
3190 					    submap_is_64bit ? PMAP_CREATE_64BIT : 0);
3191 					if (submap->pmap == NULL) {
3192 						/* let's proceed without nesting... */
3193 					}
3194 #if defined(__arm64__)
3195 					else {
3196 						pmap_set_nested(submap->pmap);
3197 					}
3198 #endif
3199 				}
3200 				if (use_pmap && submap->pmap != NULL) {
3201 					if (VM_MAP_PAGE_SHIFT(map) != VM_MAP_PAGE_SHIFT(submap)) {
3202 						DEBUG4K_ERROR("map %p (%d) submap %p (%d): incompatible page sizes\n", map, VM_MAP_PAGE_SHIFT(map), submap, VM_MAP_PAGE_SHIFT(submap));
3203 						kr = KERN_FAILURE;
3204 					} else {
3205 						kr = pmap_nest(map->pmap,
3206 						    submap->pmap,
3207 						    tmp_start,
3208 						    tmp_end - tmp_start);
3209 					}
3210 					if (kr != KERN_SUCCESS) {
3211 						printf("vm_map_enter: "
3212 						    "pmap_nest(0x%llx,0x%llx) "
3213 						    "error 0x%x\n",
3214 						    (long long)tmp_start,
3215 						    (long long)tmp_end,
3216 						    kr);
3217 					} else {
3218 						/* we're now nested ! */
3219 						new_entry->use_pmap = TRUE;
3220 						pmap_empty = FALSE;
3221 					}
3222 				}
3223 #endif /* NO_NESTED_PMAP */
3224 			}
3225 			entry = new_entry;
3226 
3227 			if (superpage_size) {
3228 				vm_page_t pages, m;
3229 				vm_object_t sp_object;
3230 				vm_object_offset_t sp_offset;
3231 
3232 				VME_OFFSET_SET(entry, 0);
3233 
3234 				/* allocate one superpage */
3235 				kr = cpm_allocate(SUPERPAGE_SIZE, &pages, 0, SUPERPAGE_NBASEPAGES - 1, TRUE, 0);
3236 				if (kr != KERN_SUCCESS) {
3237 					/* deallocate whole range... */
3238 					new_mapping_established = TRUE;
3239 					/* ... but only up to "tmp_end" */
3240 					size -= end - tmp_end;
3241 					RETURN(kr);
3242 				}
3243 
3244 				/* create one vm_object per superpage */
3245 				sp_object = vm_object_allocate((vm_map_size_t)(entry->vme_end - entry->vme_start));
3246 				sp_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
3247 				sp_object->phys_contiguous = TRUE;
3248 				sp_object->vo_shadow_offset = (vm_object_offset_t)VM_PAGE_GET_PHYS_PAGE(pages) * PAGE_SIZE;
3249 				VME_OBJECT_SET(entry, sp_object, false, 0);
3250 				assert(entry->use_pmap);
3251 
3252 				/* enter the base pages into the object */
3253 				vm_object_lock(sp_object);
3254 				for (sp_offset = 0;
3255 				    sp_offset < SUPERPAGE_SIZE;
3256 				    sp_offset += PAGE_SIZE) {
3257 					m = pages;
3258 					pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
3259 					pages = NEXT_PAGE(m);
3260 					*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
3261 					vm_page_insert_wired(m, sp_object, sp_offset, VM_KERN_MEMORY_OSFMK);
3262 				}
3263 				vm_object_unlock(sp_object);
3264 			}
3265 		} while (tmp_end != tmp2_end &&
3266 		    (tmp_start = tmp_end) &&
3267 		    (tmp_end = (tmp2_end - tmp_end > chunk_size) ?
3268 		    tmp_end + chunk_size : tmp2_end));
3269 	}
3270 
3271 	new_mapping_established = TRUE;
3272 
3273 BailOut:
3274 	assert(map_locked == TRUE);
3275 
3276 	/*
3277 	 * Address space limit enforcement (RLIMIT_AS and RLIMIT_DATA):
3278 	 * If we have identified and possibly established the new mapping(s),
3279 	 * make sure we did not go beyond the address space limit.
3280 	 */
3281 	if (result == KERN_SUCCESS) {
3282 		if (map->size_limit != RLIM_INFINITY &&
3283 		    map->size > map->size_limit) {
3284 			/*
3285 			 * Establishing the requested mappings would exceed
3286 			 * the process's RLIMIT_AS limit: fail with
3287 			 * KERN_NO_SPACE.
3288 			 */
3289 			result = KERN_NO_SPACE;
3290 			printf("%d[%s] %s: map size 0x%llx over RLIMIT_AS 0x%llx\n",
3291 			    proc_selfpid(),
3292 			    (get_bsdtask_info(current_task())
3293 			    ? proc_name_address(get_bsdtask_info(current_task()))
3294 			    : "?"),
3295 			    __FUNCTION__,
3296 			    (uint64_t) map->size,
3297 			    (uint64_t) map->size_limit);
3298 			DTRACE_VM2(vm_map_enter_RLIMIT_AS,
3299 			    vm_map_size_t, map->size,
3300 			    uint64_t, map->size_limit);
3301 			vm_map_enter_RLIMIT_AS_count++;
3302 		} else if (map->data_limit != RLIM_INFINITY &&
3303 		    map->size > map->data_limit) {
3304 			/*
3305 			 * Establishing the requested mappings would exceed
3306 			 * the process's RLIMIT_DATA limit: fail with
3307 			 * KERN_NO_SPACE.
3308 			 */
3309 			result = KERN_NO_SPACE;
3310 			printf("%d[%s] %s: map size 0x%llx over RLIMIT_DATA 0x%llx\n",
3311 			    proc_selfpid(),
3312 			    (get_bsdtask_info(current_task())
3313 			    ? proc_name_address(get_bsdtask_info(current_task()))
3314 			    : "?"),
3315 			    __FUNCTION__,
3316 			    (uint64_t) map->size,
3317 			    (uint64_t) map->data_limit);
3318 			DTRACE_VM2(vm_map_enter_RLIMIT_DATA,
3319 			    vm_map_size_t, map->size,
3320 			    uint64_t, map->data_limit);
3321 			vm_map_enter_RLIMIT_DATA_count++;
3322 		}
3323 	}
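	/*
	 * Note that the limits are checked after the mappings have been
	 * established: on failure, the "result != KERN_SUCCESS" path below
	 * tears the new mappings back down.
	 */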
3324 
3325 	if (result == KERN_SUCCESS) {
3326 		vm_prot_t pager_prot;
3327 		memory_object_t pager;
3328 
3329 #if DEBUG
3330 		if (pmap_empty &&
3331 		    !(vmk_flags.vmkf_no_pmap_check)) {
3332 			assert(pmap_is_empty(map->pmap,
3333 			    *address,
3334 			    *address + size));
3335 		}
3336 #endif /* DEBUG */
3337 
3338 		/*
3339 		 * For "named" VM objects, let the pager know that the
3340 		 * memory object is being mapped.  Some pagers need to keep
3341 		 * track of this, to know when they can reclaim the memory
3342 		 * object, for example.
3343 		 * VM calls memory_object_map() for each mapping (specifying
3344 		 * the protection of each mapping) and calls
3345 		 * memory_object_last_unmap() when all the mappings are gone.
3346 		 */
3347 		pager_prot = max_protection;
3348 		if (needs_copy) {
3349 			/*
3350 			 * Copy-On-Write mapping: won't modify
3351 			 * the memory object.
3352 			 */
3353 			pager_prot &= ~VM_PROT_WRITE;
3354 		}
3355 		if (!is_submap &&
3356 		    object != VM_OBJECT_NULL &&
3357 		    object->named &&
3358 		    object->pager != MEMORY_OBJECT_NULL) {
3359 			vm_object_lock(object);
3360 			pager = object->pager;
3361 			if (object->named &&
3362 			    pager != MEMORY_OBJECT_NULL) {
3363 				assert(object->pager_ready);
3364 				vm_object_mapping_wait(object, THREAD_UNINT);
3365 				vm_object_mapping_begin(object);
3366 				vm_object_unlock(object);
3367 
3368 				kr = memory_object_map(pager, pager_prot);
3369 				assert(kr == KERN_SUCCESS);
3370 
3371 				vm_object_lock(object);
3372 				vm_object_mapping_end(object);
3373 			}
3374 			vm_object_unlock(object);
3375 		}
3376 	}
3377 
3378 	assert(map_locked == TRUE);
3379 
3380 	if (new_mapping_established) {
3381 		/*
3382 		 * If we release the map lock for any reason below,
3383 		 * another thread could deallocate our new mapping,
3384 		 * releasing the caller's reference on "caller_object",
3385 		 * which was transferred to the mapping.
3386 		 * If this was the only reference, the object could be
3387 		 * destroyed.
3388 		 *
3389 		 * We need to take an extra reference on "caller_object"
3390 		 * to keep it alive, in case we have to give the caller
3391 		 * its reference back on failure.
3392 		 */
3393 		if (is_submap) {
3394 			vm_map_reference((vm_map_t)caller_object);
3395 		} else {
3396 			vm_object_reference(caller_object);
3397 		}
3398 	}
3399 
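	/*
	 * Once the map lock is dropped, "entry" and "new_entry" may be
	 * clipped or deallocated by another thread, so clear them to catch
	 * any accidental use after unlocking.
	 */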
3400 	if (!keep_map_locked) {
3401 		vm_map_unlock(map);
3402 		map_locked = FALSE;
3403 		entry = VM_MAP_ENTRY_NULL;
3404 		new_entry = VM_MAP_ENTRY_NULL;
3405 	}
3406 
3407 	/*
3408 	 * We can't hold the map lock if we enter this block.
3409 	 */
3410 
3411 	if (result == KERN_SUCCESS) {
3412 		/*	Wire down the new entry if the user requested all new
3413 		 *	map entries be wired, or if this is a superpage mapping
3414 		 *	(superpage memory is kept resident). */
3415 		if ((map->wiring_required) || (superpage_size)) {
3416 			assert(!keep_map_locked);
3417 			pmap_empty = FALSE; /* pmap won't be empty */
3418 			kr = vm_map_wire_kernel(map, start, end,
3419 			    cur_protection, VM_KERN_MEMORY_MLOCK,
3420 			    TRUE);
3421 			result = kr;
3422 		}
3423 
3424 	}
3425 
3426 	if (result != KERN_SUCCESS) {
3427 		if (new_mapping_established) {
3428 			vmr_flags_t remove_flags = VM_MAP_REMOVE_NO_FLAGS;
3429 
3430 			/*
3431 			 * We have to get rid of the new mappings since we
3432 			 * won't make them available to the user.
3433 			 * Try to do that atomically, to minimize the risk
3434 			 * that someone else creates new mappings in that range.
3435 			 */
3436 			if (!map_locked) {
3437 				vm_map_lock(map);
3438 				map_locked = TRUE;
3439 			}
3440 			remove_flags |= VM_MAP_REMOVE_NO_MAP_ALIGN;
3441 			remove_flags |= VM_MAP_REMOVE_NO_YIELD;
3442 			if (permanent) {
3443 				remove_flags |= VM_MAP_REMOVE_IMMUTABLE;
3444 			}
3445 			(void) vm_map_delete(map,
3446 			    *address, *address + size,
3447 			    remove_flags,
3448 			    KMEM_GUARD_NONE, &zap_new_list);
3449 		}
3450 
3451 		if (vm_map_zap_first_entry(&zap_old_list)) {
3452 			vm_map_entry_t entry1, entry2;
3453 
3454 			/*
3455 			 * The new mapping failed.  Attempt to restore
3456 			 * the old mappings, saved in the "zap_old_map".
3457 			 */
3458 			if (!map_locked) {
3459 				vm_map_lock(map);
3460 				map_locked = TRUE;
3461 			}
3462 
3463 			/* first check that the whole range is still a single unmapped hole */
3464 			start = vm_map_zap_first_entry(&zap_old_list)->vme_start;
3465 			end   = vm_map_zap_last_entry(&zap_old_list)->vme_end;
3466 
3467 			if (vm_map_lookup_entry(map, start, &entry1) ||
3468 			    vm_map_lookup_entry(map, end, &entry2) ||
3469 			    entry1 != entry2) {
3470 				/*
3471 				 * Part of that range has already been
3472 				 * re-mapped:  we can't restore the old
3473 				 * mappings...
3474 				 */
3475 				vm_map_enter_restore_failures++;
3476 			} else {
3477 				/*
3478 				 * Transfer the saved map entries from
3479 				 * "zap_old_map" to the original "map",
3480 				 * inserting them all after "entry1".
3481 				 */
3482 				while ((entry2 = vm_map_zap_pop(&zap_old_list))) {
3483 					vm_map_size_t entry_size;
3484 
3485 					entry_size = (entry2->vme_end -
3486 					    entry2->vme_start);
3487 					vm_map_store_entry_link(map, entry1, entry2,
3488 					    VM_MAP_KERNEL_FLAGS_NONE);
3489 					map->size += entry_size;
3490 					entry1 = entry2;
3491 				}
3492 				if (map->wiring_required) {
3493 					/*
3494 					 * XXX TODO: we should rewire the
3495 					 * old pages here...
3496 					 */
3497 				}
3498 				vm_map_enter_restore_successes++;
3499 			}
3500 		}
3501 	}
3502 
3503 	/*
3504 	 * The caller is responsible for releasing the lock if it requested to
3505 	 * keep the map locked.
3506 	 */
3507 	if (map_locked && !keep_map_locked) {
3508 		vm_map_unlock(map);
3509 	}
3510 
3511 	vm_map_zap_dispose(&zap_old_list);
3512 	vm_map_zap_dispose(&zap_new_list);
3513 
3514 	if (new_mapping_established) {
3515 		/*
3516 		 * The caller had a reference on "caller_object" and we
3517 		 * transferred that reference to the mapping.
3518 		 * We also took an extra reference on "caller_object" to keep
3519 		 * it alive while the map was unlocked.
3520 		 */
3521 		if (result == KERN_SUCCESS) {
3522 			/*
3523 			 * On success, the caller's reference on the object gets
3524 			 * transferred to the mapping.
3525 			 * Release our extra reference.
3526 			 */
3527 			if (is_submap) {
3528 				vm_map_deallocate((vm_map_t)caller_object);
3529 			} else {
3530 				vm_object_deallocate(caller_object);
3531 			}
3532 		} else {
3533 			/*
3534 			 * On error, the caller expects to still have a
3535 			 * reference on the object it gave us.
3536 			 * Let's use our extra reference for that.
3537 			 */
3538 		}
3539 	}
3540 
3541 	return result;
3542 
3543 #undef  RETURN
3544 }
3545 
3546 #if __arm64__
3547 extern const struct memory_object_pager_ops fourk_pager_ops;
3548 kern_return_t
3549 vm_map_enter_fourk(
3550 	vm_map_t                map,
3551 	vm_map_offset_t         *address,       /* IN/OUT */
3552 	vm_map_size_t           size,
3553 	vm_map_offset_t         mask,
3554 	vm_map_kernel_flags_t   vmk_flags,
3555 	vm_object_t             object,
3556 	vm_object_offset_t      offset,
3557 	boolean_t               needs_copy,
3558 	vm_prot_t               cur_protection,
3559 	vm_prot_t               max_protection,
3560 	vm_inherit_t            inheritance)
3561 {
3562 	vm_map_entry_t          entry, new_entry;
3563 	vm_map_offset_t         start, fourk_start;
3564 	vm_map_offset_t         end, fourk_end;
3565 	vm_map_size_t           fourk_size;
3566 	kern_return_t           result = KERN_SUCCESS;
3567 	boolean_t               map_locked = FALSE;
3568 	boolean_t               pmap_empty = TRUE;
3569 	boolean_t               new_mapping_established = FALSE;
3570 	const bool              keep_map_locked = vmk_flags.vmkf_keep_map_locked;
3571 	const bool              anywhere = !vmk_flags.vmf_fixed;
3572 	const bool              purgable = vmk_flags.vmf_purgeable;
3573 	const bool              overwrite = vmk_flags.vmf_overwrite;
3574 	const bool              is_submap = vmk_flags.vmkf_submap;
3575 	const bool              entry_for_jit = vmk_flags.vmkf_map_jit;
3576 	const unsigned int      superpage_size = vmk_flags.vmf_superpage_size;
3577 	vm_map_offset_t         effective_min_offset, effective_max_offset;
3578 	kern_return_t           kr;
3579 	boolean_t               clear_map_aligned = FALSE;
3580 	memory_object_t         fourk_mem_obj;
3581 	vm_object_t             fourk_object;
3582 	vm_map_offset_t         fourk_pager_offset;
3583 	int                     fourk_pager_index_start, fourk_pager_index_num;
3584 	int                     cur_idx;
3585 	boolean_t               fourk_copy;
3586 	vm_object_t             copy_object;
3587 	vm_object_offset_t      copy_offset;
3588 	VM_MAP_ZAP_DECLARE(zap_list);
3589 
3590 	if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
3591 		panic("%s:%d", __FUNCTION__, __LINE__);
3592 	}
3593 	fourk_mem_obj = MEMORY_OBJECT_NULL;
3594 	fourk_object = VM_OBJECT_NULL;
3595 
3596 	if (superpage_size) {
3597 		return KERN_NOT_SUPPORTED;
3598 	}
3599 
3600 	if ((cur_protection & VM_PROT_WRITE) &&
3601 	    (cur_protection & VM_PROT_EXECUTE) &&
3602 #if XNU_TARGET_OS_OSX
3603 	    map->pmap != kernel_pmap &&
3604 	    (vm_map_cs_enforcement(map)
3605 #if __arm64__
3606 	    || !VM_MAP_IS_EXOTIC(map)
3607 #endif /* __arm64__ */
3608 	    ) &&
3609 #endif /* XNU_TARGET_OS_OSX */
3610 #if CODE_SIGNING_MONITOR
3611 	    (csm_address_space_exempt(map->pmap) != KERN_SUCCESS) &&
3612 #endif
3613 	    !entry_for_jit) {
3614 		DTRACE_VM3(cs_wx,
3615 		    uint64_t, 0,
3616 		    uint64_t, 0,
3617 		    vm_prot_t, cur_protection);
3618 		printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. "
3619 		    "turning off execute\n",
3620 		    proc_selfpid(),
3621 		    (get_bsdtask_info(current_task())
3622 		    ? proc_name_address(get_bsdtask_info(current_task()))
3623 		    : "?"),
3624 		    __FUNCTION__);
3625 		cur_protection &= ~VM_PROT_EXECUTE;
3626 	}
3627 
3628 	/*
3629 	 * If the task has requested executable lockdown,
3630 	 * deny any new executable mapping.
3631 	 */
3632 	if (map->map_disallow_new_exec == TRUE) {
3633 		if (cur_protection & VM_PROT_EXECUTE) {
3634 			return KERN_PROTECTION_FAILURE;
3635 		}
3636 	}
3637 
3638 	if (is_submap) {
3639 		return KERN_NOT_SUPPORTED;
3640 	}
3641 	if (vmk_flags.vmkf_already) {
3642 		return KERN_NOT_SUPPORTED;
3643 	}
3644 	if (purgable || entry_for_jit) {
3645 		return KERN_NOT_SUPPORTED;
3646 	}
3647 
3648 	effective_min_offset = map->min_offset;
3649 
3650 	if (vmk_flags.vmkf_beyond_max) {
3651 		return KERN_NOT_SUPPORTED;
3652 	} else {
3653 		effective_max_offset = map->max_offset;
3654 	}
3655 
3656 	if (size == 0 ||
3657 	    (offset & FOURK_PAGE_MASK) != 0) {
3658 		*address = 0;
3659 		return KERN_INVALID_ARGUMENT;
3660 	}
3661 
3662 #define RETURN(value)   { result = value; goto BailOut; }
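/* all failure paths below funnel through "BailOut" for common cleanup */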
3663 
3664 	assert(VM_MAP_PAGE_ALIGNED(*address, FOURK_PAGE_MASK));
3665 	assert(VM_MAP_PAGE_ALIGNED(size, FOURK_PAGE_MASK));
3666 
3667 	if (!anywhere && overwrite) {
3668 		return KERN_NOT_SUPPORTED;
3669 	}
3670 
3671 	fourk_start = *address;
3672 	fourk_size = size;
3673 	fourk_end = fourk_start + fourk_size;
3674 
3675 	start = vm_map_trunc_page(*address, VM_MAP_PAGE_MASK(map));
3676 	end = vm_map_round_page(fourk_end, VM_MAP_PAGE_MASK(map));
3677 	size = end - start;
3678 
3679 	if (anywhere) {
3680 		return KERN_NOT_SUPPORTED;
3681 	} else {
3682 		/*
3683 		 *	Verify that:
3684 		 *		the address doesn't itself violate
3685 		 *		the mask requirement.
3686 		 */
3687 
3688 		vm_map_lock(map);
3689 		map_locked = TRUE;
3690 		if ((start & mask) != 0) {
3691 			RETURN(KERN_NO_SPACE);
3692 		}
3693 
3694 		/*
3695 		 *	...	the address is within bounds
3696 		 */
3697 
3698 		end = start + size;
3699 
3700 		if ((start < effective_min_offset) ||
3701 		    (end > effective_max_offset) ||
3702 		    (start >= end)) {
3703 			RETURN(KERN_INVALID_ADDRESS);
3704 		}
3705 
3706 		/*
3707 		 *	...	the starting address isn't allocated
3708 		 */
3709 		if (vm_map_lookup_entry(map, start, &entry)) {
3710 			vm_object_t cur_object, shadow_object;
3711 
3712 			/*
3713 			 * We might already have some 4K mappings
3714 			 * in this 16K page.
3715 			 */
3716 
3717 			if (entry->vme_end - entry->vme_start
3718 			    != SIXTEENK_PAGE_SIZE) {
3719 				RETURN(KERN_NO_SPACE);
3720 			}
3721 			if (entry->is_sub_map) {
3722 				RETURN(KERN_NO_SPACE);
3723 			}
3724 			if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
3725 				RETURN(KERN_NO_SPACE);
3726 			}
3727 
3728 			/* go all the way down the shadow chain */
3729 			cur_object = VME_OBJECT(entry);
3730 			vm_object_lock(cur_object);
3731 			while (cur_object->shadow != VM_OBJECT_NULL) {
3732 				shadow_object = cur_object->shadow;
3733 				vm_object_lock(shadow_object);
3734 				vm_object_unlock(cur_object);
3735 				cur_object = shadow_object;
3736 				shadow_object = VM_OBJECT_NULL;
3737 			}
3738 			if (cur_object->internal ||
3739 			    cur_object->pager == NULL) {
3740 				vm_object_unlock(cur_object);
3741 				RETURN(KERN_NO_SPACE);
3742 			}
3743 			if (cur_object->pager->mo_pager_ops
3744 			    != &fourk_pager_ops) {
3745 				vm_object_unlock(cur_object);
3746 				RETURN(KERN_NO_SPACE);
3747 			}
3748 			fourk_object = cur_object;
3749 			fourk_mem_obj = fourk_object->pager;
3750 
3751 			/* keep the "4K" object alive */
3752 			vm_object_reference_locked(fourk_object);
3753 			memory_object_reference(fourk_mem_obj);
3754 			vm_object_unlock(fourk_object);
3755 
3756 			/* merge permissions: the existing 16K entry will back both mappings */
3757 			entry->protection |= cur_protection;
3758 			entry->max_protection |= max_protection;
3759 
3760 			if ((entry->protection & VM_PROT_WRITE) &&
3761 			    (entry->protection & VM_PROT_ALLEXEC) &&
3762 			    fourk_binary_compatibility_unsafe &&
3763 			    fourk_binary_compatibility_allow_wx) {
3764 				/* write+execute: need to be "jit" */
3765 				entry->used_for_jit = TRUE;
3766 			}
3767 			goto map_in_fourk_pager;
3768 		}
3769 
3770 		/*
3771 		 *	...	the next region doesn't overlap the
3772 		 *		end point.
3773 		 */
3774 
3775 		if ((entry->vme_next != vm_map_to_entry(map)) &&
3776 		    (entry->vme_next->vme_start < end)) {
3777 			RETURN(KERN_NO_SPACE);
3778 		}
3779 	}
3780 
3781 	/*
3782 	 *	At this point,
3783 	 *		"start" and "end" should define the endpoints of the
3784 	 *			available new range, and
3785 	 *		"entry" should refer to the region before the new
3786 	 *			range, and
3787 	 *
3788 	 *		the map should be locked.
3789 	 */
3790 
3791 	/* create a new "4K" pager */
3792 	fourk_mem_obj = fourk_pager_create();
3793 	fourk_object = fourk_pager_to_vm_object(fourk_mem_obj);
3794 	assert(fourk_object);
3795 
3796 	/* keep the "4K" object alive */
3797 	vm_object_reference(fourk_object);
3798 
3799 	/* create a "copy" object, to map the "4K" object copy-on-write */
3800 	fourk_copy = TRUE;
3801 	result = vm_object_copy_strategically(fourk_object,
3802 	    0,
3803 	    end - start,
3804 	    false,                                   /* forking */
3805 	    &copy_object,
3806 	    &copy_offset,
3807 	    &fourk_copy);
3808 	assert(result == KERN_SUCCESS);
3809 	assert(copy_object != VM_OBJECT_NULL);
3810 	assert(copy_offset == 0);
3811 
3812 	/* map the "4K" pager's copy object */
3813 	new_entry = vm_map_entry_insert(map,
3814 	    entry,
3815 	    vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)),
3816 	    vm_map_round_page(end, VM_MAP_PAGE_MASK(map)),
3817 	    copy_object,
3818 	    0,                      /* offset */
3819 	    vmk_flags,
3820 	    FALSE,                  /* needs_copy */
3821 	    cur_protection, max_protection,
3822 	    (entry_for_jit && !VM_MAP_POLICY_ALLOW_JIT_INHERIT(map) ?
3823 	    VM_INHERIT_NONE : inheritance),
3824 	    clear_map_aligned);
3825 	entry = new_entry;
3826 
3827 #if VM_MAP_DEBUG_FOURK
3828 	if (vm_map_debug_fourk) {
3829 		printf("FOURK_PAGER: map %p [0x%llx:0x%llx] new pager %p\n",
3830 		    map,
3831 		    (uint64_t) entry->vme_start,
3832 		    (uint64_t) entry->vme_end,
3833 		    fourk_mem_obj);
3834 	}
3835 #endif /* VM_MAP_DEBUG_FOURK */
3836 
3837 	new_mapping_established = TRUE;
3838 
3839 map_in_fourk_pager:
3840 	/* "map" the original "object" where it belongs in the "4K" pager */
3841 	fourk_pager_offset = (fourk_start & SIXTEENK_PAGE_MASK);
3842 	fourk_pager_index_start = (int) (fourk_pager_offset / FOURK_PAGE_SIZE);
3843 	if (fourk_size > SIXTEENK_PAGE_SIZE) {
3844 		fourk_pager_index_num = 4;
3845 	} else {
3846 		fourk_pager_index_num = (int) (fourk_size / FOURK_PAGE_SIZE);
3847 	}
3848 	if (fourk_pager_index_start + fourk_pager_index_num > 4) {
3849 		fourk_pager_index_num = 4 - fourk_pager_index_start;
3850 	}
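	/*
	 * Illustrative example: with 16K map pages and 4K subpages,
	 * fourk_start = 0x5000 gives fourk_pager_offset = 0x1000 and
	 * fourk_pager_index_start = 1; a 0x2000-byte mapping then
	 * populates slots 1 and 2 of the pager's 4 slots.
	 */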
3851 	for (cur_idx = 0;
3852 	    cur_idx < fourk_pager_index_num;
3853 	    cur_idx++) {
3854 		vm_object_t             old_object;
3855 		vm_object_offset_t      old_offset;
3856 
3857 		kr = fourk_pager_populate(fourk_mem_obj,
3858 		    TRUE,                       /* overwrite */
3859 		    fourk_pager_index_start + cur_idx,
3860 		    object,
3861 		    (object
3862 		    ? (offset +
3863 		    (cur_idx * FOURK_PAGE_SIZE))
3864 		    : 0),
3865 		    &old_object,
3866 		    &old_offset);
3867 #if VM_MAP_DEBUG_FOURK
3868 		if (vm_map_debug_fourk) {
3869 			if (old_object == (vm_object_t) -1 &&
3870 			    old_offset == (vm_object_offset_t) -1) {
3871 				printf("FOURK_PAGER: map %p [0x%llx:0x%llx] "
3872 				    "pager [%p:0x%llx] "
3873 				    "populate[%d] "
3874 				    "[object:%p,offset:0x%llx]\n",
3875 				    map,
3876 				    (uint64_t) entry->vme_start,
3877 				    (uint64_t) entry->vme_end,
3878 				    fourk_mem_obj,
3879 				    VME_OFFSET(entry),
3880 				    fourk_pager_index_start + cur_idx,
3881 				    object,
3882 				    (object
3883 				    ? (offset + (cur_idx * FOURK_PAGE_SIZE))
3884 				    : 0));
3885 			} else {
3886 				printf("FOURK_PAGER: map %p [0x%llx:0x%llx] "
3887 				    "pager [%p:0x%llx] "
3888 				    "populate[%d] [object:%p,offset:0x%llx] "
3889 				    "old [%p:0x%llx]\n",
3890 				    map,
3891 				    (uint64_t) entry->vme_start,
3892 				    (uint64_t) entry->vme_end,
3893 				    fourk_mem_obj,
3894 				    VME_OFFSET(entry),
3895 				    fourk_pager_index_start + cur_idx,
3896 				    object,
3897 				    (object
3898 				    ? (offset + (cur_idx * FOURK_PAGE_SIZE))
3899 				    : 0),
3900 				    old_object,
3901 				    old_offset);
3902 			}
3903 		}
3904 #endif /* VM_MAP_DEBUG_FOURK */
3905 
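		/*
		 * The pager slot now holds "object" and has let go of
		 * "old_object": take a reference on the former and release
		 * the reference held on the latter, below.
		 */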
3906 		assert(kr == KERN_SUCCESS);
3907 		if (object != old_object &&
3908 		    object != VM_OBJECT_NULL &&
3909 		    object != (vm_object_t) -1) {
3910 			vm_object_reference(object);
3911 		}
3912 		if (object != old_object &&
3913 		    old_object != VM_OBJECT_NULL &&
3914 		    old_object != (vm_object_t) -1) {
3915 			vm_object_deallocate(old_object);
3916 		}
3917 	}
3918 
3919 BailOut:
3920 	assert(map_locked == TRUE);
3921 
3922 	if (result == KERN_SUCCESS) {
3923 		vm_prot_t pager_prot;
3924 		memory_object_t pager;
3925 
3926 #if DEBUG
3927 		if (pmap_empty &&
3928 		    !(vmk_flags.vmkf_no_pmap_check)) {
3929 			assert(pmap_is_empty(map->pmap,
3930 			    *address,
3931 			    *address + size));
3932 		}
3933 #endif /* DEBUG */
3934 
3935 		/*
3936 		 * For "named" VM objects, let the pager know that the
3937 		 * memory object is being mapped.  Some pagers need to keep
3938 		 * track of this, to know when they can reclaim the memory
3939 		 * object, for example.
3940 		 * VM calls memory_object_map() for each mapping (specifying
3941 		 * the protection of each mapping) and calls
3942 		 * memory_object_last_unmap() when all the mappings are gone.
3943 		 */
3944 		pager_prot = max_protection;
3945 		if (needs_copy) {
3946 			/*
3947 			 * Copy-On-Write mapping: won't modify
3948 			 * the memory object.
3949 			 */
3950 			pager_prot &= ~VM_PROT_WRITE;
3951 		}
3952 		if (!is_submap &&
3953 		    object != VM_OBJECT_NULL &&
3954 		    object->named &&
3955 		    object->pager != MEMORY_OBJECT_NULL) {
3956 			vm_object_lock(object);
3957 			pager = object->pager;
3958 			if (object->named &&
3959 			    pager != MEMORY_OBJECT_NULL) {
3960 				assert(object->pager_ready);
3961 				vm_object_mapping_wait(object, THREAD_UNINT);
3962 				vm_object_mapping_begin(object);
3963 				vm_object_unlock(object);
3964 
3965 				kr = memory_object_map(pager, pager_prot);
3966 				assert(kr == KERN_SUCCESS);
3967 
3968 				vm_object_lock(object);
3969 				vm_object_mapping_end(object);
3970 			}
3971 			vm_object_unlock(object);
3972 		}
3973 		if (!is_submap &&
3974 		    fourk_object != VM_OBJECT_NULL &&
3975 		    fourk_object->named &&
3976 		    fourk_object->pager != MEMORY_OBJECT_NULL) {
3977 			vm_object_lock(fourk_object);
3978 			pager = fourk_object->pager;
3979 			if (fourk_object->named &&
3980 			    pager != MEMORY_OBJECT_NULL) {
3981 				assert(fourk_object->pager_ready);
3982 				vm_object_mapping_wait(fourk_object,
3983 				    THREAD_UNINT);
3984 				vm_object_mapping_begin(fourk_object);
3985 				vm_object_unlock(fourk_object);
3986 
3987 				kr = memory_object_map(pager, VM_PROT_READ);
3988 				assert(kr == KERN_SUCCESS);
3989 
3990 				vm_object_lock(fourk_object);
3991 				vm_object_mapping_end(fourk_object);
3992 			}
3993 			vm_object_unlock(fourk_object);
3994 		}
3995 	}
3996 
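	/* release the references taken earlier to keep the "4K" object alive */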
3997 	if (fourk_object != VM_OBJECT_NULL) {
3998 		vm_object_deallocate(fourk_object);
3999 		fourk_object = VM_OBJECT_NULL;
4000 		memory_object_deallocate(fourk_mem_obj);
4001 		fourk_mem_obj = MEMORY_OBJECT_NULL;
4002 	}
4003 
4004 	assert(map_locked == TRUE);
4005 
4006 	if (!keep_map_locked) {
4007 		vm_map_unlock(map);
4008 		map_locked = FALSE;
4009 	}
4010 
4011 	/*
4012 	 * We can't hold the map lock if we enter this block.
4013 	 */
4014 
4015 	if (result == KERN_SUCCESS) {
4016 		/*	Wire down the new entry if the user
4017 		 *	requested all new map entries be wired.
4018 		 */
4019 		if ((map->wiring_required) || (superpage_size)) {
4020 			assert(!keep_map_locked);
4021 			pmap_empty = FALSE; /* pmap won't be empty */
4022 			kr = vm_map_wire_kernel(map, start, end,
4023 			    new_entry->protection, VM_KERN_MEMORY_MLOCK,
4024 			    TRUE);
4025 			result = kr;
4026 		}
4027 
4028 	}
4029 
4030 	if (result != KERN_SUCCESS) {
4031 		if (new_mapping_established) {
4032 			/*
4033 			 * We have to get rid of the new mappings since we
4034 			 * won't make them available to the user.
4035 			 * Try to do that atomically, to minimize the risk
4036 			 * that someone else creates new mappings in that range.
4037 			 */
4038 
4039 			if (!map_locked) {
4040 				vm_map_lock(map);
4041 				map_locked = TRUE;
4042 			}
4043 			(void)vm_map_delete(map, *address, *address + size,
4044 			    VM_MAP_REMOVE_NO_MAP_ALIGN | VM_MAP_REMOVE_NO_YIELD,
4045 			    KMEM_GUARD_NONE, &zap_list);
4046 		}
4047 	}
4048 
4049 	/*
4050 	 * The caller is responsible for releasing the lock if it requested to
4051 	 * keep the map locked.
4052 	 */
4053 	if (map_locked && !keep_map_locked) {
4054 		vm_map_unlock(map);
4055 	}
4056 
4057 	vm_map_zap_dispose(&zap_list);
4058 
4059 	return result;
4060 
4061 #undef  RETURN
4062 }
4063 #endif /* __arm64__ */
4064 
4065 /*
4066  * Counters for the prefault optimization.
4067  */
4068 int64_t vm_prefault_nb_pages = 0;
4069 int64_t vm_prefault_nb_bailout = 0;
4070 
4071 static kern_return_t
4072 vm_map_enter_mem_object_helper(
4073 	vm_map_t                target_map,
4074 	vm_map_offset_t         *address,
4075 	vm_map_size_t           initial_size,
4076 	vm_map_offset_t         mask,
4077 	vm_map_kernel_flags_t   vmk_flags,
4078 	ipc_port_t              port,
4079 	vm_object_offset_t      offset,
4080 	boolean_t               copy,
4081 	vm_prot_t               cur_protection,
4082 	vm_prot_t               max_protection,
4083 	vm_inherit_t            inheritance,
4084 	upl_page_list_ptr_t     page_list,
4085 	unsigned int            page_list_count)
4086 {
4087 	vm_map_address_t        map_addr;
4088 	vm_map_size_t           map_size;
4089 	vm_object_t             object;
4090 	vm_object_size_t        size;
4091 	kern_return_t           result;
4092 	boolean_t               mask_cur_protection, mask_max_protection;
4093 	boolean_t               kernel_prefault, try_prefault = (page_list_count != 0);
4094 	vm_map_offset_t         offset_in_mapping = 0;
4095 #if __arm64__
4096 	boolean_t               fourk = vmk_flags.vmkf_fourk;
4097 #endif /* __arm64__ */
4098 
4099 	if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
4100 		/* XXX TODO4K prefaulting depends on page size... */
4101 		try_prefault = FALSE;
4102 	}
4103 
4104 	assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused);
4105 	vm_map_kernel_flags_update_range_id(&vmk_flags, target_map);
4106 
4107 	mask_cur_protection = cur_protection & VM_PROT_IS_MASK;
4108 	mask_max_protection = max_protection & VM_PROT_IS_MASK;
4109 	cur_protection &= ~VM_PROT_IS_MASK;
4110 	max_protection &= ~VM_PROT_IS_MASK;
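	/*
	 * VM_PROT_IS_MASK requests that the named entry's own protections
	 * be used to mask the requested protections instead of being
	 * validated against them; see the named-entry checks below.
	 */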
4111 
4112 	/*
4113 	 * Check arguments for validity
4114 	 */
4115 	if ((target_map == VM_MAP_NULL) ||
4116 	    (cur_protection & ~(VM_PROT_ALL | VM_PROT_ALLEXEC)) ||
4117 	    (max_protection & ~(VM_PROT_ALL | VM_PROT_ALLEXEC)) ||
4118 	    (inheritance > VM_INHERIT_LAST_VALID) ||
4119 	    (try_prefault && (copy || !page_list)) ||
4120 	    initial_size == 0) {
4121 		return KERN_INVALID_ARGUMENT;
4122 	}
4123 
4124 #if __arm64__
4125 	if (cur_protection & VM_PROT_EXECUTE) {
4126 		cur_protection |= VM_PROT_READ;
4127 	}
4128 
4129 	if (fourk && VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) {
4130 		/* no "fourk" if map is using a sub-page page size */
4131 		fourk = FALSE;
4132 	}
4133 	if (fourk) {
4134 		map_addr = vm_map_trunc_page(*address, FOURK_PAGE_MASK);
4135 		map_size = vm_map_round_page(initial_size, FOURK_PAGE_MASK);
4136 	} else
4137 #endif /* __arm64__ */
4138 	{
4139 		map_addr = vm_map_trunc_page(*address,
4140 		    VM_MAP_PAGE_MASK(target_map));
4141 		map_size = vm_map_round_page(initial_size,
4142 		    VM_MAP_PAGE_MASK(target_map));
4143 	}
4144 	if (map_size == 0) {
4145 		return KERN_INVALID_ARGUMENT;
4146 	}
4147 	size = vm_object_round_page(initial_size);
4148 
4149 	/*
4150 	 * Find the vm object (if any) corresponding to this port.
4151 	 */
4152 	if (!IP_VALID(port)) {
4153 		object = VM_OBJECT_NULL;
4154 		offset = 0;
4155 		copy = FALSE;
4156 	} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
4157 		vm_named_entry_t        named_entry;
4158 		vm_object_offset_t      data_offset;
4159 
4160 		named_entry = mach_memory_entry_from_port(port);
4161 
4162 		if (vmk_flags.vmf_return_data_addr ||
4163 		    vmk_flags.vmf_return_4k_data_addr) {
4164 			data_offset = named_entry->data_offset;
4165 			offset += named_entry->data_offset;
4166 		} else {
4167 			data_offset = 0;
4168 		}
4169 
4170 		/* a few checks to make sure user is obeying rules */
4171 		if (mask_max_protection) {
4172 			max_protection &= named_entry->protection;
4173 		}
4174 		if (mask_cur_protection) {
4175 			cur_protection &= named_entry->protection;
4176 		}
4177 		if ((named_entry->protection & max_protection) !=
4178 		    max_protection) {
4179 			return KERN_INVALID_RIGHT;
4180 		}
4181 		if ((named_entry->protection & cur_protection) !=
4182 		    cur_protection) {
4183 			return KERN_INVALID_RIGHT;
4184 		}
4185 		if (offset + size <= offset) {
4186 			/* overflow */
4187 			return KERN_INVALID_ARGUMENT;
4188 		}
4189 		if (named_entry->size < (offset + initial_size)) {
4190 			return KERN_INVALID_ARGUMENT;
4191 		}
4192 
4193 		if (named_entry->is_copy) {
4194 			/* for a vm_map_copy, we can only map it whole */
4195 			if ((size != named_entry->size) &&
4196 			    (vm_map_round_page(size,
4197 			    VM_MAP_PAGE_MASK(target_map)) ==
4198 			    named_entry->size)) {
4199 				/* XXX FBDP use the rounded size... */
4200 				size = vm_map_round_page(
4201 					size,
4202 					VM_MAP_PAGE_MASK(target_map));
4203 			}
4204 		}
4205 
4206 		/* the caller's "offset" parameter is relative to the start */
4207 		/* of the named entry, so convert it to an offset in the object */
4208 		offset = offset + named_entry->offset;
4209 
4210 		if (!VM_MAP_PAGE_ALIGNED(size,
4211 		    VM_MAP_PAGE_MASK(target_map))) {
4212 			/*
4213 			 * Let's not map more than requested;
4214 			 * vm_map_enter() will handle this "not map-aligned"
4215 			 * case.
4216 			 */
4217 			map_size = size;
4218 		}
4219 
4220 		named_entry_lock(named_entry);
4221 		if (named_entry->is_sub_map) {
4222 			vm_map_t                submap;
4223 
4224 			if (vmk_flags.vmf_return_data_addr ||
4225 			    vmk_flags.vmf_return_4k_data_addr) {
4226 				panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap.");
4227 			}
4228 
4229 			submap = named_entry->backing.map;
4230 			vm_map_reference(submap);
4231 			named_entry_unlock(named_entry);
4232 
4233 			vmk_flags.vmkf_submap = TRUE;
4234 
4235 			result = vm_map_enter(target_map,
4236 			    &map_addr,
4237 			    map_size,
4238 			    mask,
4239 			    vmk_flags,
4240 			    (vm_object_t)(uintptr_t) submap,
4241 			    offset,
4242 			    copy,
4243 			    cur_protection,
4244 			    max_protection,
4245 			    inheritance);
4246 			if (result != KERN_SUCCESS) {
4247 				vm_map_deallocate(submap);
4248 			} else {
4249 				/*
4250 				 * No need to lock "submap" just to check its
4251 				 * "mapped" flag: that flag is never reset
4252 				 * once it's been set and if we race, we'll
4253 				 * just end up setting it twice, which is OK.
4254 				 */
4255 				if (submap->mapped_in_other_pmaps == FALSE &&
4256 				    vm_map_pmap(submap) != PMAP_NULL &&
4257 				    vm_map_pmap(submap) !=
4258 				    vm_map_pmap(target_map)) {
4259 					/*
4260 					 * This submap is being mapped in a map
4261 					 * that uses a different pmap.
4262 					 * Set its "mapped_in_other_pmaps" flag
4263 					 * to indicate that we now need to
4264 					 * remove mappings from all pmaps rather
4265 					 * than just the submap's pmap.
4266 					 */
4267 					vm_map_lock(submap);
4268 					submap->mapped_in_other_pmaps = TRUE;
4269 					vm_map_unlock(submap);
4270 				}
4271 				*address = map_addr;
4272 			}
4273 			return result;
4274 		} else if (named_entry->is_copy) {
4275 			kern_return_t   kr;
4276 			vm_map_copy_t   copy_map;
4277 			vm_map_entry_t  copy_entry;
4278 			vm_map_offset_t copy_addr;
4279 			vm_map_copy_t   target_copy_map;
4280 			vm_map_offset_t overmap_start, overmap_end;
4281 			vm_map_offset_t trimmed_start;
4282 			vm_map_size_t   target_size;
4283 
4284 			if (!vm_map_kernel_flags_check_vmflags(vmk_flags,
4285 			    (VM_FLAGS_FIXED |
4286 			    VM_FLAGS_ANYWHERE |
4287 			    VM_FLAGS_OVERWRITE |
4288 			    VM_FLAGS_RETURN_4K_DATA_ADDR |
4289 			    VM_FLAGS_RETURN_DATA_ADDR))) {
4290 				named_entry_unlock(named_entry);
4291 				return KERN_INVALID_ARGUMENT;
4292 			}
4293 
4294 			copy_map = named_entry->backing.copy;
4295 			assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST);
4296 			if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) {
4297 				/* unsupported type; should not happen */
4298 				printf("vm_map_enter_mem_object: "
4299 				    "memory_entry->backing.copy "
4300 				    "unsupported type 0x%x\n",
4301 				    copy_map->type);
4302 				named_entry_unlock(named_entry);
4303 				return KERN_INVALID_ARGUMENT;
4304 			}
4305 
4306 			if (VM_MAP_PAGE_SHIFT(target_map) != copy_map->cpy_hdr.page_shift) {
4307 				DEBUG4K_SHARE("copy_map %p offset %llx size 0x%llx pgshift %d -> target_map %p pgshift %d\n", copy_map, offset, (uint64_t)map_size, copy_map->cpy_hdr.page_shift, target_map, VM_MAP_PAGE_SHIFT(target_map));
4308 			}
4309 
4310 			if (vmk_flags.vmf_return_data_addr ||
4311 			    vmk_flags.vmf_return_4k_data_addr) {
4312 				offset_in_mapping = offset & VM_MAP_PAGE_MASK(target_map);
4313 				if (vmk_flags.vmf_return_4k_data_addr) {
4314 					offset_in_mapping &= ~((signed)(0xFFF));
4315 				}
4316 			}
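			/*
			 * With vmf_return_4k_data_addr, the returned address is
			 * rounded down to a 4K boundary rather than to the
			 * target map's (possibly larger) page boundary.
			 */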
4317 
4318 			target_copy_map = VM_MAP_COPY_NULL;
4319 			target_size = copy_map->size;
4320 			overmap_start = 0;
4321 			overmap_end = 0;
4322 			trimmed_start = 0;
4323 			if (copy_map->cpy_hdr.page_shift != VM_MAP_PAGE_SHIFT(target_map)) {
4324 				DEBUG4K_ADJUST("adjusting...\n");
4325 				kr = vm_map_copy_adjust_to_target(
4326 					copy_map,
4327 					offset /* includes data_offset */,
4328 					initial_size,
4329 					target_map,
4330 					copy,
4331 					&target_copy_map,
4332 					&overmap_start,
4333 					&overmap_end,
4334 					&trimmed_start);
4335 				if (kr != KERN_SUCCESS) {
4336 					named_entry_unlock(named_entry);
4337 					return kr;
4338 				}
4339 				target_size = target_copy_map->size;
4340 				if (trimmed_start >= data_offset) {
4341 					data_offset = offset & VM_MAP_PAGE_MASK(target_map);
4342 				} else {
4343 					data_offset -= trimmed_start;
4344 				}
4345 			} else {
4346 				/*
4347 				 * Assert that the vm_map_copy is coming from the right
4348 				 * zone and hasn't been forged
4349 				 */
4350 				vm_map_copy_require(copy_map);
4351 				target_copy_map = copy_map;
4352 			}
4353 
4354 			vm_map_kernel_flags_t rsv_flags = vmk_flags;
4355 
4356 			vm_map_kernel_flags_and_vmflags(&rsv_flags,
4357 			    (VM_FLAGS_FIXED |
4358 			    VM_FLAGS_ANYWHERE |
4359 			    VM_FLAGS_OVERWRITE |
4360 			    VM_FLAGS_RETURN_4K_DATA_ADDR |
4361 			    VM_FLAGS_RETURN_DATA_ADDR));
4362 
4363 			/* reserve a contiguous range */
4364 			kr = vm_map_enter(target_map,
4365 			    &map_addr,
4366 			    vm_map_round_page(target_size, VM_MAP_PAGE_MASK(target_map)),
4367 			    mask,
4368 			    rsv_flags,
4369 			    VM_OBJECT_NULL,
4370 			    0,
4371 			    FALSE,               /* copy */
4372 			    cur_protection,
4373 			    max_protection,
4374 			    inheritance);
4375 			if (kr != KERN_SUCCESS) {
4376 				DEBUG4K_ERROR("kr 0x%x\n", kr);
4377 				if (target_copy_map != copy_map) {
4378 					vm_map_copy_discard(target_copy_map);
4379 					target_copy_map = VM_MAP_COPY_NULL;
4380 				}
4381 				named_entry_unlock(named_entry);
4382 				return kr;
4383 			}
4384 
4385 			copy_addr = map_addr;
4386 
4387 			for (copy_entry = vm_map_copy_first_entry(target_copy_map);
4388 			    copy_entry != vm_map_copy_to_entry(target_copy_map);
4389 			    copy_entry = copy_entry->vme_next) {
4390 				vm_map_t                copy_submap = VM_MAP_NULL;
4391 				vm_object_t             copy_object = VM_OBJECT_NULL;
4392 				vm_map_size_t           copy_size;
4393 				vm_object_offset_t      copy_offset;
4394 				boolean_t               do_copy = false;
4395 
4396 				if (copy_entry->is_sub_map) {
4397 					copy_submap = VME_SUBMAP(copy_entry);
4398 					copy_object = (vm_object_t)copy_submap;
4399 				} else {
4400 					copy_object = VME_OBJECT(copy_entry);
4401 				}
4402 				copy_offset = VME_OFFSET(copy_entry);
4403 				copy_size = (copy_entry->vme_end -
4404 				    copy_entry->vme_start);
4405 
4406 				/* sanity check */
4407 				if ((copy_addr + copy_size) >
4408 				    (map_addr +
4409 				    overmap_start + overmap_end +
4410 				    named_entry->size /* XXX full size */)) {
4411 					/* over-mapping too much!? */
4412 					kr = KERN_INVALID_ARGUMENT;
4413 					DEBUG4K_ERROR("kr 0x%x\n", kr);
4414 					/* abort */
4415 					break;
4416 				}
4417 
4418 				/* take a reference on the object */
4419 				if (copy_entry->is_sub_map) {
4420 					vm_map_reference(copy_submap);
4421 				} else {
4422 					if (!copy &&
4423 					    copy_object != VM_OBJECT_NULL &&
4424 					    copy_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
4425 						/*
4426 						 * We need to resolve our side of this
4427 						 * "symmetric" copy-on-write now; we
4428 						 * need a new object to map and share,
4429 						 * instead of the current one which
4430 						 * might still be shared with the
4431 						 * original mapping.
4432 						 *
4433 						 * Note: A "vm_map_copy_t" does not
4434 						 * have a lock but we're protected by
4435 						 * the named entry's lock here.
4436 						 */
4437 						// assert(copy_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
4438 						VME_OBJECT_SHADOW(copy_entry, copy_size, TRUE);
4439 						assert(copy_object != VME_OBJECT(copy_entry));
4440 						if (!copy_entry->needs_copy &&
4441 						    copy_entry->protection & VM_PROT_WRITE) {
4442 							vm_prot_t prot;
4443 
4444 							prot = copy_entry->protection & ~VM_PROT_WRITE;
4445 							vm_object_pmap_protect(copy_object,
4446 							    copy_offset,
4447 							    copy_size,
4448 							    PMAP_NULL,
4449 							    PAGE_SIZE,
4450 							    0,
4451 							    prot);
4452 						}
4453 						copy_entry->needs_copy = FALSE;
4454 						copy_entry->is_shared = TRUE;
4455 						copy_object = VME_OBJECT(copy_entry);
4456 						copy_offset = VME_OFFSET(copy_entry);
4457 						vm_object_lock(copy_object);
4458 						/* we're about to make a shared mapping of this object */
4459 						copy_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
4460 						copy_object->true_share = TRUE;
4461 						vm_object_unlock(copy_object);
4462 					}
4463 
4464 					if (copy_object != VM_OBJECT_NULL &&
4465 					    copy_object->named &&
4466 					    copy_object->pager != MEMORY_OBJECT_NULL &&
4467 					    copy_object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
4468 						memory_object_t pager;
4469 						vm_prot_t       pager_prot;
4470 
4471 						/*
4472 						 * For "named" VM objects, let the pager know that the
4473 						 * memory object is being mapped.  Some pagers need to keep
4474 						 * track of this, to know when they can reclaim the memory
4475 						 * object, for example.
4476 						 * VM calls memory_object_map() for each mapping (specifying
4477 						 * the protection of each mapping) and calls
4478 						 * memory_object_last_unmap() when all the mappings are gone.
4479 						 */
4480 						pager_prot = max_protection;
4481 						if (copy) {
4482 							/*
4483 							 * Copy-On-Write mapping: won't modify the
4484 							 * memory object.
4485 							 */
4486 							pager_prot &= ~VM_PROT_WRITE;
4487 						}
4488 						vm_object_lock(copy_object);
4489 						pager = copy_object->pager;
4490 						if (copy_object->named &&
4491 						    pager != MEMORY_OBJECT_NULL &&
4492 						    copy_object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
4493 							assert(copy_object->pager_ready);
4494 							vm_object_mapping_wait(copy_object, THREAD_UNINT);
4495 							vm_object_mapping_begin(copy_object);
4496 							vm_object_unlock(copy_object);
4497 
4498 							kr = memory_object_map(pager, pager_prot);
4499 							assert(kr == KERN_SUCCESS);
4500 
4501 							vm_object_lock(copy_object);
4502 							vm_object_mapping_end(copy_object);
4503 						}
4504 						vm_object_unlock(copy_object);
4505 					}
4506 
4507 					/*
4508 					 *	Perform the copy if requested
4509 					 */
4510 
4511 					if (copy && copy_object != VM_OBJECT_NULL) {
4512 						vm_object_t             new_object;
4513 						vm_object_offset_t      new_offset;
4514 
4515 						result = vm_object_copy_strategically(copy_object, copy_offset,
4516 						    copy_size,
4517 						    false,                                   /* forking */
4518 						    &new_object, &new_offset,
4519 						    &do_copy);
4520 
4521 
4522 						if (result == KERN_MEMORY_RESTART_COPY) {
4523 							boolean_t success;
4524 							boolean_t src_needs_copy;
4525 
4526 							/*
4527 							 * XXX
4528 							 * We currently ignore src_needs_copy.
4529 							 * This really is the issue of how to make
4530 							 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
4531 							 * non-kernel users to use. Solution forthcoming.
4532 							 * In the meantime, since we don't allow non-kernel
4533 							 * memory managers to specify symmetric copy,
4534 							 * we won't run into problems here.
4535 							 */
4536 							new_object = copy_object;
4537 							new_offset = copy_offset;
4538 							success = vm_object_copy_quickly(new_object,
4539 							    new_offset,
4540 							    copy_size,
4541 							    &src_needs_copy,
4542 							    &do_copy);
4543 							assert(success);
4544 							result = KERN_SUCCESS;
4545 						}
4546 						if (result != KERN_SUCCESS) {
4547 							kr = result;
4548 							break;
4549 						}
4550 
4551 						copy_object = new_object;
4552 						copy_offset = new_offset;
4553 						/*
4554 						 * No extra object reference for the mapping:
4555 						 * the mapping should be the only thing keeping
4556 						 * this new object alive.
4557 						 */
4558 					} else {
4559 						/*
4560 						 * We already have the right object
4561 						 * to map.
4562 						 */
4563 						copy_object = VME_OBJECT(copy_entry);
4564 						/* take an extra ref for the mapping below */
4565 						vm_object_reference(copy_object);
4566 					}
4567 				}
4568 
4569 				/*
4570 				 * If the caller does not want a specific
4571 				 * tag for this new mapping:  use
4572 				 * the tag of the original mapping.
4573 				 */
4574 				vm_map_kernel_flags_t vmk_remap_flags = {
4575 					.vmkf_submap = copy_entry->is_sub_map,
4576 				};
4577 
4578 				vm_map_kernel_flags_set_vmflags(&vmk_remap_flags,
4579 				    vm_map_kernel_flags_vmflags(vmk_flags),
4580 				    vmk_flags.vm_tag ?: VME_ALIAS(copy_entry));
4581 
4582 				/* over-map the object into destination */
4583 				vmk_remap_flags.vmf_fixed = true;
4584 				vmk_remap_flags.vmf_overwrite = true;
4585 
4586 				if (!copy && !copy_entry->is_sub_map) {
4587 					/*
4588 					 * copy-on-write should have been
4589 					 * resolved at this point, or we would
4590 					 * end up sharing instead of copying.
4591 					 */
4592 					assert(!copy_entry->needs_copy);
4593 				}
4594 #if XNU_TARGET_OS_OSX
4595 				if (copy_entry->used_for_jit) {
4596 					vmk_remap_flags.vmkf_map_jit = TRUE;
4597 				}
4598 #endif /* XNU_TARGET_OS_OSX */
4599 
4600 				kr = vm_map_enter(target_map,
4601 				    &copy_addr,
4602 				    copy_size,
4603 				    (vm_map_offset_t) 0,
4604 				    vmk_remap_flags,
4605 				    copy_object,
4606 				    copy_offset,
4607 				    ((copy_object == NULL)
4608 				    ? FALSE
4609 				    : (copy || copy_entry->needs_copy)),
4610 				    cur_protection,
4611 				    max_protection,
4612 				    inheritance);
4613 				if (kr != KERN_SUCCESS) {
4614 					DEBUG4K_SHARE("failed kr 0x%x\n", kr);
4615 					if (copy_entry->is_sub_map) {
4616 						vm_map_deallocate(copy_submap);
4617 					} else {
4618 						vm_object_deallocate(copy_object);
4619 					}
4620 					/* abort */
4621 					break;
4622 				}
4623 
4624 				/* next mapping */
4625 				copy_addr += copy_size;
4626 			}
4627 
4628 			if (kr == KERN_SUCCESS) {
4629 				if (vmk_flags.vmf_return_data_addr ||
4630 				    vmk_flags.vmf_return_4k_data_addr) {
4631 					*address = map_addr + offset_in_mapping;
4632 				} else {
4633 					*address = map_addr;
4634 				}
4635 				if (overmap_start) {
4636 					*address += overmap_start;
4637 					DEBUG4K_SHARE("map %p map_addr 0x%llx offset_in_mapping 0x%llx overmap_start 0x%llx -> *address 0x%llx\n", target_map, (uint64_t)map_addr, (uint64_t) offset_in_mapping, (uint64_t)overmap_start, (uint64_t)*address);
4638 				}
4639 			}
4640 			named_entry_unlock(named_entry);
4641 			if (target_copy_map != copy_map) {
4642 				vm_map_copy_discard(target_copy_map);
4643 				target_copy_map = VM_MAP_COPY_NULL;
4644 			}
4645 
4646 			if (kr != KERN_SUCCESS && !vmk_flags.vmf_overwrite) {
4647 				/* deallocate the contiguous range */
4648 				(void) vm_deallocate(target_map,
4649 				    map_addr,
4650 				    map_size);
4651 			}
4652 
4653 			return kr;
4654 		}
4655 
4656 		if (named_entry->is_object) {
4657 			unsigned int    access;
4658 			unsigned int    wimg_mode;
4659 
4660 			/* we are mapping a VM object */
4661 
4662 			access = named_entry->access;
4663 
4664 			if (vmk_flags.vmf_return_data_addr ||
4665 			    vmk_flags.vmf_return_4k_data_addr) {
4666 				offset_in_mapping = offset - VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(target_map));
4667 				if (vmk_flags.vmf_return_4k_data_addr) {
4668 					offset_in_mapping &= ~((signed)(0xFFF));
4669 				}
4670 				offset = VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(target_map));
4671 				map_size = VM_MAP_ROUND_PAGE((offset + offset_in_mapping + initial_size) - offset, VM_MAP_PAGE_MASK(target_map));
4672 			}
4673 
4674 			object = vm_named_entry_to_vm_object(named_entry);
4675 			assert(object != VM_OBJECT_NULL);
4676 			vm_object_lock(object);
4677 			named_entry_unlock(named_entry);
4678 
4679 			vm_object_reference_locked(object);
4680 
4681 			wimg_mode = object->wimg_bits;
4682 			vm_prot_to_wimg(access, &wimg_mode);
4683 			if (object->wimg_bits != wimg_mode) {
4684 				vm_object_change_wimg_mode(object, wimg_mode);
4685 			}
4686 
4687 			vm_object_unlock(object);
4688 		} else {
4689 			panic("invalid VM named entry %p", named_entry);
4690 		}
4691 	} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
4692 		/*
4693 		 * JMM - This is temporary until we unify named entries
4694 		 * and raw memory objects.
4695 		 *
4696 		 * Detected fake ip_kotype for a memory object.  In
4697 		 * this case, the port isn't really a port at all, but
4698 		 * instead is just a raw memory object.
4699 		 */
4700 		if (vmk_flags.vmf_return_data_addr ||
4701 		    vmk_flags.vmf_return_4k_data_addr) {
4702 			panic("VM_FLAGS_RETURN_DATA_ADDR not expected for raw memory object.");
4703 		}
4704 
4705 		object = memory_object_to_vm_object((memory_object_t)port);
4706 		if (object == VM_OBJECT_NULL) {
4707 			return KERN_INVALID_OBJECT;
4708 		}
4709 		vm_object_reference(object);
4710 
4711 		/* wait for object (if any) to be ready */
4712 		if (object != VM_OBJECT_NULL) {
4713 			if (is_kernel_object(object)) {
4714 				printf("Warning: Attempt to map kernel object"
4715 				    " by a non-private kernel entity\n");
4716 				return KERN_INVALID_OBJECT;
4717 			}
4718 			if (!object->pager_ready) {
4719 				vm_object_lock(object);
4720 
4721 				while (!object->pager_ready) {
4722 					vm_object_wait(object,
4723 					    VM_OBJECT_EVENT_PAGER_READY,
4724 					    THREAD_UNINT);
4725 					vm_object_lock(object);
4726 				}
4727 				vm_object_unlock(object);
4728 			}
4729 		}
4730 	} else {
4731 		return KERN_INVALID_OBJECT;
4732 	}
4733 
4734 	if (object != VM_OBJECT_NULL &&
4735 	    object->named &&
4736 	    object->pager != MEMORY_OBJECT_NULL &&
4737 	    object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
4738 		memory_object_t pager;
4739 		vm_prot_t       pager_prot;
4740 		kern_return_t   kr;
4741 
4742 		/*
4743 		 * For "named" VM objects, let the pager know that the
4744 		 * memory object is being mapped.  Some pagers need to keep
4745 		 * track of this, to know when they can reclaim the memory
4746 		 * object, for example.
4747 		 * VM calls memory_object_map() for each mapping (specifying
4748 		 * the protection of each mapping) and calls
4749 		 * memory_object_last_unmap() when all the mappings are gone.
4750 		 */
4751 		pager_prot = max_protection;
4752 		if (copy) {
4753 			/*
4754 			 * Copy-On-Write mapping: won't modify the
4755 			 * memory object.
4756 			 */
4757 			pager_prot &= ~VM_PROT_WRITE;
4758 		}
4759 		vm_object_lock(object);
4760 		pager = object->pager;
4761 		if (object->named &&
4762 		    pager != MEMORY_OBJECT_NULL &&
4763 		    object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
4764 			assert(object->pager_ready);
4765 			vm_object_mapping_wait(object, THREAD_UNINT);
4766 			vm_object_mapping_begin(object);
4767 			vm_object_unlock(object);
4768 
4769 			kr = memory_object_map(pager, pager_prot);
4770 			assert(kr == KERN_SUCCESS);
4771 
4772 			vm_object_lock(object);
4773 			vm_object_mapping_end(object);
4774 		}
4775 		vm_object_unlock(object);
4776 	}
4777 
4778 	/*
4779 	 *	Perform the copy if requested
4780 	 */
4781 
4782 	if (copy) {
4783 		vm_object_t             new_object;
4784 		vm_object_offset_t      new_offset;
4785 
4786 		result = vm_object_copy_strategically(object, offset,
4787 		    map_size,
4788 		    false,                                   /* forking */
4789 		    &new_object, &new_offset,
4790 		    &copy);
4791 
4792 
4793 		if (result == KERN_MEMORY_RESTART_COPY) {
4794 			boolean_t success;
4795 			boolean_t src_needs_copy;
4796 
4797 			/*
4798 			 * XXX
4799 			 * We currently ignore src_needs_copy.
4800 			 * This really is the issue of how to make
4801 			 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
4802 			 * non-kernel users to use. Solution forthcoming.
4803 			 * In the meantime, since we don't allow non-kernel
4804 			 * memory managers to specify symmetric copy,
4805 			 * we won't run into problems here.
4806 			 */
4807 			new_object = object;
4808 			new_offset = offset;
4809 			success = vm_object_copy_quickly(new_object,
4810 			    new_offset,
4811 			    map_size,
4812 			    &src_needs_copy,
4813 			    &copy);
4814 			assert(success);
4815 			result = KERN_SUCCESS;
4816 		}
4817 		/*
4818 		 *	Throw away the reference to the
4819 		 *	original object, as it won't be mapped.
4820 		 */
4821 
4822 		vm_object_deallocate(object);
4823 
4824 		if (result != KERN_SUCCESS) {
4825 			return result;
4826 		}
4827 
4828 		object = new_object;
4829 		offset = new_offset;
4830 	}
4831 
4832 	/*
4833 	 * If non-kernel users want to try to prefault pages, the mapping
4834 	 * and the prefault need to be atomic.
4835 	 */
4836 	kernel_prefault = (try_prefault && vm_kernel_map_is_kernel(target_map));
4837 	vmk_flags.vmkf_keep_map_locked = (try_prefault && !kernel_prefault);
4838 
4839 #if __arm64__
4840 	if (fourk) {
4841 		/* map this object in a "4K" pager */
4842 		result = vm_map_enter_fourk(target_map,
4843 		    &map_addr,
4844 		    map_size,
4845 		    (vm_map_offset_t) mask,
4846 		    vmk_flags,
4847 		    object,
4848 		    offset,
4849 		    copy,
4850 		    cur_protection,
4851 		    max_protection,
4852 		    inheritance);
4853 	} else
4854 #endif /* __arm64__ */
4855 	{
4856 		result = vm_map_enter(target_map,
4857 		    &map_addr, map_size,
4858 		    (vm_map_offset_t)mask,
4859 		    vmk_flags,
4860 		    object, offset,
4861 		    copy,
4862 		    cur_protection, max_protection,
4863 		    inheritance);
4864 	}
4865 	if (result != KERN_SUCCESS) {
4866 		vm_object_deallocate(object);
4867 	}
4868 
4869 	/*
4870 	 * Try to prefault, and do not forget to release the vm map lock.
4871 	 */
4872 	if (result == KERN_SUCCESS && try_prefault) {
4873 		mach_vm_address_t va = map_addr;
4874 		kern_return_t kr = KERN_SUCCESS;
4875 		unsigned int i = 0;
4876 		int pmap_options;
4877 
4878 		pmap_options = kernel_prefault ? 0 : PMAP_OPTIONS_NOWAIT;
4879 		if (object->internal) {
4880 			pmap_options |= PMAP_OPTIONS_INTERNAL;
4881 		}
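		/*
		 * For user prefaulting, PMAP_OPTIONS_NOWAIT makes
		 * pmap_enter_options() fail rather than block when resources
		 * are short; the loop below then just stops prefaulting.
		 */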
4882 
4883 		for (i = 0; i < page_list_count; ++i) {
4884 			if (!UPL_VALID_PAGE(page_list, i)) {
4885 				if (kernel_prefault) {
4886 					assertf(FALSE, "kernel_prefault && !UPL_VALID_PAGE");
4887 					result = KERN_MEMORY_ERROR;
4888 					break;
4889 				}
4890 			} else {
4891 				/*
4892 				 * If this call fails, we should stop trying
4893 				 * to optimize: subsequent calls are likely to
4894 				 * fail too.
4895 				 *
4896 				 * We are not going to report an error for such
4897 				 * a failure, though: prefaulting is an
4898 				 * optimization, not something critical.
4899 				 */
4900 				kr = pmap_enter_options(target_map->pmap,
4901 				    va, UPL_PHYS_PAGE(page_list, i),
4902 				    cur_protection, VM_PROT_NONE,
4903 				    0, TRUE, pmap_options, NULL);
4904 				if (kr != KERN_SUCCESS) {
4905 					OSIncrementAtomic64(&vm_prefault_nb_bailout);
4906 					if (kernel_prefault) {
4907 						result = kr;
4908 					}
4909 					break;
4910 				}
4911 				OSIncrementAtomic64(&vm_prefault_nb_pages);
4912 			}
4913 
4914 			/* Next virtual address */
4915 			va += PAGE_SIZE;
4916 		}
4917 		if (vmk_flags.vmkf_keep_map_locked) {
4918 			vm_map_unlock(target_map);
4919 		}
4920 	}
4921 
4922 	if (vmk_flags.vmf_return_data_addr ||
4923 	    vmk_flags.vmf_return_4k_data_addr) {
4924 		*address = map_addr + offset_in_mapping;
4925 	} else {
4926 		*address = map_addr;
4927 	}
4928 	return result;
4929 }
4930 
4931 kern_return_t
4932 vm_map_enter_mem_object(
4933 	vm_map_t                target_map,
4934 	vm_map_offset_t         *address,
4935 	vm_map_size_t           initial_size,
4936 	vm_map_offset_t         mask,
4937 	vm_map_kernel_flags_t   vmk_flags,
4938 	ipc_port_t              port,
4939 	vm_object_offset_t      offset,
4940 	boolean_t               copy,
4941 	vm_prot_t               cur_protection,
4942 	vm_prot_t               max_protection,
4943 	vm_inherit_t            inheritance)
4944 {
4945 	kern_return_t ret;
4946 
4947 	/* range_id is set by vm_map_enter_mem_object_helper */
4948 	ret = vm_map_enter_mem_object_helper(target_map,
4949 	    address,
4950 	    initial_size,
4951 	    mask,
4952 	    vmk_flags,
4953 	    port,
4954 	    offset,
4955 	    copy,
4956 	    cur_protection,
4957 	    max_protection,
4958 	    inheritance,
4959 	    NULL,
4960 	    0);
4961 
4962 #if KASAN
4963 	if (ret == KERN_SUCCESS && address && target_map->pmap == kernel_pmap) {
4964 		kasan_notify_address(*address, initial_size);
4965 	}
4966 #endif
4967 
4968 	return ret;
4969 }
4970 
4971 kern_return_t
4972 vm_map_enter_mem_object_prefault(
4973 	vm_map_t                target_map,
4974 	vm_map_offset_t         *address,
4975 	vm_map_size_t           initial_size,
4976 	vm_map_offset_t         mask,
4977 	vm_map_kernel_flags_t   vmk_flags,
4978 	ipc_port_t              port,
4979 	vm_object_offset_t      offset,
4980 	vm_prot_t               cur_protection,
4981 	vm_prot_t               max_protection,
4982 	upl_page_list_ptr_t     page_list,
4983 	unsigned int            page_list_count)
4984 {
4985 	kern_return_t ret;
4986 
4987 	/* range_id is set by vm_map_enter_mem_object_helper */
4988 	ret = vm_map_enter_mem_object_helper(target_map,
4989 	    address,
4990 	    initial_size,
4991 	    mask,
4992 	    vmk_flags,
4993 	    port,
4994 	    offset,
4995 	    FALSE,
4996 	    cur_protection,
4997 	    max_protection,
4998 	    VM_INHERIT_DEFAULT,
4999 	    page_list,
5000 	    page_list_count);
5001 
5002 #if KASAN
5003 	if (ret == KERN_SUCCESS && address && target_map->pmap == kernel_pmap) {
5004 		kasan_notify_address(*address, initial_size);
5005 	}
5006 #endif
5007 
5008 	return ret;
5009 }
5010 
5011 
5012 kern_return_t
5013 vm_map_enter_mem_object_control(
5014 	vm_map_t                target_map,
5015 	vm_map_offset_t         *address,
5016 	vm_map_size_t           initial_size,
5017 	vm_map_offset_t         mask,
5018 	vm_map_kernel_flags_t   vmk_flags,
5019 	memory_object_control_t control,
5020 	vm_object_offset_t      offset,
5021 	boolean_t               copy,
5022 	vm_prot_t               cur_protection,
5023 	vm_prot_t               max_protection,
5024 	vm_inherit_t            inheritance)
5025 {
5026 	vm_map_address_t        map_addr;
5027 	vm_map_size_t           map_size;
5028 	vm_object_t             object;
5029 	vm_object_size_t        size;
5030 	kern_return_t           result;
5031 	memory_object_t         pager;
5032 	vm_prot_t               pager_prot;
5033 	kern_return_t           kr;
5034 #if __arm64__
5035 	boolean_t               fourk = vmk_flags.vmkf_fourk;
5036 #endif /* __arm64__ */
5037 
5038 	/*
5039 	 * Check arguments for validity
5040 	 */
5041 	if ((target_map == VM_MAP_NULL) ||
5042 	    (cur_protection & ~(VM_PROT_ALL | VM_PROT_ALLEXEC)) ||
5043 	    (max_protection & ~(VM_PROT_ALL | VM_PROT_ALLEXEC)) ||
5044 	    (inheritance > VM_INHERIT_LAST_VALID) ||
5045 	    initial_size == 0) {
5046 		return KERN_INVALID_ARGUMENT;
5047 	}
5048 
5049 #if __arm64__
5050 	if (fourk && VM_MAP_PAGE_MASK(target_map) < PAGE_MASK) {
5051 		fourk = FALSE;
5052 	}
5053 
5054 	if (fourk) {
5055 		map_addr = vm_map_trunc_page(*address,
5056 		    FOURK_PAGE_MASK);
5057 		map_size = vm_map_round_page(initial_size,
5058 		    FOURK_PAGE_MASK);
5059 	} else
5060 #endif /* __arm64__ */
5061 	{
5062 		map_addr = vm_map_trunc_page(*address,
5063 		    VM_MAP_PAGE_MASK(target_map));
5064 		map_size = vm_map_round_page(initial_size,
5065 		    VM_MAP_PAGE_MASK(target_map));
5066 	}
5067 	size = vm_object_round_page(initial_size);
5068 
5069 	object = memory_object_control_to_vm_object(control);
5070 
5071 	if (object == VM_OBJECT_NULL) {
5072 		return KERN_INVALID_OBJECT;
5073 	}
5074 
5075 	if (is_kernel_object(object)) {
5076 		printf("Warning: Attempt to map kernel object"
5077 		    " by a non-private kernel entity\n");
5078 		return KERN_INVALID_OBJECT;
5079 	}
5080 
5081 	vm_object_lock(object);
5082 	object->ref_count++;
5083 
5084 	/*
5085 	 * For "named" VM objects, let the pager know that the
5086 	 * memory object is being mapped.  Some pagers need to keep
5087 	 * track of this, to know when they can reclaim the memory
5088 	 * object, for example.
5089 	 * VM calls memory_object_map() for each mapping (specifying
5090 	 * the protection of each mapping) and calls
5091 	 * memory_object_last_unmap() when all the mappings are gone.
5092 	 */
5093 	pager_prot = max_protection;
5094 	if (copy) {
5095 		pager_prot &= ~VM_PROT_WRITE;
5096 	}
5097 	pager = object->pager;
5098 	if (object->named &&
5099 	    pager != MEMORY_OBJECT_NULL &&
5100 	    object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
5101 		assert(object->pager_ready);
5102 		vm_object_mapping_wait(object, THREAD_UNINT);
5103 		vm_object_mapping_begin(object);
5104 		vm_object_unlock(object);
5105 
5106 		kr = memory_object_map(pager, pager_prot);
5107 		assert(kr == KERN_SUCCESS);
5108 
5109 		vm_object_lock(object);
5110 		vm_object_mapping_end(object);
5111 	}
5112 	vm_object_unlock(object);
5113 
5114 	/*
5115 	 *	Perform the copy if requested
5116 	 */
5117 
5118 	if (copy) {
5119 		vm_object_t             new_object;
5120 		vm_object_offset_t      new_offset;
5121 
5122 		result = vm_object_copy_strategically(object, offset, size,
5123 		    false,                                   /* forking */
5124 		    &new_object, &new_offset,
5125 		    &copy);
5126 
5127 
5128 		if (result == KERN_MEMORY_RESTART_COPY) {
5129 			boolean_t success;
5130 			boolean_t src_needs_copy;
5131 
5132 			/*
5133 			 * XXX
5134 			 * We currently ignore src_needs_copy.
5135 			 * This really is the issue of how to make
5136 			 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
5137 			 * non-kernel users to use. Solution forthcoming.
5138 			 * In the meantime, since we don't allow non-kernel
5139 			 * memory managers to specify symmetric copy,
5140 			 * we won't run into problems here.
5141 			 */
5142 			new_object = object;
5143 			new_offset = offset;
5144 			success = vm_object_copy_quickly(new_object,
5145 			    new_offset, size,
5146 			    &src_needs_copy,
5147 			    &copy);
5148 			assert(success);
5149 			result = KERN_SUCCESS;
5150 		}
5151 		/*
5152 		 *	Throw away the reference to the
5153 		 *	original object, as it won't be mapped.
5154 		 */
5155 
5156 		vm_object_deallocate(object);
5157 
5158 		if (result != KERN_SUCCESS) {
5159 			return result;
5160 		}
5161 
5162 		object = new_object;
5163 		offset = new_offset;
5164 	}
5165 
5166 #if __arm64__
5167 	if (fourk) {
5168 		result = vm_map_enter_fourk(target_map,
5169 		    &map_addr,
5170 		    map_size,
5171 		    (vm_map_offset_t)mask,
5172 		    vmk_flags,
5173 		    object, offset,
5174 		    copy,
5175 		    cur_protection, max_protection,
5176 		    inheritance);
5177 	} else
5178 #endif /* __arm64__ */
5179 	{
5180 		result = vm_map_enter(target_map,
5181 		    &map_addr, map_size,
5182 		    (vm_map_offset_t)mask,
5183 		    vmk_flags,
5184 		    object, offset,
5185 		    copy,
5186 		    cur_protection, max_protection,
5187 		    inheritance);
5188 	}
5189 	if (result != KERN_SUCCESS) {
5190 		vm_object_deallocate(object);
5191 	}
5192 	*address = map_addr;
5193 
5194 	return result;
5195 }
5196 
5197 
5198 #if     VM_CPM
5199 
5200 #ifdef MACH_ASSERT
5201 extern pmap_paddr_t     avail_start, avail_end;
5202 #endif
5203 
5204 /*
5205  *	Allocate memory in the specified map, with the caveat that
5206  *	the memory is physically contiguous.  This call may fail
5207  *	if the system can't find sufficient contiguous memory.
5208  *	This call may cause or lead to heart-stopping amounts of
5209  *	paging activity.
5210  *
5211  *	Memory obtained from this call should be freed in the
5212  *	normal way, viz., via vm_deallocate.
5213  */
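/*
 *	Illustrative sketch only (not part of the original source): on a
 *	configuration built with VM_CPM, a kernel caller might use this
 *	routine as follows.  The local names, the size, and the use of the
 *	VM_MAP_KERNEL_FLAGS_ANYWHERE() initializer are assumptions for
 *	illustration.
 *
 *		vm_map_offset_t addr = 0;
 *		vm_map_size_t   size = 4 * PAGE_SIZE;
 *		kern_return_t   kr;
 *
 *		kr = vm_map_enter_cpm(kernel_map, &addr, size,
 *		    VM_MAP_KERNEL_FLAGS_ANYWHERE());
 *		if (kr == KERN_SUCCESS) {
 *			// [addr, addr + size) is physically contiguous
 *			vm_deallocate(kernel_map, addr, size);
 *		}
 */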
5214 kern_return_t
5215 vm_map_enter_cpm(
5216 	vm_map_t                map,
5217 	vm_map_offset_t        *addr,
5218 	vm_map_size_t           size,
5219 	vm_map_kernel_flags_t   vmk_flags)
5220 {
5221 	vm_object_t             cpm_obj;
5222 	pmap_t                  pmap;
5223 	vm_page_t               m, pages;
5224 	kern_return_t           kr;
5225 	vm_map_offset_t         va, start, end, offset;
5226 #if     MACH_ASSERT
5227 	vm_map_offset_t         prev_addr = 0;
5228 #endif  /* MACH_ASSERT */
5229 	uint8_t                 object_lock_type = 0;
5230 
5231 	if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) {
5232 		/* XXX TODO4K do we need to support this? */
5233 		*addr = 0;
5234 		return KERN_NOT_SUPPORTED;
5235 	}
5236 
5237 	if (size == 0) {
5238 		*addr = 0;
5239 		return KERN_SUCCESS;
5240 	}
5241 	if (vmk_flags.vmf_fixed) {
5242 		*addr = vm_map_trunc_page(*addr,
5243 		    VM_MAP_PAGE_MASK(map));
5244 	} else {
5245 		*addr = vm_map_min(map);
5246 	}
5247 	size = vm_map_round_page(size,
5248 	    VM_MAP_PAGE_MASK(map));
5249 
5250 	/*
5251 	 * LP64todo - cpm_allocate should probably allow
5252 	 * allocations of >4GB, but not with the current
5253 	 * algorithm, so just cast down the size for now.
5254 	 */
5255 	if (size > VM_MAX_ADDRESS) {
5256 		return KERN_RESOURCE_SHORTAGE;
5257 	}
5258 	if ((kr = cpm_allocate(CAST_DOWN(vm_size_t, size),
5259 	    &pages, 0, 0, TRUE, 0 /* flags */)) != KERN_SUCCESS) {
5260 		return kr;
5261 	}
5262 
5263 	cpm_obj = vm_object_allocate((vm_object_size_t)size);
5264 	assert(cpm_obj != VM_OBJECT_NULL);
5265 	assert(cpm_obj->internal);
5266 	assert(cpm_obj->vo_size == (vm_object_size_t)size);
5267 	assert(cpm_obj->can_persist == FALSE);
5268 	assert(cpm_obj->pager_created == FALSE);
5269 	assert(cpm_obj->pageout == FALSE);
5270 	assert(cpm_obj->shadow == VM_OBJECT_NULL);
5271 
5272 	/*
5273 	 *	Insert pages into object.
5274 	 */
5275 	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5276 	vm_object_lock(cpm_obj);
5277 	for (offset = 0; offset < size; offset += PAGE_SIZE) {
5278 		m = pages;
5279 		pages = NEXT_PAGE(m);
5280 		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
5281 
5282 		assert(!m->vmp_gobbled);
5283 		assert(!m->vmp_wanted);
5284 		assert(!m->vmp_pageout);
5285 		assert(!m->vmp_tabled);
5286 		assert(VM_PAGE_WIRED(m));
5287 		assert(m->vmp_busy);
5288 		assert(VM_PAGE_GET_PHYS_PAGE(m) >= (avail_start >> PAGE_SHIFT) && VM_PAGE_GET_PHYS_PAGE(m) <= (avail_end >> PAGE_SHIFT));
5289 
5290 		m->vmp_busy = FALSE;
5291 		vm_page_insert(m, cpm_obj, offset);
5292 	}
5293 	assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
5294 	vm_object_unlock(cpm_obj);
5295 
5296 	/*
5297 	 *	Hang onto a reference on the object in case a
5298 	 *	multi-threaded application for some reason decides
5299 	 *	to deallocate the portion of the address space into
5300 	 *	which we will insert this object.
5301 	 *
5302 	 *	Unfortunately, we must insert the object now before
5303 	 *	we can talk to the pmap module about which addresses
5304 	 *	must be wired down.  Hence, the race with a multi-
5305 	 *	threaded app.
5306 	 */
5307 	vm_object_reference(cpm_obj);
5308 
5309 	/*
5310 	 *	Insert object into map.
5311 	 */
5312 
5313 	kr = vm_map_enter(
5314 		map,
5315 		addr,
5316 		size,
5317 		(vm_map_offset_t)0,
5318 		vmk_flags,
5319 		cpm_obj,
5320 		(vm_object_offset_t)0,
5321 		FALSE,
5322 		VM_PROT_ALL,
5323 		VM_PROT_ALL,
5324 		VM_INHERIT_DEFAULT);
5325 
5326 	if (kr != KERN_SUCCESS) {
5327 		/*
5328 		 *	A CPM object doesn't have can_persist set,
5329 		 *	so all we have to do is deallocate it to
5330 		 *	free up these pages.
5331 		 */
5332 		assert(cpm_obj->pager_created == FALSE);
5333 		assert(cpm_obj->can_persist == FALSE);
5334 		assert(cpm_obj->pageout == FALSE);
5335 		assert(cpm_obj->shadow == VM_OBJECT_NULL);
5336 		vm_object_deallocate(cpm_obj); /* kill acquired ref */
5337 		vm_object_deallocate(cpm_obj); /* kill creation ref */
5338 	}
5339 
5340 	/*
5341 	 *	Inform the physical mapping system that the
5342 	 *	range of addresses may not fault, so that
5343 	 *	page tables and such can be locked down as well.
5344 	 */
5345 	start = *addr;
5346 	end = start + size;
5347 	pmap = vm_map_pmap(map);
5348 	pmap_pageable(pmap, start, end, FALSE);
5349 
5350 	/*
5351 	 *	Enter each page into the pmap, to avoid faults.
5352 	 *	Note that this loop could be coded more efficiently,
5353 	 *	if the need arose, rather than looking up each page
5354 	 *	again.
5355 	 */
5356 	for (offset = 0, va = start; offset < size;
5357 	    va += PAGE_SIZE, offset += PAGE_SIZE) {
5358 		int type_of_fault;
5359 
5360 		vm_object_lock(cpm_obj);
5361 		m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
5362 		assert(m != VM_PAGE_NULL);
5363 
5364 		vm_page_zero_fill(m);
5365 
5366 		type_of_fault = DBG_ZERO_FILL_FAULT;
5367 
5368 		vm_fault_enter(m, pmap, va,
5369 		    PAGE_SIZE, 0,
5370 		    VM_PROT_ALL, VM_PROT_WRITE,
5371 		    VM_PAGE_WIRED(m),
5372 		    FALSE,                             /* change_wiring */
5373 		    VM_KERN_MEMORY_NONE,                             /* tag - not wiring */
5374 		    FALSE,                             /* cs_bypass */
5375 		    0,                                 /* user_tag */
5376 		    0,                             /* pmap_options */
5377 		    NULL,                              /* need_retry */
5378 		    &type_of_fault,
5379 		    &object_lock_type);                 /* Exclusive lock mode. Will remain unchanged.*/
5380 
5381 		vm_object_unlock(cpm_obj);
5382 	}
5383 
5384 #if     MACH_ASSERT
5385 	/*
5386 	 *	Verify ordering in address space.
5387 	 */
5388 	for (offset = 0; offset < size; offset += PAGE_SIZE) {
5389 		vm_object_lock(cpm_obj);
5390 		m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
5391 		vm_object_unlock(cpm_obj);
5392 		if (m == VM_PAGE_NULL) {
5393 			panic("vm_allocate_cpm:  obj %p off 0x%llx no page",
5394 			    cpm_obj, (uint64_t)offset);
5395 		}
5396 		assert(m->vmp_tabled);
5397 		assert(!m->vmp_busy);
5398 		assert(!m->vmp_wanted);
5399 		assert(!m->vmp_fictitious);
5400 		assert(!m->vmp_private);
5401 		assert(!m->vmp_absent);
5402 		assert(!m->vmp_cleaning);
5403 		assert(!m->vmp_laundry);
5404 		assert(!m->vmp_precious);
5405 		assert(!m->vmp_clustered);
5406 		if (offset != 0) {
5407 			if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5408 				printf("start 0x%llx end 0x%llx va 0x%llx\n",
5409 				    (uint64_t)start, (uint64_t)end, (uint64_t)va);
5410 				printf("obj %p off 0x%llx\n", cpm_obj, (uint64_t)offset);
5411 				printf("m %p prev_address 0x%llx\n", m, (uint64_t)prev_addr);
5412 				panic("vm_allocate_cpm:  pages not contig!");
5413 			}
5414 		}
5415 		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5416 	}
5417 #endif  /* MACH_ASSERT */
5418 
5419 	vm_object_deallocate(cpm_obj); /* kill extra ref */
5420 
5421 	return kr;
5422 }
5423 
5424 
5425 #else   /* VM_CPM */
5426 
5427 /*
5428  *	Interface is defined in all cases, but unless the kernel
5429  *	is built explicitly for this option, the interface does
5430  *	nothing.
5431  */
5432 
5433 kern_return_t
5434 vm_map_enter_cpm(
5435 	__unused vm_map_t                map,
5436 	__unused vm_map_offset_t        *addr,
5437 	__unused vm_map_size_t           size,
5438 	__unused vm_map_kernel_flags_t   vmk_flags)
5439 {
5440 	return KERN_FAILURE;
5441 }
5442 #endif /* VM_CPM */
5443 
5444 /* Not used without nested pmaps */
5445 #ifndef NO_NESTED_PMAP
5446 /*
5447  * Clip and unnest a portion of a nested submap mapping.
5448  */
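/*
 * A sketch of the effect, with hypothetical addresses: given a nested
 * entry spanning [A, D) and a request to unnest [B, C), the entry is
 * clipped into up to three pieces,
 *
 *	[A, B) still nested | [B, C) unnested | [C, D) still nested
 *
 * and pmap_unnest() detaches the shared page tables for the middle
 * piece (its use_pmap flag is cleared) so its mappings can diverge
 * from the shared region.
 */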
5449 
5450 
5451 static void
5452 vm_map_clip_unnest(
5453 	vm_map_t        map,
5454 	vm_map_entry_t  entry,
5455 	vm_map_offset_t start_unnest,
5456 	vm_map_offset_t end_unnest)
5457 {
5458 	vm_map_offset_t old_start_unnest = start_unnest;
5459 	vm_map_offset_t old_end_unnest = end_unnest;
5460 
5461 	assert(entry->is_sub_map);
5462 	assert(VME_SUBMAP(entry) != NULL);
5463 	assert(entry->use_pmap);
5464 
5465 	/*
5466 	 * Query the platform for the optimal unnest range.
5467 	 * DRK: There's some duplication of effort here, since
5468 	 * callers may have adjusted the range to some extent. This
5469 	 * routine was introduced to support 1GiB subtree nesting
5470 	 * for x86 platforms, which can also nest on 2MiB boundaries
5471 	 * depending on size/alignment.
5472 	 */
5473 	if (pmap_adjust_unnest_parameters(map->pmap, &start_unnest, &end_unnest)) {
5474 		assert(VME_SUBMAP(entry)->is_nested_map);
5475 		assert(!VME_SUBMAP(entry)->disable_vmentry_reuse);
5476 		log_unnest_badness(map,
5477 		    old_start_unnest,
5478 		    old_end_unnest,
5479 		    VME_SUBMAP(entry)->is_nested_map,
5480 		    (entry->vme_start +
5481 		    VME_SUBMAP(entry)->lowest_unnestable_start -
5482 		    VME_OFFSET(entry)));
5483 	}
5484 
5485 	if (entry->vme_start > start_unnest ||
5486 	    entry->vme_end < end_unnest) {
5487 		panic("vm_map_clip_unnest(0x%llx,0x%llx): "
5488 		    "bad nested entry: start=0x%llx end=0x%llx\n",
5489 		    (long long)start_unnest, (long long)end_unnest,
5490 		    (long long)entry->vme_start, (long long)entry->vme_end);
5491 	}
5492 
5493 	if (start_unnest > entry->vme_start) {
5494 		_vm_map_clip_start(&map->hdr,
5495 		    entry,
5496 		    start_unnest);
5497 		if (map->holelistenabled) {
5498 			vm_map_store_update_first_free(map, NULL, FALSE);
5499 		} else {
5500 			vm_map_store_update_first_free(map, map->first_free, FALSE);
5501 		}
5502 	}
5503 	if (entry->vme_end > end_unnest) {
5504 		_vm_map_clip_end(&map->hdr,
5505 		    entry,
5506 		    end_unnest);
5507 		if (map->holelistenabled) {
5508 			vm_map_store_update_first_free(map, NULL, FALSE);
5509 		} else {
5510 			vm_map_store_update_first_free(map, map->first_free, FALSE);
5511 		}
5512 	}
5513 
5514 	pmap_unnest(map->pmap,
5515 	    entry->vme_start,
5516 	    entry->vme_end - entry->vme_start);
5517 	if ((map->mapped_in_other_pmaps) && os_ref_get_count_raw(&map->map_refcnt) != 0) {
5518 		/* clean up parent map/maps */
5519 		vm_map_submap_pmap_clean(
5520 			map, entry->vme_start,
5521 			entry->vme_end,
5522 			VME_SUBMAP(entry),
5523 			VME_OFFSET(entry));
5524 	}
5525 	entry->use_pmap = FALSE;
5526 	if ((map->pmap != kernel_pmap) &&
5527 	    (VME_ALIAS(entry) == VM_MEMORY_SHARED_PMAP)) {
5528 		VME_ALIAS_SET(entry, VM_MEMORY_UNSHARED_PMAP);
5529 	}
5530 }
5531 #endif  /* NO_NESTED_PMAP */
5532 
5533 __abortlike
5534 static void
5535 __vm_map_clip_atomic_entry_panic(
5536 	vm_map_t        map,
5537 	vm_map_entry_t  entry,
5538 	vm_map_offset_t where)
5539 {
5540 	panic("vm_map_clip(%p): Attempting to clip an atomic VM map entry "
5541 	    "%p [0x%llx:0x%llx] at 0x%llx", map, entry,
5542 	    (uint64_t)entry->vme_start,
5543 	    (uint64_t)entry->vme_end,
5544 	    (uint64_t)where);
5545 }
5546 
5547 /*
5548  *	vm_map_clip_start:	[ internal use only ]
5549  *
5550  *	Asserts that the given entry begins at or after
5551  *	the specified address; if necessary,
5552  *	it splits the entry into two.
5553  */
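/*
 * Sketch with hypothetical addresses: clipping an entry covering
 * [0x1000, 0x5000) at startaddr == 0x3000 produces
 *
 *	[0x1000, 0x3000)   new entry, linked in before the original
 *	[0x3000, 0x5000)   original entry, VME_OFFSET advanced by 0x2000
 *
 * so that the original entry now begins exactly at the clip address.
 */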
5554 void
5555 vm_map_clip_start(
5556 	vm_map_t        map,
5557 	vm_map_entry_t  entry,
5558 	vm_map_offset_t startaddr)
5559 {
5560 #ifndef NO_NESTED_PMAP
5561 	if (entry->is_sub_map &&
5562 	    entry->use_pmap &&
5563 	    startaddr >= entry->vme_start) {
5564 		vm_map_offset_t start_unnest, end_unnest;
5565 
5566 		/*
5567 		 * Make sure "startaddr" is no longer in a nested range
5568 		 * before we clip.  Unnest only the minimum range the platform
5569 		 * can handle.
5570 		 * vm_map_clip_unnest may perform additional adjustments to
5571 		 * the unnest range.
5572 		 */
5573 		start_unnest = startaddr & ~(pmap_shared_region_size_min(map->pmap) - 1);
5574 		end_unnest = start_unnest + pmap_shared_region_size_min(map->pmap);
5575 		vm_map_clip_unnest(map, entry, start_unnest, end_unnest);
5576 	}
5577 #endif /* NO_NESTED_PMAP */
5578 	if (startaddr > entry->vme_start) {
5579 		if (!entry->is_sub_map &&
5580 		    VME_OBJECT(entry) &&
5581 		    VME_OBJECT(entry)->phys_contiguous) {
5582 			pmap_remove(map->pmap,
5583 			    (addr64_t)(entry->vme_start),
5584 			    (addr64_t)(entry->vme_end));
5585 		}
5586 		if (entry->vme_atomic) {
5587 			__vm_map_clip_atomic_entry_panic(map, entry, startaddr);
5588 		}
5589 
5590 		DTRACE_VM5(
5591 			vm_map_clip_start,
5592 			vm_map_t, map,
5593 			vm_map_offset_t, entry->vme_start,
5594 			vm_map_offset_t, entry->vme_end,
5595 			vm_map_offset_t, startaddr,
5596 			int, VME_ALIAS(entry));
5597 
5598 		_vm_map_clip_start(&map->hdr, entry, startaddr);
5599 		if (map->holelistenabled) {
5600 			vm_map_store_update_first_free(map, NULL, FALSE);
5601 		} else {
5602 			vm_map_store_update_first_free(map, map->first_free, FALSE);
5603 		}
5604 	}
5605 }
5606 
5607 
5608 #define vm_map_copy_clip_start(copy, entry, startaddr) \
5609 	MACRO_BEGIN \
5610 	if ((startaddr) > (entry)->vme_start) \
5611 	        _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
5612 	MACRO_END
5613 
5614 /*
5615  *	This routine is called only when it is known that
5616  *	the entry must be split.
5617  */
5618 static void
5619 _vm_map_clip_start(
5620 	struct vm_map_header    *map_header,
5621 	vm_map_entry_t          entry,
5622 	vm_map_offset_t         start)
5623 {
5624 	vm_map_entry_t  new_entry;
5625 
5626 	/*
5627 	 *	Split off the front portion --
5628 	 *	note that we must insert the new
5629 	 *	entry BEFORE this one, so that
5630 	 *	this entry has the specified starting
5631 	 *	address.
5632 	 */
5633 
5634 	if (entry->map_aligned) {
5635 		assert(VM_MAP_PAGE_ALIGNED(start,
5636 		    VM_MAP_HDR_PAGE_MASK(map_header)));
5637 	}
5638 
5639 	new_entry = _vm_map_entry_create(map_header);
5640 	vm_map_entry_copy_full(new_entry, entry);
5641 
5642 	new_entry->vme_end = start;
5643 	assert(new_entry->vme_start < new_entry->vme_end);
5644 	VME_OFFSET_SET(entry, VME_OFFSET(entry) + (start - entry->vme_start));
5645 	if (__improbable(start >= entry->vme_end)) {
5646 		panic("mapHdr %p entry %p start 0x%llx end 0x%llx new start 0x%llx", map_header, entry, entry->vme_start, entry->vme_end, start);
5647 	}
5648 	assert(start < entry->vme_end);
5649 	entry->vme_start = start;
5650 
5651 #if VM_BTLOG_TAGS
5652 	if (new_entry->vme_kernel_object) {
5653 		btref_retain(new_entry->vme_tag_btref);
5654 	}
5655 #endif /* VM_BTLOG_TAGS */
5656 
5657 	_vm_map_store_entry_link(map_header, entry->vme_prev, new_entry);
5658 
5659 	if (entry->is_sub_map) {
5660 		vm_map_reference(VME_SUBMAP(new_entry));
5661 	} else {
5662 		vm_object_reference(VME_OBJECT(new_entry));
5663 	}
5664 }
5665 
5666 
5667 /*
5668  *	vm_map_clip_end:	[ internal use only ]
5669  *
5670  *	Asserts that the given entry ends at or before
5671  *	the specified address; if necessary,
5672  *	it splits the entry into two.
5673  */
5674 void
5675 vm_map_clip_end(
5676 	vm_map_t        map,
5677 	vm_map_entry_t  entry,
5678 	vm_map_offset_t endaddr)
5679 {
5680 	if (endaddr > entry->vme_end) {
5681 		/*
5682 		 * Within the scope of this clipping, limit "endaddr" to
5683 		 * the end of this map entry...
5684 		 */
5685 		endaddr = entry->vme_end;
5686 	}
5687 #ifndef NO_NESTED_PMAP
5688 	if (entry->is_sub_map && entry->use_pmap) {
5689 		vm_map_offset_t start_unnest, end_unnest;
5690 
5691 		/*
5692 		 * Make sure the range between the start of this entry and
5693 		 * the new "endaddr" is no longer nested before we clip.
5694 		 * Unnest only the minimum range the platform can handle.
5695 		 * vm_map_clip_unnest may perform additional adjustments to
5696 		 * the unnest range.
5697 		 */
5698 		start_unnest = entry->vme_start;
5699 		end_unnest =
5700 		    (endaddr + pmap_shared_region_size_min(map->pmap) - 1) &
5701 		    ~(pmap_shared_region_size_min(map->pmap) - 1);
5702 		vm_map_clip_unnest(map, entry, start_unnest, end_unnest);
5703 	}
5704 #endif /* NO_NESTED_PMAP */
5705 	if (endaddr < entry->vme_end) {
5706 		if (!entry->is_sub_map &&
5707 		    VME_OBJECT(entry) &&
5708 		    VME_OBJECT(entry)->phys_contiguous) {
5709 			pmap_remove(map->pmap,
5710 			    (addr64_t)(entry->vme_start),
5711 			    (addr64_t)(entry->vme_end));
5712 		}
5713 		if (entry->vme_atomic) {
5714 			__vm_map_clip_atomic_entry_panic(map, entry, endaddr);
5715 		}
5716 		DTRACE_VM5(
5717 			vm_map_clip_end,
5718 			vm_map_t, map,
5719 			vm_map_offset_t, entry->vme_start,
5720 			vm_map_offset_t, entry->vme_end,
5721 			vm_map_offset_t, endaddr,
5722 			int, VME_ALIAS(entry));
5723 
5724 		_vm_map_clip_end(&map->hdr, entry, endaddr);
5725 		if (map->holelistenabled) {
5726 			vm_map_store_update_first_free(map, NULL, FALSE);
5727 		} else {
5728 			vm_map_store_update_first_free(map, map->first_free, FALSE);
5729 		}
5730 	}
5731 }
5732 
5733 
5734 #define vm_map_copy_clip_end(copy, entry, endaddr) \
5735 	MACRO_BEGIN \
5736 	if ((endaddr) < (entry)->vme_end) \
5737 	        _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
5738 	MACRO_END
5739 
5740 /*
5741  *	This routine is called only when it is known that
5742  *	the entry must be split.
5743  */
5744 static void
5745 _vm_map_clip_end(
5746 	struct vm_map_header    *map_header,
5747 	vm_map_entry_t          entry,
5748 	vm_map_offset_t         end)
5749 {
5750 	vm_map_entry_t  new_entry;
5751 
5752 	/*
5753 	 *	Create a new entry and insert it
5754 	 *	AFTER the specified entry
5755 	 */
5756 
5757 	if (entry->map_aligned) {
5758 		assert(VM_MAP_PAGE_ALIGNED(end,
5759 		    VM_MAP_HDR_PAGE_MASK(map_header)));
5760 	}
5761 
5762 	new_entry = _vm_map_entry_create(map_header);
5763 	vm_map_entry_copy_full(new_entry, entry);
5764 
5765 	if (__improbable(end <= entry->vme_start)) {
5766 		panic("mapHdr %p entry %p start 0x%llx end 0x%llx new end 0x%llx", map_header, entry, entry->vme_start, entry->vme_end, end);
5767 	}
5768 	assert(entry->vme_start < end);
5769 	new_entry->vme_start = entry->vme_end = end;
5770 	VME_OFFSET_SET(new_entry,
5771 	    VME_OFFSET(new_entry) + (end - entry->vme_start));
5772 	assert(new_entry->vme_start < new_entry->vme_end);
5773 
5774 #if VM_BTLOG_TAGS
5775 	if (new_entry->vme_kernel_object) {
5776 		btref_retain(new_entry->vme_tag_btref);
5777 	}
5778 #endif /* VM_BTLOG_TAGS */
5779 
5780 	_vm_map_store_entry_link(map_header, entry, new_entry);
5781 
5782 	if (entry->is_sub_map) {
5783 		vm_map_reference(VME_SUBMAP(new_entry));
5784 	} else {
5785 		vm_object_reference(VME_OBJECT(new_entry));
5786 	}
5787 }
5788 
5789 
5790 /*
5791  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
5792  *
5793  *	Asserts that the starting and ending region
5794  *	addresses fall within the valid range of the map.
5795  */
5796 #define VM_MAP_RANGE_CHECK(map, start, end)     \
5797 	MACRO_BEGIN                             \
5798 	if (start < vm_map_min(map))            \
5799 	        start = vm_map_min(map);        \
5800 	if (end > vm_map_max(map))              \
5801 	        end = vm_map_max(map);          \
5802 	if (start > end)                        \
5803 	        start = end;                    \
5804 	MACRO_END
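/*
 * Example of the clamping behavior, with hypothetical bounds
 * vm_map_min(map) == 0x1000 and vm_map_max(map) == 0x9000:
 *
 *	start 0x0500, end 0x9500  ->  start 0x1000, end 0x9000
 *	start 0xA000, end 0xB000  ->  start 0x9000, end 0x9000
 *
 * i.e. the range is silently truncated to the map's bounds and may
 * collapse to an empty range rather than fail.
 */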
5805 
5806 /*
5807  *	vm_map_range_check:	[ internal use only ]
5808  *
5809  *	Check that the region defined by the specified start and
5810  *	end addresses is wholly contained within a single map
5811  *	entry or set of adjacent map entries of the specified map,
5812  *	i.e. the specified region contains no unmapped space.
5813  *	If any or all of the region is unmapped, FALSE is returned.
5814  *	Otherwise, TRUE is returned and if the output argument 'entry'
5815  *	is not NULL it points to the map entry containing the start
5816  *	of the region.
5817  *
5818  *	The map is locked for reading on entry and is left locked.
5819  */
5820 static boolean_t
5821 vm_map_range_check(
5822 	vm_map_t                map,
5823 	vm_map_offset_t         start,
5824 	vm_map_offset_t         end,
5825 	vm_map_entry_t          *entry)
5826 {
5827 	vm_map_entry_t          cur;
5828 	vm_map_offset_t         prev;
5829 
5830 	/*
5831 	 *      Basic sanity checks first
5832 	 */
5833 	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) {
5834 		return FALSE;
5835 	}
5836 
5837 	/*
5838 	 *      Check first if the region starts within a valid
5839 	 *	mapping for the map.
5840 	 */
5841 	if (!vm_map_lookup_entry(map, start, &cur)) {
5842 		return FALSE;
5843 	}
5844 
5845 	/*
5846 	 *	Optimize for the case that the region is contained
5847 	 *	in a single map entry.
5848 	 */
5849 	if (entry != (vm_map_entry_t *) NULL) {
5850 		*entry = cur;
5851 	}
5852 	if (end <= cur->vme_end) {
5853 		return TRUE;
5854 	}
5855 
5856 	/*
5857 	 *      If the region is not wholly contained within a
5858 	 *      single entry, walk the entries looking for holes.
5859 	 */
5860 	prev = cur->vme_end;
5861 	cur = cur->vme_next;
5862 	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
5863 		if (end <= cur->vme_end) {
5864 			return TRUE;
5865 		}
5866 		prev = cur->vme_end;
5867 		cur = cur->vme_next;
5868 	}
5869 	return FALSE;
5870 }
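/*
 * Typical usage sketch (hypothetical caller, map locked for reading):
 *
 *	vm_map_entry_t first;
 *
 *	if (!vm_map_range_check(map, start, end, &first)) {
 *		return KERN_INVALID_ADDRESS;
 *	}
 *
 * On success, "first" points at the entry containing "start" and the
 * caller may walk forward knowing [start, end) has no unmapped holes.
 */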
5871 
5872 /*
5873  *	vm_map_protect:
5874  *
5875  *	Sets the protection of the specified address
5876  *	region in the target map.  If "set_max" is
5877  *	specified, the maximum protection is to be set;
5878  *	otherwise, only the current protection is affected.
5879  */
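/*
 * Usage sketch (hypothetical values): making a range read-only without
 * lowering its maximum protection:
 *
 *	kr = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * With set_max == TRUE the call instead replaces max_protection, and the
 * resulting current protection becomes (new_prot & old_prot).
 */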
5880 kern_return_t
5881 vm_map_protect(
5882 	vm_map_t        map,
5883 	vm_map_offset_t start,
5884 	vm_map_offset_t end,
5885 	vm_prot_t       new_prot,
5886 	boolean_t       set_max)
5887 {
5888 	vm_map_entry_t                  current;
5889 	vm_map_offset_t                 prev;
5890 	vm_map_entry_t                  entry;
5891 	vm_prot_t                       new_max;
5892 	int                             pmap_options = 0;
5893 	kern_return_t                   kr;
5894 
5895 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
5896 		return KERN_INVALID_ARGUMENT;
5897 	}
5898 
5899 	if (new_prot & VM_PROT_COPY) {
5900 		vm_map_offset_t         new_start;
5901 		vm_prot_t               cur_prot, max_prot;
5902 		vm_map_kernel_flags_t   kflags;
5903 
5904 		/* LP64todo - see below */
5905 		if (start >= map->max_offset) {
5906 			return KERN_INVALID_ADDRESS;
5907 		}
5908 
5909 		if ((new_prot & VM_PROT_ALLEXEC) &&
5910 		    map->pmap != kernel_pmap &&
5911 		    (vm_map_cs_enforcement(map)
5912 #if XNU_TARGET_OS_OSX && __arm64__
5913 		    || !VM_MAP_IS_EXOTIC(map)
5914 #endif /* XNU_TARGET_OS_OSX && __arm64__ */
5915 		    ) &&
5916 		    VM_MAP_POLICY_WX_FAIL(map)) {
5917 			DTRACE_VM3(cs_wx,
5918 			    uint64_t, (uint64_t) start,
5919 			    uint64_t, (uint64_t) end,
5920 			    vm_prot_t, new_prot);
5921 			printf("CODE SIGNING: %d[%s] %s:%d(0x%llx,0x%llx,0x%x) can't have both write and exec at the same time\n",
5922 			    proc_selfpid(),
5923 			    (get_bsdtask_info(current_task())
5924 			    ? proc_name_address(get_bsdtask_info(current_task()))
5925 			    : "?"),
5926 			    __FUNCTION__, __LINE__,
5927 #if DEVELOPMENT || DEBUG
5928 			    (uint64_t)start,
5929 			    (uint64_t)end,
5930 #else /* DEVELOPMENT || DEBUG */
5931 			    (uint64_t)0,
5932 			    (uint64_t)0,
5933 #endif /* DEVELOPMENT || DEBUG */
5934 			    new_prot);
5935 			return KERN_PROTECTION_FAILURE;
5936 		}
5937 
5938 		/*
5939 		 * Let vm_map_remap_extract() know that it will need to:
5940 		 * + make a copy of the mapping
5941 		 * + add VM_PROT_WRITE to the max protections
5942 		 * + remove any protections that are no longer allowed from the
5943 		 *   max protections (to avoid any WRITE/EXECUTE conflict, for
5944 		 *   example).
5945 		 * Note that "max_prot" is an IN/OUT parameter only for this
5946 		 * specific (VM_PROT_COPY) case.  It's usually an OUT parameter
5947 		 * only.
5948 		 */
5949 		max_prot = new_prot & (VM_PROT_ALL | VM_PROT_ALLEXEC);
5950 		cur_prot = VM_PROT_NONE;
5951 		kflags = VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = true);
5952 		kflags.vmkf_remap_prot_copy = true;
5953 		kflags.vmkf_tpro_enforcement_override = !vm_map_tpro_enforcement(map);
5954 		new_start = start;
5955 		kr = vm_map_remap(map,
5956 		    &new_start,
5957 		    end - start,
5958 		    0, /* mask */
5959 		    kflags,
5960 		    map,
5961 		    start,
5962 		    TRUE, /* copy-on-write remapping! */
5963 		    &cur_prot, /* IN/OUT */
5964 		    &max_prot, /* IN/OUT */
5965 		    VM_INHERIT_DEFAULT);
5966 		if (kr != KERN_SUCCESS) {
5967 			return kr;
5968 		}
5969 		new_prot &= ~VM_PROT_COPY;
5970 	}
5971 
5972 	vm_map_lock(map);
5973 
5974 	/* LP64todo - remove this check when vm_map_commpage64()
5975 	 * no longer has to stuff in a map_entry for the commpage
5976 	 * above the map's max_offset.
5977 	 */
5978 	if (start >= map->max_offset) {
5979 		vm_map_unlock(map);
5980 		return KERN_INVALID_ADDRESS;
5981 	}
5982 
5983 	while (1) {
5984 		/*
5985 		 *      Lookup the entry.  If it doesn't start in a valid
5986 		 *	entry, return an error.
5987 		 */
5988 		if (!vm_map_lookup_entry(map, start, &entry)) {
5989 			vm_map_unlock(map);
5990 			return KERN_INVALID_ADDRESS;
5991 		}
5992 
5993 		if (entry->superpage_size && (start & (SUPERPAGE_SIZE - 1))) { /* extend request to whole entry */
5994 			start = SUPERPAGE_ROUND_DOWN(start);
5995 			continue;
5996 		}
5997 		break;
5998 	}
5999 	if (entry->superpage_size) {
6000 		end = SUPERPAGE_ROUND_UP(end);
6001 	}
6002 
6003 	/*
6004 	 *	Make a first pass to check for protection and address
6005 	 *	violations.
6006 	 */
6007 
6008 	current = entry;
6009 	prev = current->vme_start;
6010 	while ((current != vm_map_to_entry(map)) &&
6011 	    (current->vme_start < end)) {
6012 		/*
6013 		 * If there is a hole, return an error.
6014 		 */
6015 		if (current->vme_start != prev) {
6016 			vm_map_unlock(map);
6017 			return KERN_INVALID_ADDRESS;
6018 		}
6019 
6020 		new_max = current->max_protection;
6021 
6022 #if defined(__x86_64__)
6023 		/* Allow max mask to include execute prot bits if this map doesn't enforce CS */
6024 		if (set_max && (new_prot & VM_PROT_ALLEXEC) && !vm_map_cs_enforcement(map)) {
6025 			new_max = (new_max & ~VM_PROT_ALLEXEC) | (new_prot & VM_PROT_ALLEXEC);
6026 		}
6027 #elif CODE_SIGNING_MONITOR
6028 		if (set_max && (new_prot & VM_PROT_EXECUTE) && (csm_address_space_exempt(map->pmap) == KERN_SUCCESS)) {
6029 			new_max |= VM_PROT_EXECUTE;
6030 		}
6031 #endif
6032 		if ((new_prot & new_max) != new_prot) {
6033 			vm_map_unlock(map);
6034 			return KERN_PROTECTION_FAILURE;
6035 		}
6036 
6037 		if (current->used_for_jit &&
6038 		    pmap_has_prot_policy(map->pmap, current->translated_allow_execute, current->protection)) {
6039 			vm_map_unlock(map);
6040 			return KERN_PROTECTION_FAILURE;
6041 		}
6042 
6043 #if __arm64e__
6044 		/* Disallow remapping hw assisted TPRO mappings */
6045 		if (current->used_for_tpro) {
6046 			vm_map_unlock(map);
6047 			return KERN_PROTECTION_FAILURE;
6048 		}
6049 #endif /* __arm64e__ */
6050 
6051 
6052 		if ((new_prot & VM_PROT_WRITE) &&
6053 		    (new_prot & VM_PROT_ALLEXEC) &&
6054 #if XNU_TARGET_OS_OSX
6055 		    map->pmap != kernel_pmap &&
6056 		    (vm_map_cs_enforcement(map)
6057 #if __arm64__
6058 		    || !VM_MAP_IS_EXOTIC(map)
6059 #endif /* __arm64__ */
6060 		    ) &&
6061 #endif /* XNU_TARGET_OS_OSX */
6062 #if CODE_SIGNING_MONITOR
6063 		    (csm_address_space_exempt(map->pmap) != KERN_SUCCESS) &&
6064 #endif
6065 		    !(current->used_for_jit)) {
6066 			DTRACE_VM3(cs_wx,
6067 			    uint64_t, (uint64_t) current->vme_start,
6068 			    uint64_t, (uint64_t) current->vme_end,
6069 			    vm_prot_t, new_prot);
6070 			printf("CODE SIGNING: %d[%s] %s:%d(0x%llx,0x%llx,0x%x) can't have both write and exec at the same time\n",
6071 			    proc_selfpid(),
6072 			    (get_bsdtask_info(current_task())
6073 			    ? proc_name_address(get_bsdtask_info(current_task()))
6074 			    : "?"),
6075 			    __FUNCTION__, __LINE__,
6076 #if DEVELOPMENT || DEBUG
6077 			    (uint64_t)current->vme_start,
6078 			    (uint64_t)current->vme_end,
6079 #else /* DEVELOPMENT || DEBUG */
6080 			    (uint64_t)0,
6081 			    (uint64_t)0,
6082 #endif /* DEVELOPMENT || DEBUG */
6083 			    new_prot);
6084 			new_prot &= ~VM_PROT_ALLEXEC;
6085 			if (VM_MAP_POLICY_WX_FAIL(map)) {
6086 				vm_map_unlock(map);
6087 				return KERN_PROTECTION_FAILURE;
6088 			}
6089 		}
6090 
6091 		/*
6092 		 * If the task has requested executable lockdown,
6093 		 * deny both:
6094 		 * - adding executable protections OR
6095 		 * - adding write protections to an existing executable mapping.
6096 		 */
6097 		if (map->map_disallow_new_exec == TRUE) {
6098 			if ((new_prot & VM_PROT_ALLEXEC) ||
6099 			    ((current->protection & VM_PROT_EXECUTE) && (new_prot & VM_PROT_WRITE))) {
6100 				vm_map_unlock(map);
6101 				return KERN_PROTECTION_FAILURE;
6102 			}
6103 		}
6104 
6105 		prev = current->vme_end;
6106 		current = current->vme_next;
6107 	}
6108 
6109 #if __arm64__
6110 	if (end > prev &&
6111 	    end == vm_map_round_page(prev, VM_MAP_PAGE_MASK(map))) {
6112 		vm_map_entry_t prev_entry;
6113 
6114 		prev_entry = current->vme_prev;
6115 		if (prev_entry != vm_map_to_entry(map) &&
6116 		    !prev_entry->map_aligned &&
6117 		    (vm_map_round_page(prev_entry->vme_end,
6118 		    VM_MAP_PAGE_MASK(map))
6119 		    == end)) {
6120 			/*
6121 			 * The last entry in our range is not "map-aligned"
6122 			 * but it would have reached all the way to "end"
6123 			 * if it had been map-aligned, so this is not really
6124 			 * a hole in the range and we can proceed.
6125 			 */
6126 			prev = end;
6127 		}
6128 	}
6129 #endif /* __arm64__ */
6130 
6131 	if (end > prev) {
6132 		vm_map_unlock(map);
6133 		return KERN_INVALID_ADDRESS;
6134 	}
6135 
6136 	/*
6137 	 *	Go back and fix up protections.
6138 	 *	Clip to start here if the range starts within
6139 	 *	the entry.
6140 	 */
6141 
6142 	current = entry;
6143 	if (current != vm_map_to_entry(map)) {
6144 		/* clip and unnest if necessary */
6145 		vm_map_clip_start(map, current, start);
6146 	}
6147 
6148 	while ((current != vm_map_to_entry(map)) &&
6149 	    (current->vme_start < end)) {
6150 		vm_prot_t       old_prot;
6151 
6152 		vm_map_clip_end(map, current, end);
6153 
6154 #if DEVELOPMENT || DEBUG
6155 		if (current->csm_associated && vm_log_xnu_user_debug) {
6156 			printf("FBDP %d[%s] %s(0x%llx,0x%llx,0x%x) on map %p entry %p [0x%llx:0x%llx 0x%x/0x%x] csm_associated\n",
6157 			    proc_selfpid(),
6158 			    (get_bsdtask_info(current_task())
6159 			    ? proc_name_address(get_bsdtask_info(current_task()))
6160 			    : "?"),
6161 			    __FUNCTION__,
6162 			    (uint64_t)start,
6163 			    (uint64_t)end,
6164 			    new_prot,
6165 			    map, current,
6166 			    current->vme_start,
6167 			    current->vme_end,
6168 			    current->protection,
6169 			    current->max_protection);
6170 		}
6171 #endif /* DEVELOPMENT || DEBUG */
6172 
6173 		if (current->is_sub_map) {
6174 			/* clipping did unnest if needed */
6175 			assert(!current->use_pmap);
6176 		}
6177 
6178 		old_prot = current->protection;
6179 
6180 		if (set_max) {
6181 			current->max_protection = new_prot;
6182 			/* Consider either EXECUTE or UEXEC as EXECUTE for this masking */
6183 			current->protection = (new_prot & old_prot);
6184 		} else {
6185 			current->protection = new_prot;
6186 		}
6187 
6188 #if CODE_SIGNING_MONITOR
6189 		if (!current->vme_xnu_user_debug &&
6190 		    /* a !csm_associated mapping becoming executable */
6191 		    ((!current->csm_associated &&
6192 		    !(old_prot & VM_PROT_EXECUTE) &&
6193 		    (current->protection & VM_PROT_EXECUTE))
6194 		    ||
6195 		    /* a csm_associated mapping becoming writable */
6196 		    (current->csm_associated &&
6197 		    !(old_prot & VM_PROT_WRITE) &&
6198 		    (current->protection & VM_PROT_WRITE)))) {
6199 			/*
6200 			 * This mapping has not already been marked as
6201 			 * "user_debug" and it is either:
6202 			 * 1. not code-signing-monitored and becoming executable
6203 			 * 2. code-signing-monitored and becoming writable,
6204 			 * so inform the CodeSigningMonitor and mark the
6205 			 * mapping as "user_debug" if appropriate.
6206 			 */
6207 			vm_map_kernel_flags_t vmk_flags;
6208 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
6209 			/* pretend it's a vm_protect(VM_PROT_COPY)... */
6210 			vmk_flags.vmkf_remap_prot_copy = true;
6211 			kr = vm_map_entry_cs_associate(map, current, vmk_flags);
6212 #if DEVELOPMENT || DEBUG
6213 			if (vm_log_xnu_user_debug) {
6214 				printf("FBDP %d[%s] %s:%d map %p entry %p [ 0x%llx 0x%llx ] prot 0x%x -> 0x%x cs_associate -> %d user_debug=%d\n",
6215 				    proc_selfpid(),
6216 				    (get_bsdtask_info(current_task()) ? proc_name_address(get_bsdtask_info(current_task())) : "?"),
6217 				    __FUNCTION__, __LINE__,
6218 				    map, current,
6219 				    current->vme_start, current->vme_end,
6220 				    old_prot, current->protection,
6221 				    kr, current->vme_xnu_user_debug);
6222 			}
6223 #endif /* DEVELOPMENT || DEBUG */
6224 		}
6225 #endif /* CODE_SIGNING_MONITOR */
6226 
6227 		/*
6228 		 *	Update physical map if necessary.
6229 		 *	If the request is to turn off write protection,
6230 		 *	we won't do it for real (in pmap). This is because
6231 		 *	it would cause copy-on-write to fail.  We've already
6232 		 *	set the new protection in the map, so if a
6233 		 *	write-protect fault occurred, it will be fixed up
6234 		 *	properly, COW or not.
6235 		 */
6236 		if (current->protection != old_prot) {
6237 			/* Look one level in, if we support nested pmaps, */
6238 			/* from mapped submaps which are direct entries */
6239 			/* in our map */
6240 
6241 			vm_prot_t prot;
6242 
6243 			prot = current->protection;
6244 			if (current->is_sub_map || (VME_OBJECT(current) == NULL) || (VME_OBJECT(current) != compressor_object)) {
6245 				prot &= ~VM_PROT_WRITE;
6246 			} else {
6247 				assert(!VME_OBJECT(current)->code_signed);
6248 				assert(VME_OBJECT(current)->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6249 				if (prot & VM_PROT_WRITE) {
6250 					/*
6251 					 * For write requests on the
6252 					 * compressor, we will ask the
6253 					 * pmap layer to prevent us from
6254 					 * taking a write fault when we
6255 					 * attempt to access the mapping
6256 					 * next.
6257 					 */
6258 					pmap_options |= PMAP_OPTIONS_PROTECT_IMMEDIATE;
6259 				}
6260 			}
6261 
6262 			if (override_nx(map, VME_ALIAS(current)) && prot) {
6263 				prot |= VM_PROT_EXECUTE;
6264 			}
6265 
6266 #if DEVELOPMENT || DEBUG
6267 			if (!(old_prot & VM_PROT_EXECUTE) &&
6268 			    (prot & VM_PROT_EXECUTE) &&
6269 			    panic_on_unsigned_execute &&
6270 			    (proc_selfcsflags() & CS_KILL)) {
6271 				panic("vm_map_protect(%p,0x%llx,0x%llx) old=0x%x new=0x%x - <rdar://23770418> code-signing bypass?", map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, old_prot, prot);
6272 			}
6273 #endif /* DEVELOPMENT || DEBUG */
6274 
6275 			if (pmap_has_prot_policy(map->pmap, current->translated_allow_execute, prot)) {
6276 				if (current->wired_count) {
6277 					panic("vm_map_protect(%p,0x%llx,0x%llx) new=0x%x wired=%x",
6278 					    map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, prot, current->wired_count);
6279 				}
6280 
6281 				/* If the pmap layer cares about this
6282 				 * protection type, force a fault for
6283 				 * each page so that vm_fault will
6284 				 * repopulate the page with the full
6285 				 * set of protections.
6286 				 */
6287 				/*
6288 				 * TODO: We don't seem to need this,
6289 				 * but this is due to an internal
6290 				 * implementation detail of
6291 				 * pmap_protect.  Do we want to rely
6292 				 * on this?
6293 				 */
6294 				prot = VM_PROT_NONE;
6295 			}
6296 
6297 			if (current->is_sub_map && current->use_pmap) {
6298 				pmap_protect(VME_SUBMAP(current)->pmap,
6299 				    current->vme_start,
6300 				    current->vme_end,
6301 				    prot);
6302 			} else {
6303 				pmap_protect_options(map->pmap,
6304 				    current->vme_start,
6305 				    current->vme_end,
6306 				    prot,
6307 				    pmap_options,
6308 				    NULL);
6309 			}
6310 		}
6311 		current = current->vme_next;
6312 	}
6313 
6314 	current = entry;
6315 	while ((current != vm_map_to_entry(map)) &&
6316 	    (current->vme_start <= end)) {
6317 		vm_map_simplify_entry(map, current);
6318 		current = current->vme_next;
6319 	}
6320 
6321 	vm_map_unlock(map);
6322 	return KERN_SUCCESS;
6323 }
6324 
6325 /*
6326  *	vm_map_inherit:
6327  *
6328  *	Sets the inheritance of the specified address
6329  *	range in the target map.  Inheritance
6330  *	affects how the map will be shared with
6331  *	child maps at the time of vm_map_fork.
6332  */
6333 kern_return_t
6334 vm_map_inherit(
6335 	vm_map_t        map,
6336 	vm_map_offset_t start,
6337 	vm_map_offset_t end,
6338 	vm_inherit_t    new_inheritance)
6339 {
6340 	vm_map_entry_t  entry;
6341 	vm_map_entry_t  temp_entry;
6342 
6343 	vm_map_lock(map);
6344 
6345 	VM_MAP_RANGE_CHECK(map, start, end);
6346 
6347 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
6348 		vm_map_unlock(map);
6349 		return KERN_INVALID_ADDRESS;
6350 	}
6351 
6352 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
6353 		entry = temp_entry;
6354 	} else {
6355 		temp_entry = temp_entry->vme_next;
6356 		entry = temp_entry;
6357 	}
6358 
6359 	/* first check entire range for submaps which can't support the */
6360 	/* given inheritance. */
6361 	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
6362 		if (entry->is_sub_map) {
6363 			if (new_inheritance == VM_INHERIT_COPY) {
6364 				vm_map_unlock(map);
6365 				return KERN_INVALID_ARGUMENT;
6366 			}
6367 		}
6368 
6369 		entry = entry->vme_next;
6370 	}
6371 
6372 	entry = temp_entry;
6373 	if (entry != vm_map_to_entry(map)) {
6374 		/* clip and unnest if necessary */
6375 		vm_map_clip_start(map, entry, start);
6376 	}
6377 
6378 	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
6379 		vm_map_clip_end(map, entry, end);
6380 		if (entry->is_sub_map) {
6381 			/* clip did unnest if needed */
6382 			assert(!entry->use_pmap);
6383 		}
6384 
6385 		entry->inheritance = new_inheritance;
6386 
6387 		entry = entry->vme_next;
6388 	}
6389 
6390 	vm_map_unlock(map);
6391 	return KERN_SUCCESS;
6392 }
6393 
6394 /*
6395  * Update the accounting for the amount of wired memory in this map.  If the user has
6396  * exceeded the defined limits, then we fail.  Wiring on behalf of the kernel never fails.
6397  */
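/*
 * Worked example of the per-map check below (all numbers hypothetical):
 * with map->user_wire_size == 96 MB, map->user_wire_limit == 128 MB and
 * vm_per_task_user_wire_limit == 112 MB, a first-time request to wire a
 * 32 MB entry fails with KERN_RESOURCE_SHORTAGE, since
 * 96 MB + 32 MB = 128 MB exceeds MIN(128 MB, 112 MB) == 112 MB.  The
 * system-wide check works the same way against vm_global_user_wire_limit,
 * using ptoa_64(total_wire_count) as the amount already wired.
 */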
6398 
6399 static kern_return_t
6400 add_wire_counts(
6401 	vm_map_t        map,
6402 	vm_map_entry_t  entry,
6403 	boolean_t       user_wire)
6404 {
6405 	vm_map_size_t   size;
6406 
6407 	bool first_wire = entry->wired_count == 0 && entry->user_wired_count == 0;
6408 
6409 	if (user_wire) {
6410 		unsigned int total_wire_count =  vm_page_wire_count + vm_lopage_free_count;
6411 
6412 		/*
6413 		 * We're wiring memory at the request of the user.  Check if this is the first time the user is wiring
6414 		 * this map entry.
6415 		 */
6416 
6417 		if (entry->user_wired_count == 0) {
6418 			size = entry->vme_end - entry->vme_start;
6419 
6420 			/*
6421 			 * Since this is the first time the user is wiring this map entry, check to see if we're
6422 			 * exceeding the user wire limits.  There is a per map limit which is the smaller of either
6423 			 * the process's rlimit or the global vm_per_task_user_wire_limit which caps this value.  There is also
6424 			 * a system-wide limit on the amount of memory all users can wire.  If the user is over either
6425 			 * limit, then we fail.
6426 			 */
6427 
6428 			if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_per_task_user_wire_limit) ||
6429 			    size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {
6430 				if (size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {
6431 #if DEVELOPMENT || DEBUG
6432 					if (panic_on_mlock_failure) {
6433 						panic("mlock: Over global wire limit. %llu bytes wired and requested to wire %llu bytes more", ptoa_64(total_wire_count), (uint64_t) size);
6434 					}
6435 #endif /* DEVELOPMENT || DEBUG */
6436 					os_atomic_inc(&vm_add_wire_count_over_global_limit, relaxed);
6437 				} else {
6438 					os_atomic_inc(&vm_add_wire_count_over_user_limit, relaxed);
6439 #if DEVELOPMENT || DEBUG
6440 					if (panic_on_mlock_failure) {
6441 						panic("mlock: Over process wire limit. %llu bytes wired and requested to wire %llu bytes more", (uint64_t) map->user_wire_size, (uint64_t) size);
6442 					}
6443 #endif /* DEVELOPMENT || DEBUG */
6444 				}
6445 				return KERN_RESOURCE_SHORTAGE;
6446 			}
6447 
6448 			/*
6449 			 * The first time the user wires an entry, we also increment the wired_count and add this to
6450 			 * the total that has been wired in the map.
6451 			 */
6452 
6453 			if (entry->wired_count >= MAX_WIRE_COUNT) {
6454 				return KERN_FAILURE;
6455 			}
6456 
6457 			entry->wired_count++;
6458 			map->user_wire_size += size;
6459 		}
6460 
6461 		if (entry->user_wired_count >= MAX_WIRE_COUNT) {
6462 			return KERN_FAILURE;
6463 		}
6464 
6465 		entry->user_wired_count++;
6466 	} else {
6467 		/*
6468 		 * The kernel's wiring the memory.  Just bump the count and continue.
6469 		 */
6470 
6471 		if (entry->wired_count >= MAX_WIRE_COUNT) {
6472 			panic("vm_map_wire: too many wirings");
6473 		}
6474 
6475 		entry->wired_count++;
6476 	}
6477 
6478 	if (first_wire) {
6479 		vme_btref_consider_and_set(entry, __builtin_frame_address(0));
6480 	}
6481 
6482 	return KERN_SUCCESS;
6483 }
6484 
6485 /*
6486  * Update the memory wiring accounting now that the given map entry is being unwired.
6487  */
6488 
6489 static void
6490 subtract_wire_counts(
6491 	vm_map_t        map,
6492 	vm_map_entry_t  entry,
6493 	boolean_t       user_wire)
6494 {
6495 	if (user_wire) {
6496 		/*
6497 		 * We're unwiring memory at the request of the user.  See if we're removing the last user wire reference.
6498 		 */
6499 
6500 		if (entry->user_wired_count == 1) {
6501 			/*
6502 			 * We're removing the last user wire reference.  Decrement the wired_count and the total
6503 			 * user wired memory for this map.
6504 			 */
6505 
6506 			assert(entry->wired_count >= 1);
6507 			entry->wired_count--;
6508 			map->user_wire_size -= entry->vme_end - entry->vme_start;
6509 		}
6510 
6511 		assert(entry->user_wired_count >= 1);
6512 		entry->user_wired_count--;
6513 	} else {
6514 		/*
6515 		 * The kernel is unwiring the memory.   Just update the count.
6516 		 */
6517 
6518 		assert(entry->wired_count >= 1);
6519 		entry->wired_count--;
6520 	}
6521 
6522 	vme_btref_consider_and_put(entry);
6523 }
6524 
6525 int cs_executable_wire = 0;
6526 
6527 /*
6528  *	vm_map_wire:
6529  *
6530  *	Sets the pageability of the specified address range in the
6531  *	target map as wired.  Regions specified as not pageable require
6532  *	locked-down physical memory and physical page maps.  The
6533  *	access_type variable indicates types of accesses that must not
6534  *	generate page faults.  This is checked against protection of
6535  *	memory being locked-down.
6536  *
6537  *	The map must not be locked, but a reference must remain to the
6538  *	map throughout the call.
6539  */
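/*
 * Sketch of a kernel wire/unwire pairing (hypothetical caller; the
 * entry point and tag are examples, not taken from this file):
 *
 *	kr = vm_map_wire_kernel(map, start, end, VM_PROT_READ,
 *	    VM_KERN_MEMORY_OSFMK, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		// access the pages without taking faults
 *		vm_map_unwire(map, start, end, FALSE);
 *	}
 *
 * vm_map_wire_nested() below is the common implementation behind such
 * entry points; "user_wire" selects user vs. kernel wire accounting.
 */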
6540 static kern_return_t
6541 vm_map_wire_nested(
6542 	vm_map_t                map,
6543 	vm_map_offset_t         start,
6544 	vm_map_offset_t         end,
6545 	vm_prot_t               caller_prot,
6546 	vm_tag_t                tag,
6547 	boolean_t               user_wire,
6548 	pmap_t                  map_pmap,
6549 	vm_map_offset_t         pmap_addr,
6550 	ppnum_t                 *physpage_p)
6551 {
6552 	vm_map_entry_t          entry;
6553 	vm_prot_t               access_type;
6554 	struct vm_map_entry     *first_entry, tmp_entry;
6555 	vm_map_t                real_map;
6556 	vm_map_offset_t         s, e;
6557 	kern_return_t           rc;
6558 	boolean_t               need_wakeup;
6559 	boolean_t               main_map = FALSE;
6560 	wait_interrupt_t        interruptible_state;
6561 	thread_t                cur_thread;
6562 	unsigned int            last_timestamp;
6563 	vm_map_size_t           size;
6564 	boolean_t               wire_and_extract;
6565 	vm_prot_t               extra_prots;
6566 
6567 	extra_prots = VM_PROT_COPY;
6568 	extra_prots |= VM_PROT_COPY_FAIL_IF_EXECUTABLE;
6569 #if XNU_TARGET_OS_OSX
6570 	if (map->pmap == kernel_pmap ||
6571 	    !vm_map_cs_enforcement(map)) {
6572 		extra_prots &= ~VM_PROT_COPY_FAIL_IF_EXECUTABLE;
6573 	}
6574 #endif /* XNU_TARGET_OS_OSX */
6575 #if CODE_SIGNING_MONITOR
6576 	if (csm_address_space_exempt(map->pmap) == KERN_SUCCESS) {
6577 		extra_prots &= ~VM_PROT_COPY_FAIL_IF_EXECUTABLE;
6578 	}
6579 #endif /* CODE_SIGNING_MONITOR */
6580 
6581 	access_type = (caller_prot & (VM_PROT_ALL | VM_PROT_ALLEXEC));
6582 
6583 	wire_and_extract = FALSE;
6584 	if (physpage_p != NULL) {
6585 		/*
6586 		 * The caller wants the physical page number of the
6587 		 * wired page.  We return only one physical page number
6588 		 * so this works for only one page at a time.
6589 		 */
6590 		if ((end - start) != PAGE_SIZE) {
6591 			return KERN_INVALID_ARGUMENT;
6592 		}
6593 		wire_and_extract = TRUE;
6594 		*physpage_p = 0;
6595 	}
6596 
6597 	vm_map_lock(map);
6598 	if (map_pmap == NULL) {
6599 		main_map = TRUE;
6600 	}
6601 	last_timestamp = map->timestamp;
6602 
6603 	VM_MAP_RANGE_CHECK(map, start, end);
6604 	assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)));
6605 	assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)));
6606 
6607 	if (start == end) {
6608 		/* We wired what the caller asked for, zero pages */
6609 		vm_map_unlock(map);
6610 		return KERN_SUCCESS;
6611 	}
6612 
6613 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
6614 		vm_map_unlock(map);
6615 		return KERN_INVALID_ADDRESS;
6616 	}
6617 
6618 	need_wakeup = FALSE;
6619 	cur_thread = current_thread();
6620 
6621 	s = start;
6622 	rc = KERN_SUCCESS;
6623 
6624 	if (vm_map_lookup_entry(map, s, &first_entry)) {
6625 		entry = first_entry;
6626 		/*
6627 		 * vm_map_clip_start will be done later.
6628 		 * We don't want to unnest any nested submaps here !
6629 		 */
6630 	} else {
6631 		/* Start address is not in map */
6632 		rc = KERN_INVALID_ADDRESS;
6633 		goto done;
6634 	}
6635 
6636 	while ((entry != vm_map_to_entry(map)) && (s < end)) {
6637 		/*
6638 		 * At this point, we have wired from "start" to "s".
6639 		 * We still need to wire from "s" to "end".
6640 		 *
6641 		 * "entry" hasn't been clipped, so it could start before "s"
6642 		 * and/or end after "end".
6643 		 */
6644 
6645 		/* "e" is how far we want to wire in this entry */
6646 		e = entry->vme_end;
6647 		if (e > end) {
6648 			e = end;
6649 		}
6650 
6651 		/*
6652 		 * If another thread is wiring/unwiring this entry then
6653 		 * block after informing the other thread to wake us up.
6654 		 */
6655 		if (entry->in_transition) {
6656 			wait_result_t wait_result;
6657 
6658 			/*
6659 			 * We have not clipped the entry.  Make sure that
6660 			 * the start address is in range so that the lookup
6661 			 * below will succeed.
6662 			 * "s" is the current starting point: we've already
6663 			 * wired from "start" to "s" and we still have
6664 			 * to wire from "s" to "end".
6665 			 */
6666 
6667 			entry->needs_wakeup = TRUE;
6668 
6669 			/*
6670 			 * wake up anybody waiting on entries that we have
6671 			 * already wired.
6672 			 */
6673 			if (need_wakeup) {
6674 				vm_map_entry_wakeup(map);
6675 				need_wakeup = FALSE;
6676 			}
6677 			/*
6678 			 * User wiring is interruptible
6679 			 */
6680 			wait_result = vm_map_entry_wait(map,
6681 			    (user_wire) ? THREAD_ABORTSAFE :
6682 			    THREAD_UNINT);
6683 			if (user_wire && wait_result == THREAD_INTERRUPTED) {
6684 				/*
6685 				 * undo the wirings we have done so far
6686 				 * We do not clear the needs_wakeup flag,
6687 				 * because we cannot tell if we were the
6688 				 * only one waiting.
6689 				 */
6690 				rc = KERN_FAILURE;
6691 				goto done;
6692 			}
6693 
6694 			/*
6695 			 * Cannot avoid a lookup here.  Reset the timestamp.
6696 			 */
6697 			last_timestamp = map->timestamp;
6698 
6699 			/*
6700 			 * The entry could have been clipped; look it up again.
6701 			 * The worst that can happen is that it no longer exists.
6702 			 */
6703 			if (!vm_map_lookup_entry(map, s, &first_entry)) {
6704 				/*
6705 				 * User: undo everything up to the previous
6706 				 * entry.  Let vm_map_unwire worry about
6707 				 * checking the validity of the range.
6708 				 */
6709 				rc = KERN_FAILURE;
6710 				goto done;
6711 			}
6712 			entry = first_entry;
6713 			continue;
6714 		}
6715 
6716 		if (entry->is_sub_map) {
6717 			vm_map_offset_t sub_start;
6718 			vm_map_offset_t sub_end;
6719 			vm_map_offset_t local_start;
6720 			vm_map_offset_t local_end;
6721 			pmap_t          pmap;
6722 
6723 			if (wire_and_extract) {
6724 				/*
6725 				 * Wiring would result in copy-on-write
6726 				 * which would not be compatible with
6727 				 * the sharing we have with the original
6728 				 * provider of this memory.
6729 				 */
6730 				rc = KERN_INVALID_ARGUMENT;
6731 				goto done;
6732 			}
6733 
6734 			vm_map_clip_start(map, entry, s);
6735 			vm_map_clip_end(map, entry, end);
6736 
6737 			sub_start = VME_OFFSET(entry);
6738 			sub_end = entry->vme_end;
6739 			sub_end += VME_OFFSET(entry) - entry->vme_start;
6740 
6741 			local_end = entry->vme_end;
6742 			if (map_pmap == NULL) {
6743 				vm_object_t             object;
6744 				vm_object_offset_t      offset;
6745 				vm_prot_t               prot;
6746 				boolean_t               wired;
6747 				vm_map_entry_t          local_entry;
6748 				vm_map_version_t        version;
6749 				vm_map_t                lookup_map;
6750 
6751 				if (entry->use_pmap) {
6752 					pmap = VME_SUBMAP(entry)->pmap;
6753 					/* The ppc implementation requires that */
6754 					/* a submap's pmap address ranges line  */
6755 					/* up with the parent map.              */
6756 #ifdef notdef
6757 					pmap_addr = sub_start;
6758 #endif
6759 					pmap_addr = s;
6760 				} else {
6761 					pmap = map->pmap;
6762 					pmap_addr = s;
6763 				}
6764 
6765 				if (entry->wired_count) {
6766 					if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) {
6767 						goto done;
6768 					}
6769 
6770 					/*
6771 					 * The map was not unlocked:
6772 					 * no need to goto re-lookup.
6773 					 * Just go directly to next entry.
6774 					 */
6775 					entry = entry->vme_next;
6776 					s = entry->vme_start;
6777 					continue;
6778 				}
6779 
6780 				/* Call vm_map_lookup_and_lock_object to */
6781 				/* cause any "needs_copy" flag to be     */
6782 				/* evaluated. */
6783 				local_start = entry->vme_start;
6784 				lookup_map = map;
6785 				vm_map_lock_write_to_read(map);
6786 				rc = vm_map_lookup_and_lock_object(
6787 					&lookup_map, local_start,
6788 					(access_type | extra_prots),
6789 					OBJECT_LOCK_EXCLUSIVE,
6790 					&version, &object,
6791 					&offset, &prot, &wired,
6792 					NULL,
6793 					&real_map, NULL);
6794 				if (rc != KERN_SUCCESS) {
6795 					vm_map_unlock_read(lookup_map);
6796 					assert(map_pmap == NULL);
6797 					vm_map_unwire(map, start,
6798 					    s, user_wire);
6799 					return rc;
6800 				}
6801 				vm_object_unlock(object);
6802 				if (real_map != lookup_map) {
6803 					vm_map_unlock(real_map);
6804 				}
6805 				vm_map_unlock_read(lookup_map);
6806 				vm_map_lock(map);
6807 
6808 				/* we unlocked, so must re-lookup */
6809 				if (!vm_map_lookup_entry(map,
6810 				    local_start,
6811 				    &local_entry)) {
6812 					rc = KERN_FAILURE;
6813 					goto done;
6814 				}
6815 
6816 				/*
6817 				 * entry could have been "simplified",
6818 				 * so re-clip
6819 				 */
6820 				entry = local_entry;
6821 				assert(s == local_start);
6822 				vm_map_clip_start(map, entry, s);
6823 				vm_map_clip_end(map, entry, end);
6824 				/* re-compute "e" */
6825 				e = entry->vme_end;
6826 				if (e > end) {
6827 					e = end;
6828 				}
6829 
6830 				/* did we have a change of type? */
6831 				if (!entry->is_sub_map) {
6832 					last_timestamp = map->timestamp;
6833 					continue;
6834 				}
6835 			} else {
6836 				local_start = entry->vme_start;
6837 				pmap = map_pmap;
6838 			}
6839 
6840 			if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) {
6841 				goto done;
6842 			}
6843 
6844 			entry->in_transition = TRUE;
6845 
6846 			vm_map_unlock(map);
6847 			rc = vm_map_wire_nested(VME_SUBMAP(entry),
6848 			    sub_start, sub_end,
6849 			    caller_prot, tag,
6850 			    user_wire, pmap, pmap_addr,
6851 			    NULL);
6852 			vm_map_lock(map);
6853 
6854 			/*
6855 			 * Find the entry again.  It could have been clipped
6856 			 * after we unlocked the map.
6857 			 */
6858 			if (!vm_map_lookup_entry(map, local_start,
6859 			    &first_entry)) {
6860 				panic("vm_map_wire: re-lookup failed");
6861 			}
6862 			entry = first_entry;
6863 
6864 			assert(local_start == s);
6865 			/* re-compute "e" */
6866 			e = entry->vme_end;
6867 			if (e > end) {
6868 				e = end;
6869 			}
6870 
6871 			last_timestamp = map->timestamp;
6872 			while ((entry != vm_map_to_entry(map)) &&
6873 			    (entry->vme_start < e)) {
6874 				assert(entry->in_transition);
6875 				entry->in_transition = FALSE;
6876 				if (entry->needs_wakeup) {
6877 					entry->needs_wakeup = FALSE;
6878 					need_wakeup = TRUE;
6879 				}
6880 				if (rc != KERN_SUCCESS) {/* from vm_*_wire */
6881 					subtract_wire_counts(map, entry, user_wire);
6882 				}
6883 				entry = entry->vme_next;
6884 			}
6885 			if (rc != KERN_SUCCESS) {       /* from vm_*_wire */
6886 				goto done;
6887 			}
6888 
6889 			/* no need to relookup again */
6890 			s = entry->vme_start;
6891 			continue;
6892 		}
6893 
6894 		/*
6895 		 * If this entry is already wired then increment
6896 		 * the appropriate wire reference count.
6897 		 */
6898 		if (entry->wired_count) {
6899 			if ((entry->protection & access_type) != access_type) {
6900 				/* found a protection problem */
6901 
6902 				/*
6903 				 * XXX FBDP
6904 				 * We should always return an error
6905 				 * in this case but since we didn't
6906 				 * enforce it before, let's do
6907 				 * it only for the new "wire_and_extract"
6908 				 * code path for now...
6909 				 */
6910 				if (wire_and_extract) {
6911 					rc = KERN_PROTECTION_FAILURE;
6912 					goto done;
6913 				}
6914 			}
6915 
6916 			/*
6917 			 * entry is already wired down, get our reference
6918 			 * after clipping to our range.
6919 			 */
6920 			vm_map_clip_start(map, entry, s);
6921 			vm_map_clip_end(map, entry, end);
6922 
6923 			if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) {
6924 				goto done;
6925 			}
6926 
6927 			if (wire_and_extract) {
6928 				vm_object_t             object;
6929 				vm_object_offset_t      offset;
6930 				vm_page_t               m;
6931 
6932 				/*
6933 				 * We don't have to "wire" the page again
6934 				 * but we still have to "extract" its
6935 				 * physical page number, after some sanity
6936 				 * checks.
6937 				 */
6938 				assert((entry->vme_end - entry->vme_start)
6939 				    == PAGE_SIZE);
6940 				assert(!entry->needs_copy);
6941 				assert(!entry->is_sub_map);
6942 				assert(VME_OBJECT(entry));
6943 				if (((entry->vme_end - entry->vme_start)
6944 				    != PAGE_SIZE) ||
6945 				    entry->needs_copy ||
6946 				    entry->is_sub_map ||
6947 				    VME_OBJECT(entry) == VM_OBJECT_NULL) {
6948 					rc = KERN_INVALID_ARGUMENT;
6949 					goto done;
6950 				}
6951 
6952 				object = VME_OBJECT(entry);
6953 				offset = VME_OFFSET(entry);
6954 				/* need exclusive lock to update m->dirty */
6955 				if (entry->protection & VM_PROT_WRITE) {
6956 					vm_object_lock(object);
6957 				} else {
6958 					vm_object_lock_shared(object);
6959 				}
6960 				m = vm_page_lookup(object, offset);
6961 				assert(m != VM_PAGE_NULL);
6962 				assert(VM_PAGE_WIRED(m));
6963 				if (m != VM_PAGE_NULL && VM_PAGE_WIRED(m)) {
6964 					*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6965 					if (entry->protection & VM_PROT_WRITE) {
6966 						vm_object_lock_assert_exclusive(
6967 							object);
6968 						m->vmp_dirty = TRUE;
6969 					}
6970 				} else {
6971 					/* not already wired !? */
6972 					*physpage_p = 0;
6973 				}
6974 				vm_object_unlock(object);
6975 			}
6976 
6977 			/* map was not unlocked: no need to relookup */
6978 			entry = entry->vme_next;
6979 			s = entry->vme_start;
6980 			continue;
6981 		}
6982 
6983 		/*
6984 		 * Unwired entry or wire request transmitted via submap
6985 		 */
6986 
6987 		/*
6988 		 * Wiring would copy the pages to the shadow object.
6989 		 * The shadow object would not be code-signed so
6990 		 * attempting to execute code from these copied pages
6991 		 * would trigger a code-signing violation.
6992 		 */
6993 
6994 		if ((entry->protection & VM_PROT_EXECUTE)
6995 #if XNU_TARGET_OS_OSX
6996 		    &&
6997 		    map->pmap != kernel_pmap &&
6998 		    (vm_map_cs_enforcement(map)
6999 #if __arm64__
7000 		    || !VM_MAP_IS_EXOTIC(map)
7001 #endif /* __arm64__ */
7002 		    )
7003 #endif /* XNU_TARGET_OS_OSX */
7004 #if CODE_SIGNING_MONITOR
7005 		    &&
7006 		    (csm_address_space_exempt(map->pmap) != KERN_SUCCESS)
7007 #endif
7008 		    ) {
7009 #if MACH_ASSERT
7010 			printf("pid %d[%s] wiring executable range from "
7011 			    "0x%llx to 0x%llx: rejected to preserve "
7012 			    "code-signing\n",
7013 			    proc_selfpid(),
7014 			    (get_bsdtask_info(current_task())
7015 			    ? proc_name_address(get_bsdtask_info(current_task()))
7016 			    : "?"),
7017 			    (uint64_t) entry->vme_start,
7018 			    (uint64_t) entry->vme_end);
7019 #endif /* MACH_ASSERT */
7020 			DTRACE_VM2(cs_executable_wire,
7021 			    uint64_t, (uint64_t)entry->vme_start,
7022 			    uint64_t, (uint64_t)entry->vme_end);
7023 			cs_executable_wire++;
7024 			rc = KERN_PROTECTION_FAILURE;
7025 			goto done;
7026 		}
7027 
7028 		/*
7029 		 * Perform actions of vm_map_lookup that need the write
7030 		 * lock on the map: create a shadow object for a
7031 		 * copy-on-write region, or an object for a zero-fill
7032 		 * region.
7033 		 */
7034 		size = entry->vme_end - entry->vme_start;
7035 		/*
7036 		 * If wiring a copy-on-write page, we need to copy it now
7037 		 * even if we're only (currently) requesting read access.
7038 		 * This is aggressive, but once it's wired we can't move it.
7039 		 */
7040 		if (entry->needs_copy) {
7041 			if (wire_and_extract) {
7042 				/*
7043 				 * We're supposed to share with the original
7044 				 * provider so should not be "needs_copy"
7045 				 */
7046 				rc = KERN_INVALID_ARGUMENT;
7047 				goto done;
7048 			}
7049 
7050 			VME_OBJECT_SHADOW(entry, size,
7051 			    vm_map_always_shadow(map));
7052 			entry->needs_copy = FALSE;
7053 		} else if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
7054 			if (wire_and_extract) {
7055 				/*
7056 				 * We're supposed to share with the original
7057 				 * provider so should already have an object.
7058 				 */
7059 				rc = KERN_INVALID_ARGUMENT;
7060 				goto done;
7061 			}
7062 			VME_OBJECT_SET(entry, vm_object_allocate(size), false, 0);
7063 			VME_OFFSET_SET(entry, (vm_object_offset_t)0);
7064 			assert(entry->use_pmap);
7065 		} else if (VME_OBJECT(entry)->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
7066 			if (wire_and_extract) {
7067 				/*
7068 				 * We're supposed to share with the original
7069 				 * provider so should not be COPY_SYMMETRIC.
7070 				 */
7071 				rc = KERN_INVALID_ARGUMENT;
7072 				goto done;
7073 			}
7074 			/*
7075 			 * Force an unrequested "copy-on-write" but only for
7076 			 * the range we're wiring.
7077 			 */
7078 //			printf("FBDP %s:%d map %p entry %p [ 0x%llx 0x%llx ] s 0x%llx end 0x%llx wire&extract=%d\n", __FUNCTION__, __LINE__, map, entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end, (uint64_t)s, (uint64_t)end, wire_and_extract);
7079 			vm_map_clip_start(map, entry, s);
7080 			vm_map_clip_end(map, entry, end);
7081 			/* recompute "size" */
7082 			size = entry->vme_end - entry->vme_start;
7083 			/* make a shadow object */
7084 			vm_object_t orig_object;
7085 			vm_object_offset_t orig_offset;
7086 			orig_object = VME_OBJECT(entry);
7087 			orig_offset = VME_OFFSET(entry);
7088 			VME_OBJECT_SHADOW(entry, size, vm_map_always_shadow(map));
7089 			if (VME_OBJECT(entry) != orig_object) {
7090 				/*
7091 				 * This mapping has not been shared (or it would be
7092 				 * COPY_DELAY instead of COPY_SYMMETRIC) and it has
7093 				 * not been copied-on-write (or it would be marked
7094 				 * as "needs_copy" and would have been handled above
7095 				 * and also already write-protected).
7096 				 * We still need to write-protect here to prevent
7097 				 * other threads from modifying these pages while
7098 				 * we're in the process of copying and wiring
7099 				 * the copied pages.
7100 				 * Since the mapping is neither shared nor COWed,
7101 				 * we only need to write-protect the PTEs for this
7102 				 * mapping.
7103 				 */
7104 				vm_object_pmap_protect(orig_object,
7105 				    orig_offset,
7106 				    size,
7107 				    map->pmap,
7108 				    VM_MAP_PAGE_SIZE(map),
7109 				    entry->vme_start,
7110 				    entry->protection & ~VM_PROT_WRITE);
7111 			}
7112 		}
7113 		if (VME_OBJECT(entry)->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
7114 			/*
7115 			 * Make the object COPY_DELAY to get a stable object
7116 			 * to wire.
7117 			 * That should avoid creating long shadow chains while
7118 			 * wiring/unwiring the same range repeatedly.
7119 			 * That also prevents part of the object from being
7120 			 * wired while another part is "needs_copy", which
7121 			 * could result in conflicting rules wrt copy-on-write.
7122 			 */
7123 			vm_object_t object;
7124 
7125 			object = VME_OBJECT(entry);
7126 			vm_object_lock(object);
7127 			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
7128 				assertf(vm_object_round_page(VME_OFFSET(entry) + size) - vm_object_trunc_page(VME_OFFSET(entry)) == object->vo_size,
7129 				    "object %p size 0x%llx entry %p [0x%llx:0x%llx:0x%llx] size 0x%llx\n",
7130 				    object, (uint64_t)object->vo_size,
7131 				    entry,
7132 				    (uint64_t)entry->vme_start,
7133 				    (uint64_t)entry->vme_end,
7134 				    (uint64_t)VME_OFFSET(entry),
7135 				    (uint64_t)size);
7136 				assertf(object->ref_count == 1,
7137 				    "object %p ref_count %d\n",
7138 				    object, object->ref_count);
7139 				assertf(!entry->needs_copy,
7140 				    "entry %p\n", entry);
7141 				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
7142 				object->true_share = TRUE;
7143 			}
7144 			vm_object_unlock(object);
7145 		}
7146 
7147 		vm_map_clip_start(map, entry, s);
7148 		vm_map_clip_end(map, entry, end);
7149 
7150 		/* re-compute "e" */
7151 		e = entry->vme_end;
7152 		if (e > end) {
7153 			e = end;
7154 		}
7155 
7156 		/*
7157 		 * Check for holes and protection mismatch.
7158 		 * Holes: Next entry should be contiguous unless this
7159 		 *	  is the end of the region.
7160 		 * Protection: Access requested must be allowed, unless
7161 		 *	wiring is by protection class
7162 		 */
7163 		if ((entry->vme_end < end) &&
7164 		    ((entry->vme_next == vm_map_to_entry(map)) ||
7165 		    (entry->vme_next->vme_start > entry->vme_end))) {
7166 			/* found a hole */
7167 			rc = KERN_INVALID_ADDRESS;
7168 			goto done;
7169 		}
7170 		if ((entry->protection & access_type) != access_type) {
7171 			/* found a protection problem */
7172 			rc = KERN_PROTECTION_FAILURE;
7173 			goto done;
7174 		}
7175 
7176 		assert(entry->wired_count == 0 && entry->user_wired_count == 0);
7177 
7178 		if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) {
7179 			goto done;
7180 		}
7181 
7182 		entry->in_transition = TRUE;
7183 
7184 		/*
7185 		 * This entry might get split once we unlock the map.
7186 		 * In vm_fault_wire(), we need the current range as
7187 		 * defined by this entry.  In order for this to work
7188 		 * along with a simultaneous clip operation, we make a
7189 		 * temporary copy of this entry and use that for the
7190 		 * wiring.  Note that the underlying objects do not
7191 		 * change during a clip.
7192 		 */
7193 		tmp_entry = *entry;
7194 
7195 		/*
7196 		 * The in_transition state guarantees that the entry
7197 		 * (or entries for this range, if a split occurred) will be
7198 		 * there when the map lock is acquired for the second time.
7199 		 */
7200 		vm_map_unlock(map);
7201 
7202 		if (!user_wire && cur_thread != THREAD_NULL) {
7203 			interruptible_state = thread_interrupt_level(THREAD_UNINT);
7204 		} else {
7205 			interruptible_state = THREAD_UNINT;
7206 		}
7207 
7208 		if (map_pmap) {
7209 			rc = vm_fault_wire(map,
7210 			    &tmp_entry, caller_prot, tag, map_pmap, pmap_addr,
7211 			    physpage_p);
7212 		} else {
7213 			rc = vm_fault_wire(map,
7214 			    &tmp_entry, caller_prot, tag, map->pmap,
7215 			    tmp_entry.vme_start,
7216 			    physpage_p);
7217 		}
7218 
7219 		if (!user_wire && cur_thread != THREAD_NULL) {
7220 			thread_interrupt_level(interruptible_state);
7221 		}
7222 
7223 		vm_map_lock(map);
7224 
7225 		if (last_timestamp + 1 != map->timestamp) {
7226 			/*
7227 			 * Find the entry again.  It could have been clipped
7228 			 * after we unlocked the map.
7229 			 */
7230 			if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
7231 			    &first_entry)) {
7232 				panic("vm_map_wire: re-lookup failed");
7233 			}
7234 
7235 			entry = first_entry;
7236 		}
7237 
7238 		last_timestamp = map->timestamp;
7239 
7240 		while ((entry != vm_map_to_entry(map)) &&
7241 		    (entry->vme_start < tmp_entry.vme_end)) {
7242 			assert(entry->in_transition);
7243 			entry->in_transition = FALSE;
7244 			if (entry->needs_wakeup) {
7245 				entry->needs_wakeup = FALSE;
7246 				need_wakeup = TRUE;
7247 			}
7248 			if (rc != KERN_SUCCESS) {       /* from vm_*_wire */
7249 				subtract_wire_counts(map, entry, user_wire);
7250 			}
7251 			entry = entry->vme_next;
7252 		}
7253 
7254 		if (rc != KERN_SUCCESS) {               /* from vm_*_wire */
7255 			goto done;
7256 		}
7257 
7258 		if ((entry != vm_map_to_entry(map)) && /* we still have entries in the map */
7259 		    (tmp_entry.vme_end != end) &&    /* AND, we are not at the end of the requested range */
7260 		    (entry->vme_start != tmp_entry.vme_end)) { /* AND, the next entry is not contiguous. */
7261 			/* found a "new" hole */
7262 			s = tmp_entry.vme_end;
7263 			rc = KERN_INVALID_ADDRESS;
7264 			goto done;
7265 		}
7266 
7267 		s = entry->vme_start;
7268 	} /* end while loop through map entries */
7269 
7270 done:
7271 	if (rc == KERN_SUCCESS) {
7272 		/* repair any damage we may have made to the VM map */
7273 		vm_map_simplify_range(map, start, end);
7274 	}
7275 
7276 	vm_map_unlock(map);
7277 
7278 	/*
7279 	 * wake up anybody waiting on entries we wired.
7280 	 */
7281 	if (need_wakeup) {
7282 		vm_map_entry_wakeup(map);
7283 	}
7284 
7285 	if (rc != KERN_SUCCESS) {
7286 		/* undo what has been wired so far */
7287 		vm_map_unwire_nested(map, start, s, user_wire,
7288 		    map_pmap, pmap_addr);
7289 		if (physpage_p) {
7290 			*physpage_p = 0;
7291 		}
7292 	}
7293 
7294 	return rc;
7295 }
7296 
7297 kern_return_t
7298 vm_map_wire_external(
7299 	vm_map_t                map,
7300 	vm_map_offset_t         start,
7301 	vm_map_offset_t         end,
7302 	vm_prot_t               caller_prot,
7303 	boolean_t               user_wire)
7304 {
7305 	kern_return_t   kret;
7306 
7307 	kret = vm_map_wire_nested(map, start, end, caller_prot, vm_tag_bt(),
7308 	    user_wire, (pmap_t)NULL, 0, NULL);
7309 	return kret;
7310 }
7311 
7312 kern_return_t
7313 vm_map_wire_kernel(
7314 	vm_map_t                map,
7315 	vm_map_offset_t         start,
7316 	vm_map_offset_t         end,
7317 	vm_prot_t               caller_prot,
7318 	vm_tag_t                tag,
7319 	boolean_t               user_wire)
7320 {
7321 	kern_return_t   kret;
7322 
7323 	kret = vm_map_wire_nested(map, start, end, caller_prot, tag,
7324 	    user_wire, (pmap_t)NULL, 0, NULL);
7325 	return kret;
7326 }
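
/*
 * Usage sketch (hypothetical caller, not part of the VM API): a kernel
 * client that temporarily wires a range for I/O and unwires it when
 * done.  Only vm_map_wire_kernel() and vm_map_unwire() are real; the
 * function name, tag choice and error handling are illustrative.
 */
#if 0
static kern_return_t
example_wire_range_for_io(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end)
{
	kern_return_t kr;

	/* take one kernel wiring on [start, end), charged to a VM tag */
	kr = vm_map_wire_kernel(map, start, end,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_KERN_MEMORY_OSFMK,
	    FALSE);             /* user_wire: this is a kernel wiring */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* ... the range is now resident and safe for device I/O ... */

	/* release the kernel wiring taken above */
	return vm_map_unwire(map, start, end, FALSE);
}
#endif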
7327 
7328 kern_return_t
7329 vm_map_wire_and_extract_external(
7330 	vm_map_t        map,
7331 	vm_map_offset_t start,
7332 	vm_prot_t       caller_prot,
7333 	boolean_t       user_wire,
7334 	ppnum_t         *physpage_p)
7335 {
7336 	kern_return_t   kret;
7337 
7338 	kret = vm_map_wire_nested(map,
7339 	    start,
7340 	    start + VM_MAP_PAGE_SIZE(map),
7341 	    caller_prot,
7342 	    vm_tag_bt(),
7343 	    user_wire,
7344 	    (pmap_t)NULL,
7345 	    0,
7346 	    physpage_p);
7347 	if (kret != KERN_SUCCESS &&
7348 	    physpage_p != NULL) {
7349 		*physpage_p = 0;
7350 	}
7351 	return kret;
7352 }
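
/*
 * Sketch: the wrapper above wires exactly one VM-map page and reports
 * its physical page number, which a caller could then use to program a
 * DMA descriptor.  "example_pin_one_page" is hypothetical; on failure
 * the wrapper guarantees that *physpage_p is reset to 0.
 */
#if 0
static kern_return_t
example_pin_one_page(
	vm_map_t        map,
	vm_map_offset_t page_addr,      /* must be map-page aligned */
	ppnum_t         *phys_out)
{
	/* wires [page_addr, page_addr + VM_MAP_PAGE_SIZE(map)) */
	return vm_map_wire_and_extract_external(map, page_addr,
	    VM_PROT_READ | VM_PROT_WRITE,
	    TRUE,               /* user_wire */
	    phys_out);
}
#endif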
7353 
7354 /*
7355  *	vm_map_unwire:
7356  *
7357  *	Makes the specified address range in the target map pageable
7358  *	again.  Regions specified must have been wired previously.
7359  *
7360  *	The map must not be locked, but a reference must remain to the map
7361  *	throughout the call.
7362  *
7363  *	The kernel will panic on failures.  User unwire ignores holes and
7364  *	unwired or in-transition entries to avoid losing memory by leaving
7365  *	it wired.
7366  */
7367 static kern_return_t
7368 vm_map_unwire_nested(
7369 	vm_map_t                map,
7370 	vm_map_offset_t         start,
7371 	vm_map_offset_t         end,
7372 	boolean_t               user_wire,
7373 	pmap_t                  map_pmap,
7374 	vm_map_offset_t         pmap_addr)
7375 {
7376 	vm_map_entry_t          entry;
7377 	struct vm_map_entry     *first_entry, tmp_entry;
7378 	boolean_t               need_wakeup;
7379 	boolean_t               main_map = FALSE;
7380 	unsigned int            last_timestamp;
7381 
7382 	vm_map_lock(map);
7383 	if (map_pmap == NULL) {
7384 		main_map = TRUE;
7385 	}
7386 	last_timestamp = map->timestamp;
7387 
7388 	VM_MAP_RANGE_CHECK(map, start, end);
7389 	assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)));
7390 	assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)));
7391 
7392 	if (start == end) {
7393 		/* We unwired what the caller asked for: zero pages */
7394 		vm_map_unlock(map);
7395 		return KERN_SUCCESS;
7396 	}
7397 
7398 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
7399 		vm_map_unlock(map);
7400 		return KERN_INVALID_ADDRESS;
7401 	}
7402 
7403 	if (vm_map_lookup_entry(map, start, &first_entry)) {
7404 		entry = first_entry;
7405 		/*
7406 		 * vm_map_clip_start will be done later.
7407 		 * We don't want to unnest any nested submaps here!
7408 		 */
7409 	} else {
7410 		if (!user_wire) {
7411 			panic("vm_map_unwire: start not found");
7412 		}
7413 		/*	Start address is not in map. */
7414 		vm_map_unlock(map);
7415 		return KERN_INVALID_ADDRESS;
7416 	}
7417 
7418 	if (entry->superpage_size) {
7419 		/* superpages are always wired */
7420 		vm_map_unlock(map);
7421 		return KERN_INVALID_ADDRESS;
7422 	}
7423 
7424 	need_wakeup = FALSE;
7425 	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
7426 		if (entry->in_transition) {
7427 			/*
7428 			 * 1)
7429 			 * Another thread is wiring down this entry.  Note
7430 			 * that if the wiring is not on behalf of that other
7431 			 * thread, we would be unwiring an unwired entry,
7432 			 * which is not permitted.  If we wait, we will be
7433 			 * unwiring memory we did not wire.
7434 			 *
7435 			 * 2)
7436 			 * Another thread is unwiring this entry.  We did not
7437 			 * have a reference to it, because if we did, this
7438 			 * entry would not be getting unwired now.
7439 			 */
7440 			if (!user_wire) {
7441 				/*
7442 				 * XXX FBDP
7443 				 * This could happen:  there could be some
7444 				 * overlapping vslock/vsunlock operations
7445 				 * going on.
7446 				 * We should probably just wait and retry,
7447 				 * but then we have to be careful that this
7448 				 * entry could get "simplified" after
7449 				 * "in_transition" gets unset and before
7450 				 * we re-lookup the entry, so we would
7451 				 * have to re-clip the entry to avoid
7452 				 * re-unwiring what we have already unwired...
7453 				 * See vm_map_wire_nested().
7454 				 *
7455 				 * Or we could just ignore "in_transition"
7456 				 * here and proceed to decrement the wired
7457 				 * count(s) on this entry.  That should be fine
7458 				 * as long as "wired_count" doesn't drop all
7459 				 * the way to 0 (and we should panic if THAT
7460 				 * happens).
7461 				 */
7462 				panic("vm_map_unwire: in_transition entry");
7463 			}
7464 
7465 			entry = entry->vme_next;
7466 			continue;
7467 		}
7468 
7469 		if (entry->is_sub_map) {
7470 			vm_map_offset_t sub_start;
7471 			vm_map_offset_t sub_end;
7472 			vm_map_offset_t local_end;
7473 			pmap_t          pmap;
7474 
7475 			vm_map_clip_start(map, entry, start);
7476 			vm_map_clip_end(map, entry, end);
7477 
7478 			sub_start = VME_OFFSET(entry);
7479 			sub_end = entry->vme_end - entry->vme_start;
7480 			sub_end += VME_OFFSET(entry);
7481 			local_end = entry->vme_end;
7482 			if (map_pmap == NULL) {
7483 				if (entry->use_pmap) {
7484 					pmap = VME_SUBMAP(entry)->pmap;
7485 					pmap_addr = sub_start;
7486 				} else {
7487 					pmap = map->pmap;
7488 					pmap_addr = start;
7489 				}
7490 				if (entry->wired_count == 0 ||
7491 				    (user_wire && entry->user_wired_count == 0)) {
7492 					if (!user_wire) {
7493 						panic("vm_map_unwire: entry is unwired");
7494 					}
7495 					entry = entry->vme_next;
7496 					continue;
7497 				}
7498 
7499 				/*
7500 				 * Check for holes
7501 				 * Holes: Next entry should be contiguous unless
7502 				 * this is the end of the region.
7503 				 */
7504 				if (((entry->vme_end < end) &&
7505 				    ((entry->vme_next == vm_map_to_entry(map)) ||
7506 				    (entry->vme_next->vme_start
7507 				    > entry->vme_end)))) {
7508 					if (!user_wire) {
7509 						panic("vm_map_unwire: non-contiguous region");
7510 					}
7511 /*
7512  *                                       entry = entry->vme_next;
7513  *                                       continue;
7514  */
7515 				}
7516 
7517 				subtract_wire_counts(map, entry, user_wire);
7518 
7519 				if (entry->wired_count != 0) {
7520 					entry = entry->vme_next;
7521 					continue;
7522 				}
7523 
7524 				entry->in_transition = TRUE;
7525 				tmp_entry = *entry;/* see comment in vm_map_wire() */
7526 
7527 				/*
7528 				 * We can unlock the map now. The in_transition state
7529 				 * guarantees existence of the entry.
7530 				 */
7531 				vm_map_unlock(map);
7532 				vm_map_unwire_nested(VME_SUBMAP(entry),
7533 				    sub_start, sub_end, user_wire, pmap, pmap_addr);
7534 				vm_map_lock(map);
7535 
7536 				if (last_timestamp + 1 != map->timestamp) {
7537 					/*
7538 					 * Find the entry again.  It could have been
7539 					 * clipped or deleted after we unlocked the map.
7540 					 */
7541 					if (!vm_map_lookup_entry(map,
7542 					    tmp_entry.vme_start,
7543 					    &first_entry)) {
7544 						if (!user_wire) {
7545 							panic("vm_map_unwire: re-lookup failed");
7546 						}
7547 						entry = first_entry->vme_next;
7548 					} else {
7549 						entry = first_entry;
7550 					}
7551 				}
7552 				last_timestamp = map->timestamp;
7553 
7554 				/*
7555 				 * clear transition bit for all constituent entries
7556 				 * that were in the original entry (saved in
7557 				 * tmp_entry).  Also check for waiters.
7558 				 */
7559 				while ((entry != vm_map_to_entry(map)) &&
7560 				    (entry->vme_start < tmp_entry.vme_end)) {
7561 					assert(entry->in_transition);
7562 					entry->in_transition = FALSE;
7563 					if (entry->needs_wakeup) {
7564 						entry->needs_wakeup = FALSE;
7565 						need_wakeup = TRUE;
7566 					}
7567 					entry = entry->vme_next;
7568 				}
7569 				continue;
7570 			} else {
7571 				tmp_entry = *entry;
7572 				vm_map_unlock(map);
7573 				vm_map_unwire_nested(VME_SUBMAP(entry),
7574 				    sub_start, sub_end, user_wire, map_pmap,
7575 				    pmap_addr);
7576 				vm_map_lock(map);
7577 
7578 				if (last_timestamp + 1 != map->timestamp) {
7579 					/*
7580 					 * Find the entry again.  It could have been
7581 					 * clipped or deleted after we unlocked the map.
7582 					 */
7583 					if (!vm_map_lookup_entry(map,
7584 					    tmp_entry.vme_start,
7585 					    &first_entry)) {
7586 						if (!user_wire) {
7587 							panic("vm_map_unwire: re-lookup failed");
7588 						}
7589 						entry = first_entry->vme_next;
7590 					} else {
7591 						entry = first_entry;
7592 					}
7593 				}
7594 				last_timestamp = map->timestamp;
7595 			}
7596 		}
7597 
7598 
7599 		if ((entry->wired_count == 0) ||
7600 		    (user_wire && entry->user_wired_count == 0)) {
7601 			if (!user_wire) {
7602 				panic("vm_map_unwire: entry is unwired");
7603 			}
7604 
7605 			entry = entry->vme_next;
7606 			continue;
7607 		}
7608 
7609 		assert(entry->wired_count > 0 &&
7610 		    (!user_wire || entry->user_wired_count > 0));
7611 
7612 		vm_map_clip_start(map, entry, start);
7613 		vm_map_clip_end(map, entry, end);
7614 
7615 		/*
7616 		 * Check for holes
7617 		 * Holes: Next entry should be contiguous unless
7618 		 *	  this is the end of the region.
7619 		 */
7620 		if (((entry->vme_end < end) &&
7621 		    ((entry->vme_next == vm_map_to_entry(map)) ||
7622 		    (entry->vme_next->vme_start > entry->vme_end)))) {
7623 			if (!user_wire) {
7624 				panic("vm_map_unwire: non-contiguous region");
7625 			}
7626 			entry = entry->vme_next;
7627 			continue;
7628 		}
7629 
7630 		subtract_wire_counts(map, entry, user_wire);
7631 
7632 		if (entry->wired_count != 0) {
7633 			entry = entry->vme_next;
7634 			continue;
7635 		}
7636 
7637 		if (entry->zero_wired_pages) {
7638 			entry->zero_wired_pages = FALSE;
7639 		}
7640 
7641 		entry->in_transition = TRUE;
7642 		tmp_entry = *entry;     /* see comment in vm_map_wire() */
7643 
7644 		/*
7645 		 * We can unlock the map now. The in_transition state
7646 		 * guarantees existence of the entry.
7647 		 */
7648 		vm_map_unlock(map);
7649 		if (map_pmap) {
7650 			vm_fault_unwire(map, &tmp_entry, FALSE, map_pmap,
7651 			    pmap_addr, tmp_entry.vme_end);
7652 		} else {
7653 			vm_fault_unwire(map, &tmp_entry, FALSE, map->pmap,
7654 			    tmp_entry.vme_start, tmp_entry.vme_end);
7655 		}
7656 		vm_map_lock(map);
7657 
7658 		if (last_timestamp + 1 != map->timestamp) {
7659 			/*
7660 			 * Find the entry again.  It could have been clipped
7661 			 * or deleted after we unlocked the map.
7662 			 */
7663 			if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
7664 			    &first_entry)) {
7665 				if (!user_wire) {
7666 					panic("vm_map_unwire: re-lookup failed");
7667 				}
7668 				entry = first_entry->vme_next;
7669 			} else {
7670 				entry = first_entry;
7671 			}
7672 		}
7673 		last_timestamp = map->timestamp;
7674 
7675 		/*
7676 		 * clear transition bit for all constituent entries that
7677 		 * were in the original entry (saved in tmp_entry).  Also
7678 		 * check for waiters.
7679 		 */
7680 		while ((entry != vm_map_to_entry(map)) &&
7681 		    (entry->vme_start < tmp_entry.vme_end)) {
7682 			assert(entry->in_transition);
7683 			entry->in_transition = FALSE;
7684 			if (entry->needs_wakeup) {
7685 				entry->needs_wakeup = FALSE;
7686 				need_wakeup = TRUE;
7687 			}
7688 			entry = entry->vme_next;
7689 		}
7690 	}
7691 
7692 	/*
7693 	 * We might have fragmented the address space when we wired this
7694 	 * range of addresses.  Attempt to re-coalesce these VM map entries
7695 	 * with their neighbors now that they're no longer wired.
7696 	 * Under some circumstances, address space fragmentation can
7697 	 * prevent VM object shadow chain collapsing, which can cause
7698 	 * swap space leaks.
7699 	 */
7700 	vm_map_simplify_range(map, start, end);
7701 
7702 	vm_map_unlock(map);
7703 	/*
7704 	 * wake up anybody waiting on entries that we have unwired.
7705 	 */
7706 	if (need_wakeup) {
7707 		vm_map_entry_wakeup(map);
7708 	}
7709 	return KERN_SUCCESS;
7710 }
7711 
7712 kern_return_t
7713 vm_map_unwire(
7714 	vm_map_t                map,
7715 	vm_map_offset_t         start,
7716 	vm_map_offset_t         end,
7717 	boolean_t               user_wire)
7718 {
7719 	return vm_map_unwire_nested(map, start, end,
7720 	           user_wire, (pmap_t)NULL, 0);
7721 }
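
/*
 * For orientation: the user-visible route into these wire/unwire paths
 * is mlock(2)/munlock(2), which reach vm_map_wire_kernel() and
 * vm_map_unwire() with user_wire == TRUE so that per-task wired
 * accounting is maintained.  A minimal user-space sketch, assuming
 * only the POSIX interfaces:
 */
#if 0   /* user-space illustration, not kernel code */
#include <sys/mman.h>

int
pin_buffer(void *buf, size_t len)
{
	if (mlock(buf, len) != 0) {     /* wires the pages */
		return -1;
	}
	/* ... buf stays resident until unlocked ... */
	return munlock(buf, len);       /* unwires the pages */
}
#endif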
7722 
7723 
7724 /*
7725  *	vm_map_entry_zap:	[ internal use only ]
7726  *
7727  *	Remove the entry from the target map
7728  *	and put it on a zap list.
7729  */
7730 static void
7731 vm_map_entry_zap(
7732 	vm_map_t                map,
7733 	vm_map_entry_t          entry,
7734 	vm_map_zap_t            zap)
7735 {
7736 	vm_map_offset_t s, e;
7737 
7738 	s = entry->vme_start;
7739 	e = entry->vme_end;
7740 	assert(VM_MAP_PAGE_ALIGNED(s, FOURK_PAGE_MASK));
7741 	assert(VM_MAP_PAGE_ALIGNED(e, FOURK_PAGE_MASK));
7742 	if (VM_MAP_PAGE_MASK(map) >= PAGE_MASK) {
7743 		assert(page_aligned(s));
7744 		assert(page_aligned(e));
7745 	}
7746 	if (entry->map_aligned == TRUE) {
7747 		assert(VM_MAP_PAGE_ALIGNED(s, VM_MAP_PAGE_MASK(map)));
7748 		assert(VM_MAP_PAGE_ALIGNED(e, VM_MAP_PAGE_MASK(map)));
7749 	}
7750 	assert(entry->wired_count == 0);
7751 	assert(entry->user_wired_count == 0);
7752 	assert(!entry->vme_permanent);
7753 
7754 	vm_map_store_entry_unlink(map, entry, false);
7755 	map->size -= e - s;
7756 
7757 	vm_map_zap_append(zap, entry);
7758 }
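
/*
 * Sketch of the zap-list pattern this helper supports: doomed entries
 * are unlinked while the map lock is held and batched on a zap list,
 * then actually freed after the lock is dropped.  This assumes the
 * VM_MAP_ZAP_DECLARE()/vm_map_zap_dispose() helpers used elsewhere in
 * this file; the caller shape is illustrative.
 */
#if 0
static void
example_zap_one_entry(vm_map_t map, vm_map_entry_t doomed)
{
	VM_MAP_ZAP_DECLARE(zap_list);

	vm_map_lock(map);
	/* entry must be unwired and not permanent (asserted above) */
	vm_map_entry_zap(map, doomed, &zap_list);
	vm_map_unlock(map);

	/* free the unlinked entries without holding the map lock */
	vm_map_zap_dispose(&zap_list);
}
#endif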
7759 
7760 static void
7761 vm_map_submap_pmap_clean(
7762 	vm_map_t        map,
7763 	vm_map_offset_t start,
7764 	vm_map_offset_t end,
7765 	vm_map_t        sub_map,
7766 	vm_map_offset_t offset)
7767 {
7768 	vm_map_offset_t submap_start;
7769 	vm_map_offset_t submap_end;
7770 	vm_map_size_t   remove_size;
7771 	vm_map_entry_t  entry;
7772 
7773 	submap_end = offset + (end - start);
7774 	submap_start = offset;
7775 
7776 	vm_map_lock_read(sub_map);
7777 	if (vm_map_lookup_entry(sub_map, offset, &entry)) {
7778 		remove_size = (entry->vme_end - entry->vme_start);
7779 		if (offset > entry->vme_start) {
7780 			remove_size -= offset - entry->vme_start;
7781 		}
7782 
7783 
7784 		if (submap_end < entry->vme_end) {
7785 			remove_size -=
7786 			    entry->vme_end - submap_end;
7787 		}
7788 		if (entry->is_sub_map) {
7789 			vm_map_submap_pmap_clean(
7790 				sub_map,
7791 				start,
7792 				start + remove_size,
7793 				VME_SUBMAP(entry),
7794 				VME_OFFSET(entry));
7795 		} else {
7796 			if (map->mapped_in_other_pmaps &&
7797 			    os_ref_get_count_raw(&map->map_refcnt) != 0 &&
7798 			    VME_OBJECT(entry) != NULL) {
7799 				vm_object_pmap_protect_options(
7800 					VME_OBJECT(entry),
7801 					(VME_OFFSET(entry) +
7802 					offset -
7803 					entry->vme_start),
7804 					remove_size,
7805 					PMAP_NULL,
7806 					PAGE_SIZE,
7807 					entry->vme_start,
7808 					VM_PROT_NONE,
7809 					PMAP_OPTIONS_REMOVE);
7810 			} else {
7811 				pmap_remove(map->pmap,
7812 				    (addr64_t)start,
7813 				    (addr64_t)(start + remove_size));
7814 			}
7815 		}
7816 	}
7817 
7818 	entry = entry->vme_next;
7819 
7820 	while ((entry != vm_map_to_entry(sub_map))
7821 	    && (entry->vme_start < submap_end)) {
7822 		remove_size = (entry->vme_end - entry->vme_start);
7823 		if (submap_end < entry->vme_end) {
7824 			remove_size -= entry->vme_end - submap_end;
7825 		}
7826 		if (entry->is_sub_map) {
7827 			vm_map_submap_pmap_clean(
7828 				sub_map,
7829 				(start + entry->vme_start) - offset,
7830 				((start + entry->vme_start) - offset) + remove_size,
7831 				VME_SUBMAP(entry),
7832 				VME_OFFSET(entry));
7833 		} else {
7834 			if (map->mapped_in_other_pmaps &&
7835 			    os_ref_get_count_raw(&map->map_refcnt) != 0 &&
7836 			    VME_OBJECT(entry) != NULL) {
7837 				vm_object_pmap_protect_options(
7838 					VME_OBJECT(entry),
7839 					VME_OFFSET(entry),
7840 					remove_size,
7841 					PMAP_NULL,
7842 					PAGE_SIZE,
7843 					entry->vme_start,
7844 					VM_PROT_NONE,
7845 					PMAP_OPTIONS_REMOVE);
7846 			} else {
7847 				pmap_remove(map->pmap,
7848 				    (addr64_t)((start + entry->vme_start)
7849 				    - offset),
7850 				    (addr64_t)(((start + entry->vme_start)
7851 				    - offset) + remove_size));
7852 			}
7853 		}
7854 		entry = entry->vme_next;
7855 	}
7856 	vm_map_unlock_read(sub_map);
7857 	return;
7858 }
7859 
7860 /*
7861  *     virt_memory_guard_ast:
7862  *
7863  *     Handle the AST callout for a virtual memory guard.
7864  *     Raise an EXC_GUARD exception and terminate the task
7865  *     if configured to do so.
7866  */
7867 void
7868 virt_memory_guard_ast(
7869 	thread_t thread,
7870 	mach_exception_data_type_t code,
7871 	mach_exception_data_type_t subcode)
7872 {
7873 	task_t task = get_threadtask(thread);
7874 	assert(task != kernel_task);
7875 	assert(task == current_task());
7876 	kern_return_t sync_exception_result;
7877 	uint32_t behavior;
7878 
7879 	behavior = task->task_exc_guard;
7880 
7881 	/* Is delivery enabled */
7882 	if ((behavior & TASK_EXC_GUARD_VM_DELIVER) == 0) {
7883 		return;
7884 	}
7885 
7886 	/* If only once, make sure we're that once */
7887 	while (behavior & TASK_EXC_GUARD_VM_ONCE) {
7888 		uint32_t new_behavior = behavior & ~TASK_EXC_GUARD_VM_DELIVER;
7889 
7890 		if (OSCompareAndSwap(behavior, new_behavior, &task->task_exc_guard)) {
7891 			break;
7892 		}
7893 		behavior = task->task_exc_guard;
7894 		if ((behavior & TASK_EXC_GUARD_VM_DELIVER) == 0) {
7895 			return;
7896 		}
7897 	}
7898 
7899 	/* Raise exception synchronously and see if handler claimed it */
7900 	sync_exception_result = task_exception_notify(EXC_GUARD, code, subcode);
7901 
7902 	if (task->task_exc_guard & TASK_EXC_GUARD_VM_FATAL) {
7903 		/*
7904 		 * If Synchronous EXC_GUARD delivery was successful then
7905 		 * kill the process and return, else kill the process
7906 		 * and deliver the exception via EXC_CORPSE_NOTIFY.
7907 		 */
7908 		if (sync_exception_result == KERN_SUCCESS) {
7909 			task_bsdtask_kill(current_task());
7910 		} else {
7911 			exit_with_guard_exception(current_proc(), code, subcode);
7912 		}
7913 	} else if (task->task_exc_guard & TASK_EXC_GUARD_VM_CORPSE) {
7914 		/*
7915 		 * If the synchronous EXC_GUARD delivery was not successful,
7916 		 * raise a simulated crash.
7917 		 */
7918 		if (sync_exception_result != KERN_SUCCESS) {
7919 			task_violated_guard(code, subcode, NULL, FALSE);
7920 		}
7921 	}
7922 }
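
/*
 * The TASK_EXC_GUARD_VM_ONCE loop above is a compare-and-swap "claim"
 * pattern: only the thread whose OSCompareAndSwap() clears the DELIVER
 * bit goes on to raise the exception.  A generic sketch of the same
 * pattern (the "flags" word and helper name are hypothetical):
 */
#if 0
static boolean_t
claim_one_shot(volatile UInt32 *flags, UInt32 deliver_bit)
{
	UInt32 old_val, new_val;

	for (;;) {
		old_val = *flags;
		if ((old_val & deliver_bit) == 0) {
			return FALSE;   /* another thread already claimed it */
		}
		new_val = old_val & ~deliver_bit;
		if (OSCompareAndSwap(old_val, new_val, flags)) {
			return TRUE;    /* we own the single delivery */
		}
		/* lost the race: reload and retry */
	}
}
#endif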
7923 
7924 /*
7925  *     vm_map_guard_exception:
7926  *
7927  *     Generate a GUARD_TYPE_VIRTUAL_MEMORY EXC_GUARD exception.
7928  *
7929  *     Right now, we do this when we find nothing mapped, or a
7930  *     gap in the mapping when a user address space deallocate
7931  *     was requested. We report the address of the first gap found.
7932  */
7933 static void
7934 vm_map_guard_exception(
7935 	vm_map_offset_t gap_start,
7936 	unsigned reason)
7937 {
7938 	mach_exception_code_t code = 0;
7939 	unsigned int guard_type = GUARD_TYPE_VIRT_MEMORY;
7940 	unsigned int target = 0; /* should we pass in pid associated with map? */
7941 	mach_exception_data_type_t subcode = (uint64_t)gap_start;
7942 	boolean_t fatal = FALSE;
7943 
7944 	task_t task = current_task_early();
7945 
7946 	/* Can't deliver exceptions to a NULL task (early boot) or kernel task */
7947 	if (task == NULL || task == kernel_task) {
7948 		return;
7949 	}
7950 
7951 	EXC_GUARD_ENCODE_TYPE(code, guard_type);
7952 	EXC_GUARD_ENCODE_FLAVOR(code, reason);
7953 	EXC_GUARD_ENCODE_TARGET(code, target);
7954 
7955 	if (task->task_exc_guard & TASK_EXC_GUARD_VM_FATAL) {
7956 		fatal = TRUE;
7957 	}
7958 	thread_guard_violation(current_thread(), code, subcode, fatal);
7959 }
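
/*
 * Decoding sketch: the EXC_GUARD "code" packed above carries the guard
 * type, flavor (reason) and target, while "subcode" carries the
 * faulting address (gap_start).  A hypothetical handler on the
 * receiving side would unpack it with the matching macros from
 * <kern/exc_guard.h>:
 */
#if 0
static void
example_decode_vm_guard(
	mach_exception_code_t      code,
	mach_exception_data_type_t subcode)
{
	unsigned type   = EXC_GUARD_DECODE_GUARD_TYPE(code);   /* GUARD_TYPE_VIRT_MEMORY */
	unsigned flavor = EXC_GUARD_DECODE_GUARD_FLAVOR(code); /* the "reason" passed above */
	unsigned target = EXC_GUARD_DECODE_GUARD_TARGET(code); /* 0 here */
	uint64_t addr   = (uint64_t)subcode;                   /* address of the first gap */

	(void)type; (void)flavor; (void)target; (void)addr;
}
#endif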
7960 
7961 static kern_return_t
7962 vm_map_delete_submap_recurse(
7963 	vm_map_t submap,
7964 	vm_map_offset_t submap_start,
7965 	vm_map_offset_t submap_end)
7966 {
7967 	vm_map_entry_t submap_entry;
7968 
7969 	/*
7970 	 * Verify that the submap does not contain any "permanent" entries
7971 	 * within the specified range.
7972 	 * We do not care about gaps.
7973 	 */
7974 
7975 	vm_map_lock(submap);
7976 
7977 	if (!vm_map_lookup_entry(submap, submap_start, &submap_entry)) {
7978 		submap_entry = submap_entry->vme_next;
7979 	}
7980 
7981 	for (;
7982 	    submap_entry != vm_map_to_entry(submap) &&
7983 	    submap_entry->vme_start < submap_end;
7984 	    submap_entry = submap_entry->vme_next) {
7985 		if (submap_entry->vme_permanent) {
7986 			/* "permanent" entry -> fail */
7987 			vm_map_unlock(submap);
7988 			return KERN_PROTECTION_FAILURE;
7989 		}
7990 	}
7991 	/* no "permanent" entries in the range -> success */
7992 	vm_map_unlock(submap);
7993 	return KERN_SUCCESS;
7994 }
7995 
7996 __abortlike
7997 static void
7998 __vm_map_delete_misaligned_panic(
7999 	vm_map_t                map,
8000 	vm_map_offset_t         start,
8001 	vm_map_offset_t         end)
8002 {
8003 	panic("vm_map_delete(%p,0x%llx,0x%llx): start is not aligned to 0x%x",
8004 	    map, (uint64_t)start, (uint64_t)end, VM_MAP_PAGE_SIZE(map));
8005 }
8006 
8007 __abortlike
8008 static void
8009 __vm_map_delete_failed_panic(
8010 	vm_map_t                map,
8011 	vm_map_offset_t         start,
8012 	vm_map_offset_t         end,
8013 	kern_return_t           kr)
8014 {
8015 	panic("vm_map_delete(%p,0x%llx,0x%llx): failed unexpected with %d",
8016 	    map, (uint64_t)start, (uint64_t)end, kr);
8017 }
8018 
8019 __abortlike
8020 static void
8021 __vm_map_delete_gap_panic(
8022 	vm_map_t                map,
8023 	vm_map_offset_t         where,
8024 	vm_map_offset_t         start,
8025 	vm_map_offset_t         end)
8026 {
8027 	panic("vm_map_delete(%p,0x%llx,0x%llx): no map entry at 0x%llx",
8028 	    map, (uint64_t)start, (uint64_t)end, (uint64_t)where);
8029 }
8030 
8031 __abortlike
8032 static void
8033 __vm_map_delete_permanent_panic(
8034 	vm_map_t                map,
8035 	vm_map_offset_t         start,
8036 	vm_map_offset_t         end,
8037 	vm_map_entry_t          entry)
8038 {
8039 	panic("vm_map_delete(%p,0x%llx,0x%llx): "
8040 	    "Attempting to remove permanent VM map entry %p [0x%llx:0x%llx]",
8041 	    map, (uint64_t)start, (uint64_t)end, entry,
8042 	    (uint64_t)entry->vme_start,
8043 	    (uint64_t)entry->vme_end);
8044 }
8045 
8046 __options_decl(vm_map_delete_state_t, uint32_t, {
8047 	VMDS_NONE               = 0x0000,
8048 
8049 	VMDS_FOUND_GAP          = 0x0001,
8050 	VMDS_GAPS_OK            = 0x0002,
8051 
8052 	VMDS_KERNEL_PMAP        = 0x0004,
8053 	VMDS_NEEDS_LOOKUP       = 0x0008,
8054 	VMDS_NEEDS_WAKEUP       = 0x0010,
8055 	VMDS_KERNEL_KMEMPTR     = 0x0020
8056 });
8057 
8058 /*
8059  *	vm_map_delete:	[ internal use only ]
8060  *
8061  *	Deallocates the given address range from the target map.
8062  *	Removes all user wirings. Unwires one kernel wiring if
8063  *	VM_MAP_REMOVE_KUNWIRE is set.  Waits for kernel wirings to go
8064  *	away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set.  Sleeps
8065  *	interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
8066  *
8067  *
8068  *	When the map is a kernel map, then any error in removing mappings
8069  *	will lead to a panic so that clients do not have to repeat the panic
8070  *	code at each call site.  If VM_MAP_REMOVE_INTERRUPTIBLE
8071  *	is also passed, then KERN_ABORTED will not lead to a panic.
8072  *
8073  *	This routine is called with map locked and leaves map locked.
8074  */
8075 static kmem_return_t
8076 vm_map_delete(
8077 	vm_map_t                map,
8078 	vm_map_offset_t         start,
8079 	vm_map_offset_t         end,
8080 	vmr_flags_t             flags,
8081 	kmem_guard_t            guard,
8082 	vm_map_zap_t            zap_list)
8083 {
8084 	vm_map_entry_t          entry, next;
8085 	int                     interruptible;
8086 	vm_map_offset_t         gap_start = 0;
8087 	vm_map_offset_t         clear_in_transition_end = 0;
8088 	__unused vm_map_offset_t save_start = start;
8089 	__unused vm_map_offset_t save_end = end;
8090 	vm_map_delete_state_t   state = VMDS_NONE;
8091 	kmem_return_t           ret = { };
8092 	vm_map_range_id_t       range_id = 0;
8093 	struct kmem_page_meta  *meta = NULL;
8094 	uint32_t                size_idx, slot_idx;
8095 	struct mach_vm_range    slot;
8096 
8097 	if (vm_map_pmap(map) == kernel_pmap) {
8098 		state |= VMDS_KERNEL_PMAP;
8099 		range_id = kmem_addr_get_range(start, end - start);
8100 		if (kmem_is_ptr_range(range_id)) {
8101 			state |= VMDS_KERNEL_KMEMPTR;
8102 			slot_idx = kmem_addr_get_slot_idx(start, end, range_id, &meta,
8103 			    &size_idx, &slot);
8104 		}
8105 	}
8106 
8107 	if (map->terminated || os_ref_get_count_raw(&map->map_refcnt) == 0) {
8108 		state |= VMDS_GAPS_OK;
8109 	}
8110 
8111 	interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
8112 	    THREAD_ABORTSAFE : THREAD_UNINT;
8113 
8114 	if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) == 0 &&
8115 	    (start & VM_MAP_PAGE_MASK(map))) {
8116 		__vm_map_delete_misaligned_panic(map, start, end);
8117 	}
8118 
8119 	if ((state & VMDS_GAPS_OK) == 0) {
8120 		/*
8121 		 * If the map isn't terminated then all deletions must have
8122 		 * no gaps, and be within the [min, max) of the map.
8123 		 *
8124 		 * We got here without VM_MAP_RANGE_CHECK() being called,
8125 		 * and hence must validate bounds manually.
8126 		 *
8127 		 * It is worth noting that because vm_deallocate() will
8128 		 * round_page() the deallocation size, it's possible for "end"
8129 		 * to be 0 here due to overflow. We hence must treat it as being
8130 		 * beyond vm_map_max(map).
8131 		 *
8132 		 * Similarly, end < start means some wrap-around happened,
8133 		 * which should cause an error or panic.
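		 * (Concrete example: a deallocation of size 0x1000 at
		 * start == 0xFFFFFFFFFFFFF000 computes end == start +
		 * 0x1000, which wraps around to 0.)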
8134 		 */
8135 		if (end == 0 || end > vm_map_max(map)) {
8136 			state |= VMDS_FOUND_GAP;
8137 			gap_start = vm_map_max(map);
8138 			if (state & VMDS_KERNEL_PMAP) {
8139 				__vm_map_delete_gap_panic(map,
8140 				    gap_start, start, end);
8141 			}
8142 			goto out;
8143 		}
8144 
8145 		if (end < start) {
8146 			if (state & VMDS_KERNEL_PMAP) {
8147 				__vm_map_delete_gap_panic(map,
8148 				    vm_map_max(map), start, end);
8149 			}
8150 			ret.kmr_return = KERN_INVALID_ARGUMENT;
8151 			goto out;
8152 		}
8153 
8154 		if (start < vm_map_min(map)) {
8155 			state |= VMDS_FOUND_GAP;
8156 			gap_start = start;
8157 			if (state & VMDS_KERNEL_PMAP) {
8158 				__vm_map_delete_gap_panic(map,
8159 				    gap_start, start, end);
8160 			}
8161 			goto out;
8162 		}
8163 	} else {
8164 		/*
8165 		 * If the map is terminated, we must accept start/end
8166 		 * being beyond the boundaries of the map as this is
8167 		 * how some of the mappings like commpage mappings
8168 		 * can be destroyed (they're outside of those bounds).
8169 		 *
8170 		 * end < start is still something we can't cope with,
8171 		 * so just bail.
8172 		 */
8173 		if (end < start) {
8174 			goto out;
8175 		}
8176 	}
8177 
8178 
8179 	/*
8180 	 *	Find the start of the region.
8181 	 *
8182 	 *	If in a superpage, extend the range
8183 	 *	to include the start of the mapping.
8184 	 */
8185 	while (vm_map_lookup_entry_or_next(map, start, &entry)) {
8186 		if (entry->superpage_size && (start & ~SUPERPAGE_MASK)) {
8187 			start = SUPERPAGE_ROUND_DOWN(start);
8188 		} else {
8189 			SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
8190 			break;
8191 		}
8192 	}
8193 
8194 	if (entry->superpage_size) {
8195 		end = SUPERPAGE_ROUND_UP(end);
8196 	}
8197 
8198 	/*
8199 	 *	Step through all entries in this region
8200 	 */
8201 	for (vm_map_offset_t s = start; s < end;) {
8202 		/*
8203 		 * At this point, we have deleted all the memory entries
8204 		 * in [start, s) and are proceeding with the [s, end) range.
8205 		 *
8206 		 * This loop might drop the map lock, and it is possible that
8207 		 * some memory was already reallocated within [start, s)
8208 		 * and we don't want to mess with those entries.
8209 		 *
8210 		 * Some of those entries could even have been re-assembled
8211 		 * with an entry after "s" (in vm_map_simplify_entry()), so
8212 		 * we may have to vm_map_clip_start() again.
8213 		 *
8214 		 * When clear_in_transition_end is set, we had marked
8215 		 * [start, clear_in_transition_end) as "in_transition"
8216 		 * during a previous iteration and we need to clear it.
8217 		 */
8218 
8219 		/*
8220 		 * Step 1: If needed (because we dropped locks),
8221 		 *         lookup the entry again.
8222 		 *
8223 		 *         If we're coming back from unwiring (Step 5),
8224 		 *         we also need to mark the entries as no longer
8225 		 *         in transition after that.
8226 		 */
8227 
8228 		if (state & VMDS_NEEDS_LOOKUP) {
8229 			state &= ~VMDS_NEEDS_LOOKUP;
8230 
8231 			if (vm_map_lookup_entry_or_next(map, s, &entry)) {
8232 				SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
8233 			}
8234 
8235 			if (state & VMDS_KERNEL_KMEMPTR) {
8236 				kmem_validate_slot(s, meta, size_idx, slot_idx);
8237 			}
8238 		}
8239 
8240 		if (clear_in_transition_end) {
8241 			for (vm_map_entry_t it = entry;
8242 			    it != vm_map_to_entry(map) &&
8243 			    it->vme_start < clear_in_transition_end;
8244 			    it = it->vme_next) {
8245 				assert(it->in_transition);
8246 				it->in_transition = FALSE;
8247 				if (it->needs_wakeup) {
8248 					it->needs_wakeup = FALSE;
8249 					state |= VMDS_NEEDS_WAKEUP;
8250 				}
8251 			}
8252 
8253 			clear_in_transition_end = 0;
8254 		}
8255 
8256 
8257 		/*
8258 		 * Step 2: Perform various policy checks
8259 		 *         before we do _anything_ to this entry.
8260 		 */
8261 
8262 		if (entry == vm_map_to_entry(map) || s < entry->vme_start) {
8263 			if (state & (VMDS_GAPS_OK | VMDS_FOUND_GAP)) {
8264 				/*
8265 				 * Either we found a gap already,
8266 				 * or we are tearing down a map,
8267 				 * keep going.
8268 				 */
8269 			} else if (state & VMDS_KERNEL_PMAP) {
8270 				__vm_map_delete_gap_panic(map, s, start, end);
8271 			} else if (s < end) {
8272 				state |= VMDS_FOUND_GAP;
8273 				gap_start = s;
8274 			}
8275 
8276 			if (entry == vm_map_to_entry(map) ||
8277 			    end <= entry->vme_start) {
8278 				break;
8279 			}
8280 
8281 			s = entry->vme_start;
8282 		}
8283 
8284 		if (state & VMDS_KERNEL_PMAP) {
8285 			/*
8286 			 * In the kernel map and its submaps,
8287 			 * permanent entries never die, even
8288 			 * if VM_MAP_REMOVE_IMMUTABLE is passed.
8289 			 */
8290 			if (entry->vme_permanent) {
8291 				__vm_map_delete_permanent_panic(map, start, end, entry);
8292 			}
8293 
8294 			if (flags & VM_MAP_REMOVE_GUESS_SIZE) {
8295 				end = entry->vme_end;
8296 				flags &= ~VM_MAP_REMOVE_GUESS_SIZE;
8297 			}
8298 
8299 			/*
8300 			 * In the kernel map and its submaps,
8301 			 * the removal of an atomic/guarded entry is strict.
8302 			 *
8303 			 * An atomic entry is processed only if it was
8304 			 * specifically targeted.
8305 			 *
8306 			 * We might have deleted non-atomic entries before
8307 			 * we reach this point, however...
8308 			 */
8309 			kmem_entry_validate_guard(map, entry,
8310 			    start, end - start, guard);
8311 		}
8312 
8313 		/*
8314 		 * Step 2.1: handle "permanent" and "submap" entries
8315 		 * *before* clipping to avoid triggering some unnecessary
8316 		 * un-nesting of the shared region.
8317 		 */
8318 		if (entry->vme_permanent && entry->is_sub_map) {
8319 //			printf("FBDP %s:%d permanent submap...\n", __FUNCTION__, __LINE__);
8320 			/*
8321 			 * Un-mapping a "permanent" mapping of a user-space
8322 			 * submap is not allowed unless...
8323 			 */
8324 			if (flags & VM_MAP_REMOVE_IMMUTABLE) {
8325 				/*
8326 				 * a. explicitly requested by the kernel caller.
8327 				 */
8328 //				printf("FBDP %s:%d flags & REMOVE_IMMUTABLE\n", __FUNCTION__, __LINE__);
8329 			} else if ((flags & VM_MAP_REMOVE_IMMUTABLE_CODE) &&
8330 			    developer_mode_state()) {
8331 				/*
8332 				 * b. we're in "developer" mode (for
8333 				 *    breakpoints, dtrace probes, ...).
8334 				 */
8335 //				printf("FBDP %s:%d flags & REMOVE_IMMUTABLE_CODE\n", __FUNCTION__, __LINE__);
8336 			} else if (map->terminated) {
8337 				/*
8338 				 * c. this is the final address space cleanup.
8339 				 */
8340 //				printf("FBDP %s:%d map->terminated\n", __FUNCTION__, __LINE__);
8341 			} else {
8342 				vm_map_offset_t submap_start, submap_end;
8343 				kern_return_t submap_kr;
8344 
8345 				/*
8346 				 * Check if there are any "permanent" mappings
8347 				 * in this range in the submap.
8348 				 */
8349 				if (entry->in_transition) {
8350 					/* can that even happen ? */
8351 					goto in_transition;
8352 				}
8353 				/* compute the clipped range in the submap */
8354 				submap_start = s - entry->vme_start;
8355 				submap_start += VME_OFFSET(entry);
8356 				submap_end = end - entry->vme_start;
8357 				submap_end += VME_OFFSET(entry);
8358 				submap_kr = vm_map_delete_submap_recurse(
8359 					VME_SUBMAP(entry),
8360 					submap_start,
8361 					submap_end);
8362 				if (submap_kr != KERN_SUCCESS) {
8363 					/*
8364 					 * There are some "permanent" mappings
8365 					 * in the submap: we are not allowed
8366 					 * to remove this range.
8367 					 */
8368 					printf("%d[%s] removing permanent submap entry "
8369 					    "%p [0x%llx:0x%llx] prot 0x%x/0x%x -> KERN_PROT_FAILURE\n",
8370 					    proc_selfpid(),
8371 					    (get_bsdtask_info(current_task())
8372 					    ? proc_name_address(get_bsdtask_info(current_task()))
8373 					    : "?"), entry,
8374 					    (uint64_t)entry->vme_start,
8375 					    (uint64_t)entry->vme_end,
8376 					    entry->protection,
8377 					    entry->max_protection);
8378 					DTRACE_VM6(vm_map_delete_permanent_deny_submap,
8379 					    vm_map_entry_t, entry,
8380 					    vm_map_offset_t, entry->vme_start,
8381 					    vm_map_offset_t, entry->vme_end,
8382 					    vm_prot_t, entry->protection,
8383 					    vm_prot_t, entry->max_protection,
8384 					    int, VME_ALIAS(entry));
8385 					ret.kmr_return = KERN_PROTECTION_FAILURE;
8386 					goto out;
8387 				}
8388 				/* no permanent mappings: proceed */
8389 			}
8390 		}
8391 
8392 		/*
8393 		 * Step 3: Perform any clipping needed.
8394 		 *
8395 		 *         After this, "entry" starts at "s", ends before "end"
8396 		 */
8397 
8398 		if (entry->vme_start < s) {
8399 			if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) &&
8400 			    entry->map_aligned &&
8401 			    !VM_MAP_PAGE_ALIGNED(s, VM_MAP_PAGE_MASK(map))) {
8402 				/*
8403 				 * The entry will no longer be map-aligned
8404 				 * after clipping and the caller said it's OK.
8405 				 */
8406 				entry->map_aligned = FALSE;
8407 			}
8408 			vm_map_clip_start(map, entry, s);
8409 			SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
8410 		}
8411 
8412 		if (end < entry->vme_end) {
8413 			if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) &&
8414 			    entry->map_aligned &&
8415 			    !VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))) {
8416 				/*
8417 				 * The entry will no longer be map-aligned
8418 				 * after clipping and the caller said it's OK.
8419 				 */
8420 				entry->map_aligned = FALSE;
8421 			}
8422 			vm_map_clip_end(map, entry, end);
8423 		}
8424 
8425 		if (entry->vme_permanent && entry->is_sub_map) {
8426 			/*
8427 			 * We already went through step 2.1 which did not deny
8428 			 * the removal of this "permanent" and "is_sub_map"
8429 			 * entry.
8430 			 * Now that we've clipped what we actually want to
8431 			 * delete, undo the "permanent" part to allow the
8432 			 * removal to proceed.
8433 			 */
8434 			DTRACE_VM6(vm_map_delete_permanent_allow_submap,
8435 			    vm_map_entry_t, entry,
8436 			    vm_map_offset_t, entry->vme_start,
8437 			    vm_map_offset_t, entry->vme_end,
8438 			    vm_prot_t, entry->protection,
8439 			    vm_prot_t, entry->max_protection,
8440 			    int, VME_ALIAS(entry));
8441 			entry->vme_permanent = false;
8442 		}
8443 
8444 		assert(s == entry->vme_start);
8445 		assert(entry->vme_end <= end);
8446 
8447 
8448 		/*
8449 		 * Step 4: If the entry is in flux, wait for this to resolve.
8450 		 */
8451 
8452 		if (entry->in_transition) {
8453 			wait_result_t wait_result;
8454 
8455 in_transition:
8456 			/*
8457 			 * Another thread is wiring/unwiring this entry.
8458 			 * Let the other thread know we are waiting.
8459 			 */
8460 
8461 			entry->needs_wakeup = TRUE;
8462 
8463 			/*
8464 			 * wake up anybody waiting on entries that we have
8465 			 * already unwired/deleted.
8466 			 */
8467 			if (state & VMDS_NEEDS_WAKEUP) {
8468 				vm_map_entry_wakeup(map);
8469 				state &= ~VMDS_NEEDS_WAKEUP;
8470 			}
8471 
8472 			wait_result = vm_map_entry_wait(map, interruptible);
8473 
8474 			if (interruptible &&
8475 			    wait_result == THREAD_INTERRUPTED) {
8476 				/*
8477 				 * We do not clear the needs_wakeup flag,
8478 				 * since we cannot tell if we were the only one.
8479 				 */
8480 				ret.kmr_return = KERN_ABORTED;
8481 				return ret;
8482 			}
8483 
8484 			/*
8485 			 * The entry could have been clipped or it
8486 			 * may not exist anymore.  Look it up again.
8487 			 */
8488 			state |= VMDS_NEEDS_LOOKUP;
8489 			continue;
8490 		}
8491 
8492 
8493 		/*
8494 		 * Step 5: Handle wiring
8495 		 */
8496 
8497 		if (entry->wired_count) {
8498 			struct vm_map_entry tmp_entry;
8499 			boolean_t           user_wire;
8500 			unsigned int        last_timestamp;
8501 
8502 			user_wire = entry->user_wired_count > 0;
8503 
8504 			/*
8505 			 *      Remove a kernel wiring if requested
8506 			 */
8507 			if (flags & VM_MAP_REMOVE_KUNWIRE) {
8508 				entry->wired_count--;
8509 				vme_btref_consider_and_put(entry);
8510 			}
8511 
8512 			/*
8513 			 *	Remove all user wirings for proper accounting
8514 			 */
8515 			while (entry->user_wired_count) {
8516 				subtract_wire_counts(map, entry, user_wire);
8517 			}
8518 
8519 			/*
8520 			 * All our DMA I/O operations in IOKit are currently
8521 			 * done by wiring through the map entries of the task
8522 			 * requesting the I/O.
8523 			 *
8524 			 * Because of this, we must always wait for kernel wirings
8525 			 * to go away on the entries before deleting them.
8526 			 *
8527 			 * Any caller who wants to actually remove a kernel wiring
8528 			 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
8529 			 * properly remove one wiring instead of blasting through
8530 			 * them all.
8531 			 */
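			/*
			 * For example (sketch; "addr" and "size" are
			 * hypothetical), a subsystem that owns one kernel
			 * wiring on a range would remove it with:
			 *
			 *	vm_map_remove_guard(map, addr, addr + size,
			 *	    VM_MAP_REMOVE_KUNWIRE, KMEM_GUARD_NONE);
			 *
			 * which drops exactly one kernel wiring (see the
			 * KUNWIRE step above) before deleting the entries.
			 */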
8532 			if (entry->wired_count != 0) {
8533 				assert(map != kernel_map);
8534 				/*
8535 				 * Cannot continue.  Typical case is when
8536 				 * a user thread has physical I/O pending
8537 				 * on this page.  Either wait for the
8538 				 * kernel wiring to go away or return an
8539 				 * error.
8540 				 */
8541 				wait_result_t wait_result;
8542 
8543 				entry->needs_wakeup = TRUE;
8544 				wait_result = vm_map_entry_wait(map,
8545 				    interruptible);
8546 
8547 				if (interruptible &&
8548 				    wait_result == THREAD_INTERRUPTED) {
8549 					/*
8550 					 * We do not clear the
8551 					 * needs_wakeup flag, since we
8552 					 * cannot tell if we were the
8553 					 * only one.
8554 					 */
8555 					ret.kmr_return = KERN_ABORTED;
8556 					return ret;
8557 				}
8558 
8559 
8560 				/*
8561 				 * The entry could have been clipped or
8562 				 * it may not exist anymore.  Look it
8563 				 * up again.
8564 				 */
8565 				state |= VMDS_NEEDS_LOOKUP;
8566 				continue;
8567 			}
8568 
8569 			/*
8570 			 * We can unlock the map now.
8571 			 *
8572 			 * The entry might be split once we unlock the map,
8573 			 * but we need the range as defined by this entry
8574 			 * to be stable. So we must make a local copy.
8575 			 *
8576 			 * The underlying objects do not change during clips,
8577 			 * and the in_transition state guarantees existence
8578 			 * of the entry.
8579 			 */
8580 			last_timestamp = map->timestamp;
8581 			entry->in_transition = TRUE;
8582 			tmp_entry = *entry;
8583 			vm_map_unlock(map);
8584 
8585 			if (tmp_entry.is_sub_map) {
8586 				vm_map_t sub_map;
8587 				vm_map_offset_t sub_start, sub_end;
8588 				pmap_t pmap;
8589 				vm_map_offset_t pmap_addr;
8590 
8591 
8592 				sub_map = VME_SUBMAP(&tmp_entry);
8593 				sub_start = VME_OFFSET(&tmp_entry);
8594 				sub_end = sub_start + (tmp_entry.vme_end -
8595 				    tmp_entry.vme_start);
8596 				if (tmp_entry.use_pmap) {
8597 					pmap = sub_map->pmap;
8598 					pmap_addr = tmp_entry.vme_start;
8599 				} else {
8600 					pmap = map->pmap;
8601 					pmap_addr = tmp_entry.vme_start;
8602 				}
8603 				(void) vm_map_unwire_nested(sub_map,
8604 				    sub_start, sub_end,
8605 				    user_wire,
8606 				    pmap, pmap_addr);
8607 			} else {
8608 				vm_map_offset_t entry_end = tmp_entry.vme_end;
8609 				vm_map_offset_t max_end;
8610 
8611 				if (flags & VM_MAP_REMOVE_NOKUNWIRE_LAST) {
8612 					max_end = end - VM_MAP_PAGE_SIZE(map);
8613 					if (entry_end > max_end) {
8614 						entry_end = max_end;
8615 					}
8616 				}
8617 
8618 				if (tmp_entry.vme_kernel_object) {
8619 					pmap_protect_options(
8620 						map->pmap,
8621 						tmp_entry.vme_start,
8622 						entry_end,
8623 						VM_PROT_NONE,
8624 						PMAP_OPTIONS_REMOVE,
8625 						NULL);
8626 				}
8627 				vm_fault_unwire(map, &tmp_entry,
8628 				    tmp_entry.vme_kernel_object, map->pmap,
8629 				    tmp_entry.vme_start, entry_end);
8630 			}
8631 
8632 			vm_map_lock(map);
8633 
8634 			/*
8635 			 * Unwiring happened, we can now go back to deleting
8636 			 * them (after we clear the in_transition bit for the range).
8637 			 */
8638 			if (last_timestamp + 1 != map->timestamp) {
8639 				state |= VMDS_NEEDS_LOOKUP;
8640 			}
8641 			clear_in_transition_end = tmp_entry.vme_end;
8642 			continue;
8643 		}
8644 
8645 		assert(entry->wired_count == 0);
8646 		assert(entry->user_wired_count == 0);
8647 
8648 
8649 		/*
8650 		 * Step 6: Entry is unwired and ready for us to delete !
8651 		 * Step 6: Entry is unwired and ready for us to delete!
8652 
8653 		if (!entry->vme_permanent) {
8654 			/*
8655 			 * Typical case: the entry really shouldn't be permanent
8656 			 */
8657 		} else if ((flags & VM_MAP_REMOVE_IMMUTABLE_CODE) &&
8658 		    (entry->protection & VM_PROT_EXECUTE) &&
8659 		    developer_mode_state()) {
8660 			/*
8661 			 * Allow debuggers to undo executable mappings
8662 			 * when developer mode is on.
8663 			 */
8664 #if 0
8665 			printf("FBDP %d[%s] removing permanent executable entry "
8666 			    "%p [0x%llx:0x%llx] prot 0x%x/0x%x\n",
8667 			    proc_selfpid(),
8668 			    (current_task()->bsd_info
8669 			    ? proc_name_address(current_task()->bsd_info)
8670 			    : "?"), entry,
8671 			    (uint64_t)entry->vme_start,
8672 			    (uint64_t)entry->vme_end,
8673 			    entry->protection,
8674 			    entry->max_protection);
8675 #endif
8676 			entry->vme_permanent = FALSE;
8677 		} else if ((flags & VM_MAP_REMOVE_IMMUTABLE) || map->terminated) {
8678 #if 0
8679 			printf("FBDP %d[%s] removing permanent entry "
8680 			    "%p [0x%llx:0x%llx] prot 0x%x/0x%x\n",
8681 			    proc_selfpid(),
8682 			    (current_task()->bsd_info
8683 			    ? proc_name_address(current_task()->bsd_info)
8684 			    : "?"), entry,
8685 			    (uint64_t)entry->vme_start,
8686 			    (uint64_t)entry->vme_end,
8687 			    entry->protection,
8688 			    entry->max_protection);
8689 #endif
8690 			entry->vme_permanent = FALSE;
8691 #if CODE_SIGNING_MONITOR
8692 		} else if ((entry->protection & VM_PROT_EXECUTE) && !csm_enabled()) {
8693 			entry->vme_permanent = FALSE;
8694 
8695 			printf("%d[%s] %s(0x%llx,0x%llx): "
8696 			    "code signing monitor disabled, allowing for permanent executable entry [0x%llx:0x%llx] "
8697 			    "prot 0x%x/0x%x\n",
8698 			    proc_selfpid(),
8699 			    (get_bsdtask_info(current_task())
8700 			    ? proc_name_address(get_bsdtask_info(current_task()))
8701 			    : "?"),
8702 			    __FUNCTION__,
8703 			    (uint64_t)start,
8704 			    (uint64_t)end,
8705 			    (uint64_t)entry->vme_start,
8706 			    (uint64_t)entry->vme_end,
8707 			    entry->protection,
8708 			    entry->max_protection);
8709 #endif
8710 		} else {
8711 			DTRACE_VM6(vm_map_delete_permanent,
8712 			    vm_map_entry_t, entry,
8713 			    vm_map_offset_t, entry->vme_start,
8714 			    vm_map_offset_t, entry->vme_end,
8715 			    vm_prot_t, entry->protection,
8716 			    vm_prot_t, entry->max_protection,
8717 			    int, VME_ALIAS(entry));
8718 		}
8719 
8720 		if (entry->is_sub_map) {
8721 			assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
8722 			    "map %p (%d) entry %p submap %p (%d)\n",
8723 			    map, VM_MAP_PAGE_SHIFT(map), entry,
8724 			    VME_SUBMAP(entry),
8725 			    VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
8726 			if (entry->use_pmap) {
8727 #ifndef NO_NESTED_PMAP
8728 				int pmap_flags;
8729 
8730 				if (map->terminated) {
8731 					/*
8732 					 * This is the final cleanup of the
8733 					 * address space being terminated.
8734 					 * No new mappings are expected and
8735 					 * we don't really need to unnest the
8736 					 * shared region (and lose the "global"
8737 					 * pmap mappings, if applicable).
8738 					 *
8739 					 * Tell the pmap layer that we're
8740 					 * "clean" wrt nesting.
8741 					 */
8742 					pmap_flags = PMAP_UNNEST_CLEAN;
8743 				} else {
8744 					/*
8745 					 * We're unmapping part of the nested
8746 					 * shared region, so we can't keep the
8747 					 * nested pmap.
8748 					 */
8749 					pmap_flags = 0;
8750 				}
8751 				pmap_unnest_options(
8752 					map->pmap,
8753 					(addr64_t)entry->vme_start,
8754 					entry->vme_end - entry->vme_start,
8755 					pmap_flags);
8756 #endif  /* NO_NESTED_PMAP */
8757 				if (map->mapped_in_other_pmaps &&
8758 				    os_ref_get_count_raw(&map->map_refcnt) != 0) {
8759 					/* clean up parent map/maps */
8760 					vm_map_submap_pmap_clean(
8761 						map, entry->vme_start,
8762 						entry->vme_end,
8763 						VME_SUBMAP(entry),
8764 						VME_OFFSET(entry));
8765 				}
8766 			} else {
8767 				vm_map_submap_pmap_clean(
8768 					map, entry->vme_start, entry->vme_end,
8769 					VME_SUBMAP(entry),
8770 					VME_OFFSET(entry));
8771 			}
8772 		} else if (entry->vme_kernel_object ||
8773 		    VME_OBJECT(entry) == compressor_object) {
8774 			/*
8775 			 * nothing to do
8776 			 */
8777 		} else if (map->mapped_in_other_pmaps &&
8778 		    os_ref_get_count_raw(&map->map_refcnt) != 0) {
8779 			vm_object_pmap_protect_options(
8780 				VME_OBJECT(entry), VME_OFFSET(entry),
8781 				entry->vme_end - entry->vme_start,
8782 				PMAP_NULL,
8783 				PAGE_SIZE,
8784 				entry->vme_start,
8785 				VM_PROT_NONE,
8786 				PMAP_OPTIONS_REMOVE);
8787 		} else if ((VME_OBJECT(entry) != VM_OBJECT_NULL) ||
8788 		    (state & VMDS_KERNEL_PMAP)) {
8789 			/* Remove translations associated
8790 			 * with this range unless the entry
8791 			 * does not have an object, or
8792 			 * it's the kernel map or a descendant
8793 			 * since the platform could potentially
8794 			 * create "backdoor" mappings invisible
8795 			 * to the VM. It is expected that
8796 			 * objectless, non-kernel ranges
8797 			 * do not have such VM invisible
8798 			 * translations.
8799 			 */
8800 			pmap_remove_options(map->pmap,
8801 			    (addr64_t)entry->vme_start,
8802 			    (addr64_t)entry->vme_end,
8803 			    PMAP_OPTIONS_REMOVE);
8804 		}
8805 
8806 #if DEBUG
8807 		/*
8808 		 * All pmap mappings for this map entry must have been
8809 		 * cleared by now.
8810 		 */
8811 		assert(pmap_is_empty(map->pmap,
8812 		    entry->vme_start,
8813 		    entry->vme_end));
8814 #endif /* DEBUG */
8815 
8816 		if (entry->iokit_acct) {
8817 			/* alternate accounting */
8818 			DTRACE_VM4(vm_map_iokit_unmapped_region,
8819 			    vm_map_t, map,
8820 			    vm_map_offset_t, entry->vme_start,
8821 			    vm_map_offset_t, entry->vme_end,
8822 			    int, VME_ALIAS(entry));
8823 			vm_map_iokit_unmapped_region(map,
8824 			    (entry->vme_end -
8825 			    entry->vme_start));
8826 			entry->iokit_acct = FALSE;
8827 			entry->use_pmap = FALSE;
8828 		}
8829 
8830 		/* move "s" forward */
8831 		s    = entry->vme_end;
8832 		next = entry->vme_next;
8833 		if (!entry->map_aligned) {
8834 			vm_map_offset_t rounded_s;
8835 
8836 			/*
8837 			 * Skip artificial gap due to mis-aligned entry
8838 			 * on devices with a page size smaller than the
8839 			 * map's page size (i.e. 16k task on a 4k device).
8840 			 */
8841 			rounded_s = VM_MAP_ROUND_PAGE(s, VM_MAP_PAGE_MASK(map));
8842 			if (next == vm_map_to_entry(map)) {
8843 				s = rounded_s;
8844 			} else if (s < rounded_s) {
8845 				s = MIN(rounded_s, next->vme_start);
8846 			}
8847 		}
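		/*
		 * Worked example (hypothetical numbers) for a 16k map on a
		 * 4k device: if this entry ends at 0x5000, rounded_s is
		 * 0x8000; with a next entry starting at 0x6000, "s" only
		 * advances to 0x6000, otherwise it advances to 0x8000.
		 */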
8848 		ret.kmr_size += s - entry->vme_start;
8849 
8850 		if (entry->vme_permanent) {
8851 			/*
8852 			 * A permanent entry can not be removed, so leave it
8853 			 * in place but remove all access permissions.
8854 			 */
8855 			if (!entry->csm_associated) {
8856 				printf("%s:%d %d[%s] map %p entry %p [ 0x%llx - 0x%llx ] submap %d prot 0x%x/0x%x -> 0/0\n",
8857 				    __FUNCTION__, __LINE__,
8858 				    proc_selfpid(),
8859 				    (get_bsdtask_info(current_task())
8860 				    ? proc_name_address(get_bsdtask_info(current_task()))
8861 				    : "?"),
8862 				    map,
8863 				    entry,
8864 				    (uint64_t)entry->vme_start,
8865 				    (uint64_t)entry->vme_end,
8866 				    entry->is_sub_map,
8867 				    entry->protection,
8868 				    entry->max_protection);
8869 			}
8870 			DTRACE_VM6(vm_map_delete_permanent_prot_none,
8871 			    vm_map_entry_t, entry,
8872 			    vm_map_offset_t, entry->vme_start,
8873 			    vm_map_offset_t, entry->vme_end,
8874 			    vm_prot_t, entry->protection,
8875 			    vm_prot_t, entry->max_protection,
8876 			    int, VME_ALIAS(entry));
8877 			entry->protection = VM_PROT_NONE;
8878 			entry->max_protection = VM_PROT_NONE;
8879 		} else {
8880 			vm_map_entry_zap(map, entry, zap_list);
8881 		}
8882 
8883 		entry = next;
8884 		next  = VM_MAP_ENTRY_NULL;
8885 
8886 		if ((flags & VM_MAP_REMOVE_NO_YIELD) == 0 && s < end) {
8887 			unsigned int last_timestamp = map->timestamp++;
8888 
8889 			if (lck_rw_lock_yield_exclusive(&map->lock,
8890 			    LCK_RW_YIELD_ANY_WAITER)) {
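				/*
				 * We did yield: any timestamp other than
				 * last_timestamp + 1 means another thread
				 * modified the map while it was unlocked,
				 * so our entry pointers may be stale and
				 * must be looked up again.
				 */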
8891 				if (last_timestamp + 1 != map->timestamp) {
8892 					state |= VMDS_NEEDS_LOOKUP;
8893 				}
8894 			} else {
8895 				/* we didn't yield, undo our change */
8896 				map->timestamp--;
8897 			}
8898 		}
8899 	}
8900 
8901 	if (map->wait_for_space) {
8902 		thread_wakeup((event_t) map);
8903 	}
8904 
8905 	if (state & VMDS_NEEDS_WAKEUP) {
8906 		vm_map_entry_wakeup(map);
8907 	}
8908 
8909 out:
8910 	if ((state & VMDS_KERNEL_PMAP) && ret.kmr_return) {
8911 		__vm_map_delete_failed_panic(map, start, end, ret.kmr_return);
8912 	}
8913 
8914 	if (state & VMDS_KERNEL_KMEMPTR) {
8915 		kmem_free_space(start, end, range_id, &slot);
8916 	}
8917 
8918 	if (state & VMDS_FOUND_GAP) {
8919 		DTRACE_VM3(kern_vm_deallocate_gap,
8920 		    vm_map_offset_t, gap_start,
8921 		    vm_map_offset_t, save_start,
8922 		    vm_map_offset_t, save_end);
8923 		if (flags & VM_MAP_REMOVE_GAPS_FAIL) {
8924 			ret.kmr_return = KERN_INVALID_VALUE;
8925 		} else {
8926 			vm_map_guard_exception(gap_start, kGUARD_EXC_DEALLOC_GAP);
8927 		}
8928 	}
8929 
8930 	return ret;
8931 }
8932 
8933 kmem_return_t
8934 vm_map_remove_and_unlock(
8935 	vm_map_t        map,
8936 	vm_map_offset_t start,
8937 	vm_map_offset_t end,
8938 	vmr_flags_t     flags,
8939 	kmem_guard_t    guard)
8940 {
8941 	kmem_return_t ret;
8942 	VM_MAP_ZAP_DECLARE(zap);
8943 
8944 	ret = vm_map_delete(map, start, end, flags, guard, &zap);
8945 	vm_map_unlock(map);
8946 
8947 	vm_map_zap_dispose(&zap);
8948 
8949 	return ret;
8950 }
8951 
8952 /*
8953  *	vm_map_remove_guard:
8954  *
8955  *	Remove the given address range from the target map.
8956  *	This is the exported form of vm_map_delete.
8957  */
8958 kmem_return_t
8959 vm_map_remove_guard(
8960 	vm_map_t        map,
8961 	vm_map_offset_t start,
8962 	vm_map_offset_t end,
8963 	vmr_flags_t     flags,
8964 	kmem_guard_t    guard)
8965 {
8966 	vm_map_lock(map);
8967 	return vm_map_remove_and_unlock(map, start, end, flags, guard);
8968 }
8969 
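/*
 * Example use (sketch; "addr" and "size" are hypothetical): remove a
 * range from a map, failing with KERN_INVALID_VALUE if the range has a
 * gap, rather than raising a guard exception:
 *
 *	kmem_return_t kmr;
 *
 *	kmr = vm_map_remove_guard(map, addr, addr + size,
 *	    VM_MAP_REMOVE_GAPS_FAIL, KMEM_GUARD_NONE);
 *	if (kmr.kmr_return != KERN_SUCCESS) {
 *		...
 *	}
 */
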
8970 /*
8971  *	vm_map_terminate:
8972  *
8973  *	Clean out a task's map.
8974  */
8975 kern_return_t
8976 vm_map_terminate(
8977 	vm_map_t        map)
8978 {
8979 	vm_map_lock(map);
8980 	map->terminated = TRUE;
8981 	vm_map_disable_hole_optimization(map);
8982 	(void)vm_map_remove_and_unlock(map, map->min_offset, map->max_offset,
8983 	    VM_MAP_REMOVE_NO_FLAGS, KMEM_GUARD_NONE);
8984 	return KERN_SUCCESS;
8985 }
8986 
8987 /*
8988  *	Routine:	vm_map_copy_allocate
8989  *
8990  *	Description:
8991  *		Allocates and initializes a map copy object.
8992  */
8993 static vm_map_copy_t
8994 vm_map_copy_allocate(uint16_t type)
8995 {
8996 	vm_map_copy_t new_copy;
8997 
8998 	new_copy = zalloc_id(ZONE_ID_VM_MAP_COPY, Z_WAITOK | Z_ZERO);
8999 	new_copy->type = type;
9000 	if (type == VM_MAP_COPY_ENTRY_LIST) {
9001 		new_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
9002 		vm_map_store_init(&new_copy->cpy_hdr);
9003 	}
9004 	return new_copy;
9005 }
9006 
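/*
 * Sketch of the expected pairing (field values hypothetical): a copy
 * object allocated here is either consumed on success by a copyout or
 * overwrite operation, or must be released with vm_map_copy_discard():
 *
 *	vm_map_copy_t copy;
 *
 *	copy = vm_map_copy_allocate(VM_MAP_COPY_ENTRY_LIST);
 *	copy->offset = offset;
 *	copy->size = size;
 *	copy->cpy_hdr.page_shift = VM_MAP_PAGE_SHIFT(map);
 *	...
 *	if (kr != KERN_SUCCESS)
 *		vm_map_copy_discard(copy);
 */
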
9007 /*
9008  *	Routine:	vm_map_copy_discard
9009  *
9010  *	Description:
9011  *		Dispose of a map copy object (returned by
9012  *		vm_map_copyin).
9013  */
9014 void
9015 vm_map_copy_discard(
9016 	vm_map_copy_t   copy)
9017 {
9018 	if (copy == VM_MAP_COPY_NULL) {
9019 		return;
9020 	}
9021 
9022 	/*
9023 	 * Assert that the vm_map_copy is coming from the right
9024 	 * zone and hasn't been forged
9025 	 */
9026 	vm_map_copy_require(copy);
9027 
9028 	switch (copy->type) {
9029 	case VM_MAP_COPY_ENTRY_LIST:
9030 		while (vm_map_copy_first_entry(copy) !=
9031 		    vm_map_copy_to_entry(copy)) {
9032 			vm_map_entry_t  entry = vm_map_copy_first_entry(copy);
9033 
9034 			vm_map_copy_entry_unlink(copy, entry);
9035 			if (entry->is_sub_map) {
9036 				vm_map_deallocate(VME_SUBMAP(entry));
9037 			} else {
9038 				vm_object_deallocate(VME_OBJECT(entry));
9039 			}
9040 			vm_map_copy_entry_dispose(entry);
9041 		}
9042 		break;
9043 	case VM_MAP_COPY_KERNEL_BUFFER:
9044 
9045 		/*
9046 		 * The vm_map_copy_t and possibly the data buffer were
9047 		 * allocated by a single call to kalloc_data(), i.e. the
9048 		 * vm_map_copy_t was not allocated out of the zone.
9049 		 */
9050 		if (copy->size > msg_ool_size_small || copy->offset) {
9051 			panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld",
9052 			    (long long)copy->size, (long long)copy->offset);
9053 		}
9054 		kfree_data(copy->cpy_kdata, copy->size);
9055 	}
9056 	zfree_id(ZONE_ID_VM_MAP_COPY, copy);
9057 }
9058 
9059 #if XNU_PLATFORM_MacOSX
9060 
9061 /*
9062  *	Routine:	vm_map_copy_copy
9063  *
9064  *	Description:
9065  *			Move the information in a map copy object to
9066  *			a new map copy object, leaving the old one
9067  *			empty.
9068  *
9069  *			This is used by kernel routines that need
9070  *			to look at out-of-line data (in copyin form)
9071  *			before deciding whether to return SUCCESS.
9072  *			If the routine returns FAILURE, the original
9073  *			copy object will be deallocated; therefore,
9074  *			these routines must make a copy of the copy
9075  *			object and leave the original empty so that
9076  *			deallocation will not fail.
9077  */
9078 vm_map_copy_t
9079 vm_map_copy_copy(
9080 	vm_map_copy_t   copy)
9081 {
9082 	vm_map_copy_t   new_copy;
9083 
9084 	if (copy == VM_MAP_COPY_NULL) {
9085 		return VM_MAP_COPY_NULL;
9086 	}
9087 
9088 	/*
9089 	 * Assert that the vm_map_copy is coming from the right
9090 	 * zone and hasn't been forged
9091 	 */
9092 	vm_map_copy_require(copy);
9093 
9094 	/*
9095 	 * Allocate a new copy object, and copy the information
9096 	 * from the old one into it.
9097 	 */
9098 
9099 	new_copy = zalloc_id(ZONE_ID_VM_MAP_COPY, Z_WAITOK | Z_ZERO | Z_NOFAIL);
9100 	memcpy((void *) new_copy, (void *) copy, sizeof(struct vm_map_copy));
9101 #if __has_feature(ptrauth_calls)
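	/*
	 * cpy_kdata is presumably signed with address diversity, so the
	 * raw memcpy() above does not carry a valid signature to the new
	 * location; re-assigning through the typed field re-signs the
	 * pointer for new_copy.
	 */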
9102 	if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
9103 		new_copy->cpy_kdata = copy->cpy_kdata;
9104 	}
9105 #endif
9106 
9107 	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
9108 		/*
9109 		 * The links in the entry chain must be
9110 		 * changed to point to the new copy object.
9111 		 */
9112 		vm_map_copy_first_entry(copy)->vme_prev
9113 		        = vm_map_copy_to_entry(new_copy);
9114 		vm_map_copy_last_entry(copy)->vme_next
9115 		        = vm_map_copy_to_entry(new_copy);
9116 	}
9117 
9118 	/*
9119 	 * Change the old copy object into one that contains
9120 	 * nothing to be deallocated.
9121 	 */
9122 	bzero(copy, sizeof(struct vm_map_copy));
9123 	copy->type = VM_MAP_COPY_KERNEL_BUFFER;
9124 
9125 	/*
9126 	 * Return the new object.
9127 	 */
9128 	return new_copy;
9129 }
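
/*
 * Illustrative caller pattern ("consume_data" is hypothetical): take
 * ownership of the contents before an operation that may fail, so that
 * the original copy object remains safe to deallocate either way:
 *
 *	vm_map_copy_t private_copy = vm_map_copy_copy(copy);
 *
 *	kr = consume_data(private_copy);
 *	if (kr != KERN_SUCCESS)
 *		vm_map_copy_discard(private_copy);
 *
 * The original "copy" is left empty, so a later vm_map_copy_discard()
 * of it deallocates nothing.
 */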
9130 
9131 #endif /* XNU_PLATFORM_MacOSX */
9132 
9133 static boolean_t
9134 vm_map_entry_is_overwritable(
9135 	vm_map_t        dst_map __unused,
9136 	vm_map_entry_t  entry)
9137 {
9138 	if (!(entry->protection & VM_PROT_WRITE)) {
9139 		/* can't overwrite if not writable */
9140 		return FALSE;
9141 	}
9142 #if !__x86_64__
9143 	if (entry->used_for_jit &&
9144 	    vm_map_cs_enforcement(dst_map) &&
9145 	    !dst_map->cs_debugged) {
9146 		/*
9147 		 * Can't overwrite a JIT region while cs_enforced
9148 		 * and not cs_debugged.
9149 		 */
9150 		return FALSE;
9151 	}
9152 
9153 #if __arm64e__
9154 	/* Do not allow overwrite HW assisted TPRO entries */
9155 	if (entry->used_for_tpro) {
9156 		return FALSE;
9157 	}
9158 #endif /* __arm64e__ */
9159 
9160 	if (entry->vme_permanent) {
9161 		if (entry->is_sub_map) {
9162 			/*
9163 			 * We can't tell if the submap contains "permanent"
9164 			 * entries within the range targeted by the caller.
9165 			 * The caller will have to check for that with
9166 			 * vm_map_overwrite_submap_recurse() for example.
9167 			 */
9168 		} else {
9169 			/*
9170 			 * Do not allow overwriting of a "permanent"
9171 			 * entry.
9172 			 */
9173 			DTRACE_VM6(vm_map_delete_permanent_deny_overwrite,
9174 			    vm_map_entry_t, entry,
9175 			    vm_map_offset_t, entry->vme_start,
9176 			    vm_map_offset_t, entry->vme_end,
9177 			    vm_prot_t, entry->protection,
9178 			    vm_prot_t, entry->max_protection,
9179 			    int, VME_ALIAS(entry));
9180 			return FALSE;
9181 		}
9182 	}
9183 #endif /* !__x86_64__ */
9184 	return TRUE;
9185 }
9186 
9187 static kern_return_t
9188 vm_map_overwrite_submap_recurse(
9189 	vm_map_t        dst_map,
9190 	vm_map_offset_t dst_addr,
9191 	vm_map_size_t   dst_size)
9192 {
9193 	vm_map_offset_t dst_end;
9194 	vm_map_entry_t  tmp_entry;
9195 	vm_map_entry_t  entry;
9196 	kern_return_t   result;
9197 	boolean_t       encountered_sub_map = FALSE;
9198 
9199 
9200 
9201 	/*
9202 	 *	Verify that the destination is all writeable
9203 	 *	initially.  We have to trunc the destination
9204 	 *	address and round the copy size or we'll end up
9205 	 *	splitting entries in strange ways.
9206 	 */
9207 
9208 	dst_end = vm_map_round_page(dst_addr + dst_size,
9209 	    VM_MAP_PAGE_MASK(dst_map));
9210 	vm_map_lock(dst_map);
9211 
9212 start_pass_1:
9213 	if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
9214 		vm_map_unlock(dst_map);
9215 		return KERN_INVALID_ADDRESS;
9216 	}
9217 
9218 	vm_map_clip_start(dst_map,
9219 	    tmp_entry,
9220 	    vm_map_trunc_page(dst_addr,
9221 	    VM_MAP_PAGE_MASK(dst_map)));
9222 	if (tmp_entry->is_sub_map) {
9223 		/* clipping did unnest if needed */
9224 		assert(!tmp_entry->use_pmap);
9225 	}
9226 
9227 	for (entry = tmp_entry;;) {
9228 		vm_map_entry_t  next;
9229 
9230 		next = entry->vme_next;
9231 		while (entry->is_sub_map) {
9232 			vm_map_offset_t sub_start;
9233 			vm_map_offset_t sub_end;
9234 			vm_map_offset_t local_end;
9235 
9236 			if (entry->in_transition) {
9237 				/*
9238 				 * Say that we are waiting, and wait for entry.
9239 				 */
9240 				entry->needs_wakeup = TRUE;
9241 				vm_map_entry_wait(dst_map, THREAD_UNINT);
9242 
9243 				goto start_pass_1;
9244 			}
9245 
9246 			encountered_sub_map = TRUE;
9247 			sub_start = VME_OFFSET(entry);
9248 
9249 			if (entry->vme_end < dst_end) {
9250 				sub_end = entry->vme_end;
9251 			} else {
9252 				sub_end = dst_end;
9253 			}
9254 			sub_end -= entry->vme_start;
9255 			sub_end += VME_OFFSET(entry);
9256 			local_end = entry->vme_end;
9257 			vm_map_unlock(dst_map);
9258 
9259 			result = vm_map_overwrite_submap_recurse(
9260 				VME_SUBMAP(entry),
9261 				sub_start,
9262 				sub_end - sub_start);
9263 
9264 			if (result != KERN_SUCCESS) {
9265 				return result;
9266 			}
9267 			if (dst_end <= entry->vme_end) {
9268 				return KERN_SUCCESS;
9269 			}
9270 			vm_map_lock(dst_map);
9271 			if (!vm_map_lookup_entry(dst_map, local_end,
9272 			    &tmp_entry)) {
9273 				vm_map_unlock(dst_map);
9274 				return KERN_INVALID_ADDRESS;
9275 			}
9276 			entry = tmp_entry;
9277 			next = entry->vme_next;
9278 		}
9279 
9280 		if (!(entry->protection & VM_PROT_WRITE)) {
9281 			vm_map_unlock(dst_map);
9282 			return KERN_PROTECTION_FAILURE;
9283 		}
9284 
9285 		if (!vm_map_entry_is_overwritable(dst_map, entry)) {
9286 			vm_map_unlock(dst_map);
9287 			return KERN_PROTECTION_FAILURE;
9288 		}
9289 
9290 		/*
9291 		 *	If the entry is in transition, we must wait
9292 		 *	for it to exit that state.  Anything could happen
9293 		 *	when we unlock the map, so start over.
9294 		 */
9295 		if (entry->in_transition) {
9296 			/*
9297 			 * Say that we are waiting, and wait for entry.
9298 			 */
9299 			entry->needs_wakeup = TRUE;
9300 			vm_map_entry_wait(dst_map, THREAD_UNINT);
9301 
9302 			goto start_pass_1;
9303 		}
9304 
9305 /*
9306  *		our range is contained completely within this map entry
9307  */
9308 		if (dst_end <= entry->vme_end) {
9309 			vm_map_unlock(dst_map);
9310 			return KERN_SUCCESS;
9311 		}
9312 /*
9313  *		check that range specified is contiguous region
9314  */
9315 		if ((next == vm_map_to_entry(dst_map)) ||
9316 		    (next->vme_start != entry->vme_end)) {
9317 			vm_map_unlock(dst_map);
9318 			return KERN_INVALID_ADDRESS;
9319 		}
9320 
9321 		/*
9322 		 *	Check for permanent objects in the destination.
9323 		 */
9324 		if ((VME_OBJECT(entry) != VM_OBJECT_NULL) &&
9325 		    ((!VME_OBJECT(entry)->internal) ||
9326 		    (VME_OBJECT(entry)->true_share))) {
9327 			if (encountered_sub_map) {
9328 				vm_map_unlock(dst_map);
9329 				return KERN_FAILURE;
9330 			}
9331 		}
9332 
9333 
9334 		entry = next;
9335 	}/* for */
9336 	vm_map_unlock(dst_map);
9337 	return KERN_SUCCESS;
9338 }
9339 
9340 /*
9341  *	Routine:	vm_map_copy_overwrite
9342  *
9343  *	Description:
9344  *		Copy the memory described by the map copy
9345  *		object (copy; returned by vm_map_copyin) onto
9346  *		the specified destination region (dst_map, dst_addr).
9347  *		The destination must be writeable.
9348  *
9349  *		Unlike vm_map_copyout, this routine actually
9350  *		writes over previously-mapped memory.  If the
9351  *		previous mapping was to a permanent (user-supplied)
9352  *		memory object, it is preserved.
9353  *
9354  *		The attributes (protection and inheritance) of the
9355  *		destination region are preserved.
9356  *
9357  *		If successful, consumes the copy object.
9358  *		Otherwise, the caller is responsible for it.
9359  *
9360  *	Implementation notes:
9361  *		To overwrite aligned temporary virtual memory, it is
9362  *		sufficient to remove the previous mapping and insert
9363  *		the new copy.  This replacement is done either on
9364  *		the whole region (if no permanent virtual memory
9365  *		objects are embedded in the destination region) or
9366  *		in individual map entries.
9367  *
9368  *		To overwrite permanent virtual memory, it is necessary
9369  *		to copy each page, as the external memory management
9370  *		interface currently does not provide any optimizations.
9371  *
9372  *		Unaligned memory also has to be copied.  It is possible
9373  *		to use 'vm_trickery' to copy the aligned data.  This is
9374  *		not done but not hard to implement.
9375  *
9376  *		Once a page of permanent memory has been overwritten,
9377  *		it is impossible to interrupt this function; otherwise,
9378  *		the call would be neither atomic nor location-independent.
9379  *		The kernel-state portion of a user thread must be
9380  *		interruptible.
9381  *
9382  *		It may be expensive to forward all requests that might
9383  *		overwrite permanent memory (vm_write, vm_copy) to
9384  *		uninterruptible kernel threads.  This routine may be
9385  *		called by interruptible threads; however, success is
9386  *		not guaranteed -- if the request cannot be performed
9387  *		atomically and interruptibly, an error indication is
9388  *		returned.
9389  *
9390  *		Callers of this function must call vm_map_copy_require on
9391  *		previously created vm_map_copy_t or pass a newly created
9392  *		one to ensure that it hasn't been forged.
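 *
 *		A sketch of a typical call chain (variables hypothetical):
 *
 *			kr = vm_map_copyin(src_map, src_addr, len,
 *			    FALSE, &copy);
 *			if (kr == KERN_SUCCESS)
 *				kr = vm_map_copy_overwrite(dst_map,
 *				    dst_addr, copy, len, FALSE);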
9393  */
9394 static kern_return_t
9395 vm_map_copy_overwrite_nested(
9396 	vm_map_t                dst_map,
9397 	vm_map_address_t        dst_addr,
9398 	vm_map_copy_t           copy,
9399 	boolean_t               interruptible,
9400 	pmap_t                  pmap,
9401 	boolean_t               discard_on_success)
9402 {
9403 	vm_map_offset_t         dst_end;
9404 	vm_map_entry_t          tmp_entry;
9405 	vm_map_entry_t          entry;
9406 	kern_return_t           kr;
9407 	boolean_t               aligned = TRUE;
9408 	boolean_t               contains_permanent_objects = FALSE;
9409 	boolean_t               encountered_sub_map = FALSE;
9410 	vm_map_offset_t         base_addr;
9411 	vm_map_size_t           copy_size;
9412 	vm_map_size_t           total_size;
9413 	uint16_t                copy_page_shift;
9414 
9415 	/*
9416 	 *	Check for special kernel buffer allocated
9417 	 *	by new_ipc_kmsg_copyin.
9418 	 */
9419 
9420 	if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
9421 		kr = vm_map_copyout_kernel_buffer(
9422 			dst_map, &dst_addr,
9423 			copy, copy->size, TRUE, discard_on_success);
9424 		return kr;
9425 	}
9426 
9427 	/*
9428 	 *      Only works for entry lists at the moment.  Will
9429 	 *	support page lists later.
9430 	 */
9431 
9432 	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
9433 
9434 	if (copy->size == 0) {
9435 		if (discard_on_success) {
9436 			vm_map_copy_discard(copy);
9437 		}
9438 		return KERN_SUCCESS;
9439 	}
9440 
9441 	copy_page_shift = copy->cpy_hdr.page_shift;
9442 
9443 	/*
9444 	 *	Verify that the destination is all writeable
9445 	 *	initially.  We have to trunc the destination
9446 	 *	address and round the copy size or we'll end up
9447 	 *	splitting entries in strange ways.
9448 	 */
9449 
9450 	if (!VM_MAP_PAGE_ALIGNED(copy->size,
9451 	    VM_MAP_PAGE_MASK(dst_map)) ||
9452 	    !VM_MAP_PAGE_ALIGNED(copy->offset,
9453 	    VM_MAP_PAGE_MASK(dst_map)) ||
9454 	    !VM_MAP_PAGE_ALIGNED(dst_addr,
9455 	    VM_MAP_PAGE_MASK(dst_map)) ||
9456 	    copy_page_shift != VM_MAP_PAGE_SHIFT(dst_map)) {
9457 		aligned = FALSE;
9458 		dst_end = vm_map_round_page(dst_addr + copy->size,
9459 		    VM_MAP_PAGE_MASK(dst_map));
9460 	} else {
9461 		dst_end = dst_addr + copy->size;
9462 	}
9463 
9464 	vm_map_lock(dst_map);
9465 
9466 	/* LP64todo - remove this check when vm_map_commpage64()
9467 	 * no longer has to stuff in a map_entry for the commpage
9468 	 * above the map's max_offset.
9469 	 */
9470 	if (dst_addr >= dst_map->max_offset) {
9471 		vm_map_unlock(dst_map);
9472 		return KERN_INVALID_ADDRESS;
9473 	}
9474 
9475 start_pass_1:
9476 	if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
9477 		vm_map_unlock(dst_map);
9478 		return KERN_INVALID_ADDRESS;
9479 	}
9480 	vm_map_clip_start(dst_map,
9481 	    tmp_entry,
9482 	    vm_map_trunc_page(dst_addr,
9483 	    VM_MAP_PAGE_MASK(dst_map)));
9484 	for (entry = tmp_entry;;) {
9485 		vm_map_entry_t  next = entry->vme_next;
9486 
9487 		while (entry->is_sub_map) {
9488 			vm_map_offset_t sub_start;
9489 			vm_map_offset_t sub_end;
9490 			vm_map_offset_t local_end;
9491 
9492 			if (entry->in_transition) {
9493 				/*
9494 				 * Say that we are waiting, and wait for entry.
9495 				 */
9496 				entry->needs_wakeup = TRUE;
9497 				vm_map_entry_wait(dst_map, THREAD_UNINT);
9498 
9499 				goto start_pass_1;
9500 			}
9501 
9502 			local_end = entry->vme_end;
9503 			if (!(entry->needs_copy)) {
9504 				/* if needs_copy we are a COW submap */
9505 				/* in such a case we just replace so */
9506 				/* there is no need for the follow-  */
9507 				/* ing check.                        */
9508 				encountered_sub_map = TRUE;
9509 				sub_start = VME_OFFSET(entry);
9510 
9511 				if (entry->vme_end < dst_end) {
9512 					sub_end = entry->vme_end;
9513 				} else {
9514 					sub_end = dst_end;
9515 				}
9516 				sub_end -= entry->vme_start;
9517 				sub_end += VME_OFFSET(entry);
9518 				vm_map_unlock(dst_map);
9519 
9520 				kr = vm_map_overwrite_submap_recurse(
9521 					VME_SUBMAP(entry),
9522 					sub_start,
9523 					sub_end - sub_start);
9524 				if (kr != KERN_SUCCESS) {
9525 					return kr;
9526 				}
9527 				vm_map_lock(dst_map);
9528 			}
9529 
9530 			if (dst_end <= entry->vme_end) {
9531 				goto start_overwrite;
9532 			}
9533 			if (!vm_map_lookup_entry(dst_map, local_end,
9534 			    &entry)) {
9535 				vm_map_unlock(dst_map);
9536 				return KERN_INVALID_ADDRESS;
9537 			}
9538 			next = entry->vme_next;
9539 		}
9540 
9541 		if (!(entry->protection & VM_PROT_WRITE)) {
9542 			vm_map_unlock(dst_map);
9543 			return KERN_PROTECTION_FAILURE;
9544 		}
9545 
9546 		if (!vm_map_entry_is_overwritable(dst_map, entry)) {
9547 			vm_map_unlock(dst_map);
9548 			return KERN_PROTECTION_FAILURE;
9549 		}
9550 
9551 		/*
9552 		 *	If the entry is in transition, we must wait
9553 		 *	for it to exit that state.  Anything could happen
9554 		 *	when we unlock the map, so start over.
9555 		 */
9556 		if (entry->in_transition) {
9557 			/*
9558 			 * Say that we are waiting, and wait for entry.
9559 			 */
9560 			entry->needs_wakeup = TRUE;
9561 			vm_map_entry_wait(dst_map, THREAD_UNINT);
9562 
9563 			goto start_pass_1;
9564 		}
9565 
9566 /*
9567  *		our range is contained completely within this map entry
9568  */
9569 		if (dst_end <= entry->vme_end) {
9570 			break;
9571 		}
9572 /*
9573  *		check that range specified is contiguous region
9574  */
9575 		if ((next == vm_map_to_entry(dst_map)) ||
9576 		    (next->vme_start != entry->vme_end)) {
9577 			vm_map_unlock(dst_map);
9578 			return KERN_INVALID_ADDRESS;
9579 		}
9580 
9581 
9582 		/*
9583 		 *	Check for permanent objects in the destination.
9584 		 */
9585 		if ((VME_OBJECT(entry) != VM_OBJECT_NULL) &&
9586 		    ((!VME_OBJECT(entry)->internal) ||
9587 		    (VME_OBJECT(entry)->true_share))) {
9588 			contains_permanent_objects = TRUE;
9589 		}
9590 
9591 		entry = next;
9592 	}/* for */
9593 
9594 start_overwrite:
9595 	/*
9596 	 *	If there are permanent objects in the destination, then
9597 	 *	the copy cannot be interrupted.
9598 	 */
9599 
9600 	if (interruptible && contains_permanent_objects) {
9601 		vm_map_unlock(dst_map);
9602 		return KERN_FAILURE;   /* XXX */
9603 	}
9604 
9605 	/*
9606 	 *
9607 	 *	Make a second pass, overwriting the data
9608 	 *	At the beginning of each loop iteration,
9609 	 *	the next entry to be overwritten is "tmp_entry"
9610 	 *	(initially, the value returned from the lookup above),
9611 	 *	and the starting address expected in that entry
9612 	 *	is "start".
9613 	 */
9614 
9615 	total_size = copy->size;
9616 	if (encountered_sub_map) {
9617 		copy_size = 0;
9618 		/* re-calculate tmp_entry since we've had the map */
9619 		/* unlocked */
9620 		if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) {
9621 			vm_map_unlock(dst_map);
9622 			return KERN_INVALID_ADDRESS;
9623 		}
9624 	} else {
9625 		copy_size = copy->size;
9626 	}
9627 
9628 	base_addr = dst_addr;
9629 	while (TRUE) {
9630 		/* deconstruct the copy object and do in parts */
9631 		/* only in sub_map, interruptible case */
9632 		vm_map_entry_t  copy_entry;
9633 		vm_map_entry_t  previous_prev = VM_MAP_ENTRY_NULL;
9634 		vm_map_entry_t  next_copy = VM_MAP_ENTRY_NULL;
9635 		int             nentries;
9636 		int             remaining_entries = 0;
9637 		vm_map_offset_t new_offset = 0;
9638 
9639 		for (entry = tmp_entry; copy_size == 0;) {
9640 			vm_map_entry_t  next;
9641 
9642 			next = entry->vme_next;
9643 
9644 			/* tmp_entry and base address are moved along */
9645 			/* each time we encounter a sub-map.  Otherwise */
9646 			/* entry can outpace tmp_entry, and the copy_size */
9647 			/* may reflect the distance between them */
9648 			/* if the current entry is found to be in transition */
9649 			/* we will start over at the beginning or the last */
9650 			/* encounter of a submap as dictated by base_addr */
9651 			/* we will zero copy_size accordingly. */
9652 			if (entry->in_transition) {
9653 				/*
9654 				 * Say that we are waiting, and wait for entry.
9655 				 */
9656 				entry->needs_wakeup = TRUE;
9657 				vm_map_entry_wait(dst_map, THREAD_UNINT);
9658 
9659 				if (!vm_map_lookup_entry(dst_map, base_addr,
9660 				    &tmp_entry)) {
9661 					vm_map_unlock(dst_map);
9662 					return KERN_INVALID_ADDRESS;
9663 				}
9664 				copy_size = 0;
9665 				entry = tmp_entry;
9666 				continue;
9667 			}
9668 			if (entry->is_sub_map) {
9669 				vm_map_offset_t sub_start;
9670 				vm_map_offset_t sub_end;
9671 				vm_map_offset_t local_end;
9672 
9673 				if (entry->needs_copy) {
9674 					/* if this is a COW submap */
9675 					/* just back the range with an */
9676 					/* anonymous entry */
9677 					assert(!entry->vme_permanent);
9678 					if (entry->vme_end < dst_end) {
9679 						sub_end = entry->vme_end;
9680 					} else {
9681 						sub_end = dst_end;
9682 					}
9683 					if (entry->vme_start < base_addr) {
9684 						sub_start = base_addr;
9685 					} else {
9686 						sub_start = entry->vme_start;
9687 					}
9688 					vm_map_clip_end(
9689 						dst_map, entry, sub_end);
9690 					vm_map_clip_start(
9691 						dst_map, entry, sub_start);
9692 					assert(!entry->use_pmap);
9693 					assert(!entry->iokit_acct);
9694 					entry->use_pmap = TRUE;
9695 					vm_map_deallocate(VME_SUBMAP(entry));
9696 					assert(!entry->vme_permanent);
9697 					VME_OBJECT_SET(entry, VM_OBJECT_NULL, false, 0);
9698 					VME_OFFSET_SET(entry, 0);
9699 					entry->is_shared = FALSE;
9700 					entry->needs_copy = FALSE;
9701 					entry->protection = VM_PROT_DEFAULT;
9702 					entry->max_protection = VM_PROT_ALL;
9703 					entry->wired_count = 0;
9704 					entry->user_wired_count = 0;
9705 					if (entry->inheritance
9706 					    == VM_INHERIT_SHARE) {
9707 						entry->inheritance = VM_INHERIT_COPY;
9708 					}
9709 					continue;
9710 				}
9711 				/* first take care of any non-sub_map */
9712 				/* entries to send */
9713 				if (base_addr < entry->vme_start) {
9714 					/* stuff to send */
9715 					copy_size =
9716 					    entry->vme_start - base_addr;
9717 					break;
9718 				}
9719 				sub_start = VME_OFFSET(entry);
9720 
9721 				if (entry->vme_end < dst_end) {
9722 					sub_end = entry->vme_end;
9723 				} else {
9724 					sub_end = dst_end;
9725 				}
9726 				sub_end -= entry->vme_start;
9727 				sub_end += VME_OFFSET(entry);
9728 				local_end = entry->vme_end;
9729 				vm_map_unlock(dst_map);
9730 				copy_size = sub_end - sub_start;
9731 
9732 				/* adjust the copy object */
9733 				if (total_size > copy_size) {
9734 					vm_map_size_t   local_size = 0;
9735 					vm_map_size_t   entry_size;
9736 
9737 					nentries = 1;
9738 					new_offset = copy->offset;
9739 					copy_entry = vm_map_copy_first_entry(copy);
9740 					while (copy_entry !=
9741 					    vm_map_copy_to_entry(copy)) {
9742 						entry_size = copy_entry->vme_end -
9743 						    copy_entry->vme_start;
9744 						if ((local_size < copy_size) &&
9745 						    ((local_size + entry_size)
9746 						    >= copy_size)) {
9747 							vm_map_copy_clip_end(copy,
9748 							    copy_entry,
9749 							    copy_entry->vme_start +
9750 							    (copy_size - local_size));
9751 							entry_size = copy_entry->vme_end -
9752 							    copy_entry->vme_start;
9753 							local_size += entry_size;
9754 							new_offset += entry_size;
9755 						}
9756 						if (local_size >= copy_size) {
9757 							next_copy = copy_entry->vme_next;
9758 							copy_entry->vme_next =
9759 							    vm_map_copy_to_entry(copy);
9760 							previous_prev =
9761 							    copy->cpy_hdr.links.prev;
9762 							copy->cpy_hdr.links.prev = copy_entry;
9763 							copy->size = copy_size;
9764 							remaining_entries =
9765 							    copy->cpy_hdr.nentries;
9766 							remaining_entries -= nentries;
9767 							copy->cpy_hdr.nentries = nentries;
9768 							break;
9769 						} else {
9770 							local_size += entry_size;
9771 							new_offset += entry_size;
9772 							nentries++;
9773 						}
9774 						copy_entry = copy_entry->vme_next;
9775 					}
9776 				}
9777 
9778 				if ((entry->use_pmap) && (pmap == NULL)) {
9779 					kr = vm_map_copy_overwrite_nested(
9780 						VME_SUBMAP(entry),
9781 						sub_start,
9782 						copy,
9783 						interruptible,
9784 						VME_SUBMAP(entry)->pmap,
9785 						TRUE);
9786 				} else if (pmap != NULL) {
9787 					kr = vm_map_copy_overwrite_nested(
9788 						VME_SUBMAP(entry),
9789 						sub_start,
9790 						copy,
9791 						interruptible, pmap,
9792 						TRUE);
9793 				} else {
9794 					kr = vm_map_copy_overwrite_nested(
9795 						VME_SUBMAP(entry),
9796 						sub_start,
9797 						copy,
9798 						interruptible,
9799 						dst_map->pmap,
9800 						TRUE);
9801 				}
9802 				if (kr != KERN_SUCCESS) {
9803 					if (next_copy != NULL) {
9804 						copy->cpy_hdr.nentries +=
9805 						    remaining_entries;
9806 						copy->cpy_hdr.links.prev->vme_next =
9807 						    next_copy;
9808 						copy->cpy_hdr.links.prev
9809 						        = previous_prev;
9810 						copy->size = total_size;
9811 					}
9812 					return kr;
9813 				}
9814 				if (dst_end <= local_end) {
9815 					return KERN_SUCCESS;
9816 				}
9817 				/* otherwise copy no longer exists, it was */
9818 				/* destroyed after successful copy_overwrite */
9819 				copy = vm_map_copy_allocate(VM_MAP_COPY_ENTRY_LIST);
9820 				copy->offset = new_offset;
9821 				copy->cpy_hdr.page_shift = copy_page_shift;
9822 
9823 				total_size -= copy_size;
9824 				copy_size = 0;
9825 				/* put back remainder of copy in container */
9826 				if (next_copy != NULL) {
9827 					copy->cpy_hdr.nentries = remaining_entries;
9828 					copy->cpy_hdr.links.next = next_copy;
9829 					copy->cpy_hdr.links.prev = previous_prev;
9830 					copy->size = total_size;
9831 					next_copy->vme_prev =
9832 					    vm_map_copy_to_entry(copy);
9833 					next_copy = NULL;
9834 				}
9835 				base_addr = local_end;
9836 				vm_map_lock(dst_map);
9837 				if (!vm_map_lookup_entry(dst_map,
9838 				    local_end, &tmp_entry)) {
9839 					vm_map_unlock(dst_map);
9840 					return KERN_INVALID_ADDRESS;
9841 				}
9842 				entry = tmp_entry;
9843 				continue;
9844 			}
9845 			if (dst_end <= entry->vme_end) {
9846 				copy_size = dst_end - base_addr;
9847 				break;
9848 			}
9849 
9850 			if ((next == vm_map_to_entry(dst_map)) ||
9851 			    (next->vme_start != entry->vme_end)) {
9852 				vm_map_unlock(dst_map);
9853 				return KERN_INVALID_ADDRESS;
9854 			}
9855 
9856 			entry = next;
9857 		}/* for */
9858 
9859 		next_copy = NULL;
9860 		nentries = 1;
9861 
9862 		/* adjust the copy object */
9863 		if (total_size > copy_size) {
9864 			vm_map_size_t   local_size = 0;
9865 			vm_map_size_t   entry_size;
9866 
9867 			new_offset = copy->offset;
9868 			copy_entry = vm_map_copy_first_entry(copy);
9869 			while (copy_entry != vm_map_copy_to_entry(copy)) {
9870 				entry_size = copy_entry->vme_end -
9871 				    copy_entry->vme_start;
9872 				if ((local_size < copy_size) &&
9873 				    ((local_size + entry_size)
9874 				    >= copy_size)) {
9875 					vm_map_copy_clip_end(copy, copy_entry,
9876 					    copy_entry->vme_start +
9877 					    (copy_size - local_size));
9878 					entry_size = copy_entry->vme_end -
9879 					    copy_entry->vme_start;
9880 					local_size += entry_size;
9881 					new_offset += entry_size;
9882 				}
9883 				if (local_size >= copy_size) {
9884 					next_copy = copy_entry->vme_next;
9885 					copy_entry->vme_next =
9886 					    vm_map_copy_to_entry(copy);
9887 					previous_prev =
9888 					    copy->cpy_hdr.links.prev;
9889 					copy->cpy_hdr.links.prev = copy_entry;
9890 					copy->size = copy_size;
9891 					remaining_entries =
9892 					    copy->cpy_hdr.nentries;
9893 					remaining_entries -= nentries;
9894 					copy->cpy_hdr.nentries = nentries;
9895 					break;
9896 				} else {
9897 					local_size += entry_size;
9898 					new_offset += entry_size;
9899 					nentries++;
9900 				}
9901 				copy_entry = copy_entry->vme_next;
9902 			}
9903 		}
9904 
9905 		if (aligned) {
9906 			pmap_t  local_pmap;
9907 
9908 			if (pmap) {
9909 				local_pmap = pmap;
9910 			} else {
9911 				local_pmap = dst_map->pmap;
9912 			}
9913 
9914 			if ((kr =  vm_map_copy_overwrite_aligned(
9915 				    dst_map, tmp_entry, copy,
9916 				    base_addr, local_pmap)) != KERN_SUCCESS) {
9917 				if (next_copy != NULL) {
9918 					copy->cpy_hdr.nentries +=
9919 					    remaining_entries;
9920 					copy->cpy_hdr.links.prev->vme_next =
9921 					    next_copy;
9922 					copy->cpy_hdr.links.prev =
9923 					    previous_prev;
9924 					copy->size += copy_size;
9925 				}
9926 				return kr;
9927 			}
9928 			vm_map_unlock(dst_map);
9929 		} else {
9930 			/*
9931 			 * Performance gain:
9932 			 *
9933 			 * if the copy and dst address are misaligned but share the same
9934 			 * offset within the page, we can copy_not_aligned the
9935 			 * misaligned parts and copy aligned the rest.  If they are
9936 			 * aligned but len is unaligned, we simply need to copy
9937 			 * the end bit unaligned.  We'll need to split the misaligned
9938 			 * bits of the region in this case!
9939 			 */
9940 			/* ALWAYS UNLOCKS THE dst_map MAP */
9941 			kr = vm_map_copy_overwrite_unaligned(
9942 				dst_map,
9943 				tmp_entry,
9944 				copy,
9945 				base_addr,
9946 				discard_on_success);
9947 			if (kr != KERN_SUCCESS) {
9948 				if (next_copy != NULL) {
9949 					copy->cpy_hdr.nentries +=
9950 					    remaining_entries;
9951 					copy->cpy_hdr.links.prev->vme_next =
9952 					    next_copy;
9953 					copy->cpy_hdr.links.prev =
9954 					    previous_prev;
9955 					copy->size += copy_size;
9956 				}
9957 				return kr;
9958 			}
9959 		}
9960 		total_size -= copy_size;
9961 		if (total_size == 0) {
9962 			break;
9963 		}
9964 		base_addr += copy_size;
9965 		copy_size = 0;
9966 		copy->offset = new_offset;
9967 		if (next_copy != NULL) {
9968 			copy->cpy_hdr.nentries = remaining_entries;
9969 			copy->cpy_hdr.links.next = next_copy;
9970 			copy->cpy_hdr.links.prev = previous_prev;
9971 			next_copy->vme_prev = vm_map_copy_to_entry(copy);
9972 			copy->size = total_size;
9973 		}
9974 		vm_map_lock(dst_map);
9975 		while (TRUE) {
9976 			if (!vm_map_lookup_entry(dst_map,
9977 			    base_addr, &tmp_entry)) {
9978 				vm_map_unlock(dst_map);
9979 				return KERN_INVALID_ADDRESS;
9980 			}
9981 			if (tmp_entry->in_transition) {
9982 				entry->needs_wakeup = TRUE;
9983 				vm_map_entry_wait(dst_map, THREAD_UNINT);
9984 			} else {
9985 				break;
9986 			}
9987 		}
9988 		vm_map_clip_start(dst_map,
9989 		    tmp_entry,
9990 		    vm_map_trunc_page(base_addr,
9991 		    VM_MAP_PAGE_MASK(dst_map)));
9992 
9993 		entry = tmp_entry;
9994 	} /* while */
9995 
9996 	/*
9997 	 *	Throw away the vm_map_copy object
9998 	 */
9999 	if (discard_on_success) {
10000 		vm_map_copy_discard(copy);
10001 	}
10002 
10003 	return KERN_SUCCESS;
10004 }/* vm_map_copy_overwrite */
10005 
10006 kern_return_t
10007 vm_map_copy_overwrite(
10008 	vm_map_t        dst_map,
10009 	vm_map_offset_t dst_addr,
10010 	vm_map_copy_t   copy,
10011 	vm_map_size_t   copy_size,
10012 	boolean_t       interruptible)
10013 {
10014 	vm_map_size_t   head_size, tail_size;
10015 	vm_map_copy_t   head_copy, tail_copy;
10016 	vm_map_offset_t head_addr, tail_addr;
10017 	vm_map_entry_t  entry;
10018 	kern_return_t   kr;
10019 	vm_map_offset_t effective_page_mask, effective_page_size;
10020 	uint16_t        copy_page_shift;
10021 
10022 	head_size = 0;
10023 	tail_size = 0;
10024 	head_copy = NULL;
10025 	tail_copy = NULL;
10026 	head_addr = 0;
10027 	tail_addr = 0;
10028 
10029 	/*
10030 	 *	Check for null copy object.
10031 	 */
10032 	if (copy == VM_MAP_COPY_NULL) {
10033 		return KERN_SUCCESS;
10034 	}
10035 
10036 	if (__improbable(vm_map_range_overflows(dst_map, dst_addr, copy_size))) {
10037 		return KERN_INVALID_ADDRESS;
10038 	}
10039 
10040 	/*
10041 	 * Assert that the vm_map_copy is coming from the right
10042 	 * zone and hasn't been forged
10043 	 */
10044 	vm_map_copy_require(copy);
10045 
10046 	if (interruptible ||
10047 	    copy->type != VM_MAP_COPY_ENTRY_LIST) {
10048 		/*
10049 		 * We can't split the "copy" map if we're interruptible
10050 		 * or if we don't have a "copy" map...
10051 		 */
10052 blunt_copy:
10053 		kr = vm_map_copy_overwrite_nested(dst_map,
10054 		    dst_addr,
10055 		    copy,
10056 		    interruptible,
10057 		    (pmap_t) NULL,
10058 		    TRUE);
10059 		if (kr) {
10060 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOVERWRITE_FULL_NESTED_ERROR), kr /* arg */);
10061 		}
10062 		return kr;
10063 	}
10064 
10065 	copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy);
10066 	if (copy_page_shift < PAGE_SHIFT ||
10067 	    VM_MAP_PAGE_SHIFT(dst_map) < PAGE_SHIFT) {
10068 		goto blunt_copy;
10069 	}
10070 
10071 	if (VM_MAP_PAGE_SHIFT(dst_map) < PAGE_SHIFT) {
10072 		effective_page_mask = VM_MAP_PAGE_MASK(dst_map);
10073 	} else {
10074 		effective_page_mask = MAX(VM_MAP_PAGE_MASK(dst_map), PAGE_MASK);
10075 		effective_page_mask = MAX(VM_MAP_COPY_PAGE_MASK(copy),
10076 		    effective_page_mask);
10077 	}
10078 	effective_page_size = effective_page_mask + 1;
10079 
10080 	if (copy_size < VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES * effective_page_size) {
10081 		/*
10082 		 * Too small to bother with optimizing...
10083 		 */
10084 		goto blunt_copy;
10085 	}
10086 
10087 	if ((dst_addr & effective_page_mask) !=
10088 	    (copy->offset & effective_page_mask)) {
10089 		/*
10090 		 * Incompatible mis-alignment of source and destination...
10091 		 */
10092 		goto blunt_copy;
10093 	}
10094 
10095 	/*
10096 	 * Proper alignment or identical mis-alignment at the beginning.
10097 	 * Let's try and do a small unaligned copy first (if needed)
10098 	 * and then an aligned copy for the rest.
10099 	 */
10100 	if (!vm_map_page_aligned(dst_addr, effective_page_mask)) {
10101 		head_addr = dst_addr;
10102 		head_size = (effective_page_size -
10103 		    (copy->offset & effective_page_mask));
10104 		head_size = MIN(head_size, copy_size);
10105 	}
10106 	if (!vm_map_page_aligned(copy->offset + copy_size,
10107 	    effective_page_mask)) {
10108 		/*
10109 		 * Mis-alignment at the end.
10110 		 * Do an aligned copy up to the last page and
10111 		 * then an unaligned copy for the remaining bytes.
10112 		 */
10113 		tail_size = ((copy->offset + copy_size) &
10114 		    effective_page_mask);
10115 		tail_size = MIN(tail_size, copy_size);
10116 		tail_addr = dst_addr + copy_size - tail_size;
10117 		assert(tail_addr >= head_addr + head_size);
10118 	}
10119 	assert(head_size + tail_size <= copy_size);
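
	/*
	 * Worked example with hypothetical numbers: with 16K effective
	 * pages (mask 0x3fff), dst_addr = 0x7000, copy->offset = 0x3000
	 * and copy_size = 0x1a000 (assuming the size threshold above is
	 * met), the head covers [0x7000, 0x8000), the aligned middle
	 * covers [0x8000, 0x20000), and the tail covers
	 * [0x20000, 0x21000).
	 */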
10120 
10121 	if (head_size + tail_size == copy_size) {
10122 		/*
10123 		 * It's all unaligned, no optimization possible...
10124 		 */
10125 		goto blunt_copy;
10126 	}
10127 
10128 	/*
10129 	 * Can't optimize if there are any submaps in the
10130 	 * destination due to the way we free the "copy" map
10131 	 * progressively in vm_map_copy_overwrite_nested()
10132 	 * in that case.
10133 	 */
10134 	vm_map_lock_read(dst_map);
10135 	if (!vm_map_lookup_entry(dst_map, dst_addr, &entry)) {
10136 		vm_map_unlock_read(dst_map);
10137 		goto blunt_copy;
10138 	}
10139 	for (;
10140 	    (entry != vm_map_to_entry(dst_map) &&
10141 	    entry->vme_start < dst_addr + copy_size);
10142 	    entry = entry->vme_next) {
10143 		if (entry->is_sub_map) {
10144 			vm_map_unlock_read(dst_map);
10145 			goto blunt_copy;
10146 		}
10147 	}
10148 	vm_map_unlock_read(dst_map);
10149 
10150 	if (head_size) {
10151 		/*
10152 		 * Unaligned copy of the first "head_size" bytes, to reach
10153 		 * a page boundary.
10154 		 */
10155 
10156 		/*
10157 		 * Extract "head_copy" out of "copy".
10158 		 */
10159 		head_copy = vm_map_copy_allocate(VM_MAP_COPY_ENTRY_LIST);
10160 		head_copy->cpy_hdr.entries_pageable =
10161 		    copy->cpy_hdr.entries_pageable;
10162 		head_copy->cpy_hdr.page_shift = copy_page_shift;
10163 
10164 		entry = vm_map_copy_first_entry(copy);
10165 		if (entry->vme_end < copy->offset + head_size) {
10166 			head_size = entry->vme_end - copy->offset;
10167 		}
10168 
10169 		head_copy->offset = copy->offset;
10170 		head_copy->size = head_size;
10171 		copy->offset += head_size;
10172 		copy->size -= head_size;
10173 		copy_size -= head_size;
10174 		assert(copy_size > 0);
10175 
10176 		vm_map_copy_clip_end(copy, entry, copy->offset);
10177 		vm_map_copy_entry_unlink(copy, entry);
10178 		vm_map_copy_entry_link(head_copy,
10179 		    vm_map_copy_to_entry(head_copy),
10180 		    entry);
10181 
10182 		/*
10183 		 * Do the unaligned copy.
10184 		 */
10185 		kr = vm_map_copy_overwrite_nested(dst_map,
10186 		    head_addr,
10187 		    head_copy,
10188 		    interruptible,
10189 		    (pmap_t) NULL,
10190 		    FALSE);
10191 		if (kr != KERN_SUCCESS) {
10192 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOVERWRITE_PARTIAL_HEAD_NESTED_ERROR), kr /* arg */);
10193 			goto done;
10194 		}
10195 	}
10196 
10197 	if (tail_size) {
10198 		/*
10199 		 * Extract "tail_copy" out of "copy".
10200 		 */
10201 		tail_copy = vm_map_copy_allocate(VM_MAP_COPY_ENTRY_LIST);
10202 		tail_copy->cpy_hdr.entries_pageable =
10203 		    copy->cpy_hdr.entries_pageable;
10204 		tail_copy->cpy_hdr.page_shift = copy_page_shift;
10205 
10206 		tail_copy->offset = copy->offset + copy_size - tail_size;
10207 		tail_copy->size = tail_size;
10208 
10209 		copy->size -= tail_size;
10210 		copy_size -= tail_size;
10211 		assert(copy_size > 0);
10212 
10213 		entry = vm_map_copy_last_entry(copy);
10214 		vm_map_copy_clip_start(copy, entry, tail_copy->offset);
10215 		entry = vm_map_copy_last_entry(copy);
10216 		vm_map_copy_entry_unlink(copy, entry);
10217 		vm_map_copy_entry_link(tail_copy,
10218 		    vm_map_copy_last_entry(tail_copy),
10219 		    entry);
10220 	}
10221 
10222 	/*
10223 	 * If we are here from ipc_kmsg_copyout_ool_descriptor(),
10224 	 * we want to avoid TOCTOU issues w.r.t copy->size but
10225 	 * we don't need to change vm_map_copy_overwrite_nested()
10226 	 * and all other vm_map_copy_overwrite variants.
10227 	 *
10228 	 * So we assign the original copy_size that was passed into
10229 	 * this routine back to copy.
10230 	 *
10231 	 * This use of local 'copy_size' passed into this routine is
10232 	 * to try and protect against TOCTOU attacks where the kernel
10233 	 * has been exploited. We don't expect this to be an issue
10234 	 * during normal system operation.
10235 	 */
10236 	assertf(copy->size == copy_size,
10237 	    "Mismatch of copy sizes. Expected 0x%llx, Got 0x%llx\n", (uint64_t) copy_size, (uint64_t) copy->size);
10238 	copy->size = copy_size;
10239 
10240 	/*
10241 	 * Copy most (or possibly all) of the data.
10242 	 */
10243 	kr = vm_map_copy_overwrite_nested(dst_map,
10244 	    dst_addr + head_size,
10245 	    copy,
10246 	    interruptible,
10247 	    (pmap_t) NULL,
10248 	    FALSE);
10249 	if (kr != KERN_SUCCESS) {
10250 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOVERWRITE_PARTIAL_NESTED_ERROR), kr /* arg */);
10251 		goto done;
10252 	}
10253 
10254 	if (tail_size) {
10255 		kr = vm_map_copy_overwrite_nested(dst_map,
10256 		    tail_addr,
10257 		    tail_copy,
10258 		    interruptible,
10259 		    (pmap_t) NULL,
10260 		    FALSE);
10261 		if (kr) {
10262 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOVERWRITE_PARTIAL_TAIL_NESTED_ERROR), kr /* arg */);
10263 		}
10264 	}
10265 
10266 done:
10267 	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
10268 	if (kr == KERN_SUCCESS) {
10269 		/*
10270 		 * Discard all the copy maps.
10271 		 */
10272 		if (head_copy) {
10273 			vm_map_copy_discard(head_copy);
10274 			head_copy = NULL;
10275 		}
10276 		vm_map_copy_discard(copy);
10277 		if (tail_copy) {
10278 			vm_map_copy_discard(tail_copy);
10279 			tail_copy = NULL;
10280 		}
10281 	} else {
10282 		/*
10283 		 * Re-assemble the original copy map.
10284 		 */
10285 		if (head_copy) {
10286 			entry = vm_map_copy_first_entry(head_copy);
10287 			vm_map_copy_entry_unlink(head_copy, entry);
10288 			vm_map_copy_entry_link(copy,
10289 			    vm_map_copy_to_entry(copy),
10290 			    entry);
10291 			copy->offset -= head_size;
10292 			copy->size += head_size;
10293 			vm_map_copy_discard(head_copy);
10294 			head_copy = NULL;
10295 		}
10296 		if (tail_copy) {
10297 			entry = vm_map_copy_last_entry(tail_copy);
10298 			vm_map_copy_entry_unlink(tail_copy, entry);
10299 			vm_map_copy_entry_link(copy,
10300 			    vm_map_copy_last_entry(copy),
10301 			    entry);
10302 			copy->size += tail_size;
10303 			vm_map_copy_discard(tail_copy);
10304 			tail_copy = NULL;
10305 		}
10306 	}
10307 	return kr;
10308 }
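/*
 * Editorial sketch of the decomposition above (illustrative layout only):
 * for a destination range that is misaligned at either end,
 *
 *	dst_addr                                      dst_addr + size
 *	|- head -|........... aligned middle ...........|- tail -|
 *
 * "head_copy" and "tail_copy" carve the misaligned fringes out of "copy"
 * so that each piece can be pushed with vm_map_copy_overwrite_nested()
 * separately; on failure, the pieces are relinked onto "copy" to
 * reconstitute the original copy map, as done in the error path above.
 */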
10309 
10310 
10311 /*
10312  *	Routine: vm_map_copy_overwrite_unaligned	[internal use only]
10313  *
10314  *	Description:
10315  *	Physically copy unaligned data.
10316  *
10317  *	Implementation:
10318  *	Unaligned parts of pages have to be physically copied.  We use
10319  *	a modified form of vm_fault_copy (which understands non-aligned
10320  *	page offsets and sizes) to do the copy.  We attempt to copy as
10321  *	much memory in one go as possible; however, vm_fault_copy copies
10322  *	within one memory object, so we have to find the smallest of
10323  *	"amount left", "source object data size" and "target object data
10324  *	size".  With unaligned data we don't need to split regions, so
10325  *	the source (copy) object should be a single map entry; the target
10326  *	range, however, may span multiple map entries.  In any event we
10327  *	are pessimistic about these assumptions.
10328  *
10329  *	Callers of this function must call vm_map_copy_require on
10330  *	previously created vm_map_copy_t or pass a newly created
10331  *	one to ensure that it hasn't been forged.
10332  *
10333  *	Assumptions:
10334  *	dst_map is locked on entry and is returned locked on success,
10335  *	unlocked on error.
10336  */
10337 
10338 static kern_return_t
10339 vm_map_copy_overwrite_unaligned(
10340 	vm_map_t        dst_map,
10341 	vm_map_entry_t  entry,
10342 	vm_map_copy_t   copy,
10343 	vm_map_offset_t start,
10344 	boolean_t       discard_on_success)
10345 {
10346 	vm_map_entry_t          copy_entry;
10347 	vm_map_entry_t          copy_entry_next;
10348 	vm_map_version_t        version;
10349 	vm_object_t             dst_object;
10350 	vm_object_offset_t      dst_offset;
10351 	vm_object_offset_t      src_offset;
10352 	vm_object_offset_t      entry_offset;
10353 	vm_map_offset_t         entry_end;
10354 	vm_map_size_t           src_size,
10355 	    dst_size,
10356 	    copy_size,
10357 	    amount_left;
10358 	kern_return_t           kr = KERN_SUCCESS;
10359 
10360 
10361 	copy_entry = vm_map_copy_first_entry(copy);
10362 
10363 	vm_map_lock_write_to_read(dst_map);
10364 
10365 	src_offset = copy->offset - trunc_page_mask_64(copy->offset, VM_MAP_COPY_PAGE_MASK(copy));
10366 	amount_left = copy->size;
10367 /*
10368  *	Unaligned, so we never clipped this entry; we need the offset into
10369  *	the vm_object, not just the data.
10370  */
10371 	while (amount_left > 0) {
10372 		if (entry == vm_map_to_entry(dst_map)) {
10373 			vm_map_unlock_read(dst_map);
10374 			return KERN_INVALID_ADDRESS;
10375 		}
10376 
10377 		/* "start" must be within the current map entry */
10378 		assert((start >= entry->vme_start) && (start < entry->vme_end));
10379 
10380 		/*
10381 		 *	Check protection again
10382 		 */
10383 		if (!(entry->protection & VM_PROT_WRITE)) {
10384 			vm_map_unlock_read(dst_map);
10385 			return KERN_PROTECTION_FAILURE;
10386 		}
10387 		if (!vm_map_entry_is_overwritable(dst_map, entry)) {
10388 			vm_map_unlock_read(dst_map);
10389 			return KERN_PROTECTION_FAILURE;
10390 		}
10391 
10392 		/*
10393 		 *	If the entry is in transition, we must wait
10394 		 *	for it to exit that state.  Anything could happen
10395 		 *	when we unlock the map, so start over.
10396 		 */
10397 		if (entry->in_transition) {
10398 			/*
10399 			 * Say that we are waiting, and wait for entry.
10400 			 */
10401 			entry->needs_wakeup = TRUE;
10402 			vm_map_entry_wait(dst_map, THREAD_UNINT);
10403 
10404 			goto RetryLookup;
10405 		}
10406 
10407 		dst_offset = start - entry->vme_start;
10408 
10409 		dst_size = entry->vme_end - start;
10410 
10411 		src_size = copy_entry->vme_end -
10412 		    (copy_entry->vme_start + src_offset);
10413 
10414 		if (dst_size < src_size) {
10415 /*
10416  *			we can only copy dst_size bytes before
10417  *			we have to get the next destination entry
10418  */
10419 			copy_size = dst_size;
10420 		} else {
10421 /*
10422  *			we can only copy src_size bytes before
10423  *			we have to get the next source copy entry
10424  */
10425 			copy_size = src_size;
10426 		}
10427 
10428 		if (copy_size > amount_left) {
10429 			copy_size = amount_left;
10430 		}
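		/* net effect: copy_size = MIN(dst_size, src_size, amount_left) */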
10431 /*
10432  *		Entry needs copy: create a shadow object for the
10433  *		copy-on-write region.
10434  */
10435 		if (entry->needs_copy) {
10436 			if (vm_map_lock_read_to_write(dst_map)) {
10437 				vm_map_lock_read(dst_map);
10438 				goto RetryLookup;
10439 			}
10440 			VME_OBJECT_SHADOW(entry,
10441 			    (vm_map_size_t)(entry->vme_end
10442 			    - entry->vme_start),
10443 			    vm_map_always_shadow(dst_map));
10444 			entry->needs_copy = FALSE;
10445 			vm_map_lock_write_to_read(dst_map);
10446 		}
10447 		dst_object = VME_OBJECT(entry);
10448 /*
10449  *		Unlike with the virtual (aligned) copy, we're going
10450  *		to fault on it, therefore we need a target object.
10451  */
10452 		if (dst_object == VM_OBJECT_NULL) {
10453 			if (vm_map_lock_read_to_write(dst_map)) {
10454 				vm_map_lock_read(dst_map);
10455 				goto RetryLookup;
10456 			}
10457 			dst_object = vm_object_allocate((vm_map_size_t)
10458 			    entry->vme_end - entry->vme_start);
10459 			VME_OBJECT_SET(entry, dst_object, false, 0);
10460 			VME_OFFSET_SET(entry, 0);
10461 			assert(entry->use_pmap);
10462 			vm_map_lock_write_to_read(dst_map);
10463 		}
10464 /*
10465  *		Take an object reference and unlock map. The "entry" may
10466  *		disappear or change when the map is unlocked.
10467  */
10468 		vm_object_reference(dst_object);
10469 		version.main_timestamp = dst_map->timestamp;
10470 		entry_offset = VME_OFFSET(entry);
10471 		entry_end = entry->vme_end;
10472 		vm_map_unlock_read(dst_map);
10473 /*
10474  *		Copy as much as possible in one pass
10475  */
10476 		kr = vm_fault_copy(
10477 			VME_OBJECT(copy_entry),
10478 			VME_OFFSET(copy_entry) + src_offset,
10479 			&copy_size,
10480 			dst_object,
10481 			entry_offset + dst_offset,
10482 			dst_map,
10483 			&version,
10484 			THREAD_UNINT );
10485 
10486 		start += copy_size;
10487 		src_offset += copy_size;
10488 		amount_left -= copy_size;
10489 /*
10490  *		Release the object reference
10491  */
10492 		vm_object_deallocate(dst_object);
10493 /*
10494  *		If a hard error occurred, return it now
10495  */
10496 		if (kr != KERN_SUCCESS) {
10497 			return kr;
10498 		}
10499 
10500 		if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
10501 		    || amount_left == 0) {
10502 /*
10503  *			all done with this copy entry, dispose.
10504  */
10505 			copy_entry_next = copy_entry->vme_next;
10506 
10507 			if (discard_on_success) {
10508 				vm_map_copy_entry_unlink(copy, copy_entry);
10509 				assert(!copy_entry->is_sub_map);
10510 				vm_object_deallocate(VME_OBJECT(copy_entry));
10511 				vm_map_copy_entry_dispose(copy_entry);
10512 			}
10513 
10514 			if (copy_entry_next == vm_map_copy_to_entry(copy) &&
10515 			    amount_left) {
10516 /*
10517  *				not finished copying, but we ran out of source
10518  */
10519 				return KERN_INVALID_ADDRESS;
10520 			}
10521 
10522 			copy_entry = copy_entry_next;
10523 
10524 			src_offset = 0;
10525 		}
10526 
10527 		if (amount_left == 0) {
10528 			return KERN_SUCCESS;
10529 		}
10530 
10531 		vm_map_lock_read(dst_map);
10532 		if (version.main_timestamp == dst_map->timestamp) {
10533 			if (start == entry_end) {
10534 /*
10535  *				destination region is split.  Use the version
10536  *				information to avoid a lookup in the normal
10537  *				case.
10538  */
10539 				entry = entry->vme_next;
10540 /*
10541  *				should be contiguous. Fail if we encounter
10542  *				a hole in the destination.
10543  */
10544 				if (start != entry->vme_start) {
10545 					vm_map_unlock_read(dst_map);
10546 					return KERN_INVALID_ADDRESS;
10547 				}
10548 			}
10549 		} else {
10550 /*
10551  *			Map version check failed.
10552  *			We must look up the entry because somebody
10553  *			might have changed the map behind our backs.
10554  */
10555 RetryLookup:
10556 			if (!vm_map_lookup_entry(dst_map, start, &entry)) {
10557 				vm_map_unlock_read(dst_map);
10558 				return KERN_INVALID_ADDRESS;
10559 			}
10560 		}
10561 	}/* while */
10562 
10563 	return KERN_SUCCESS;
10564 }/* vm_map_copy_overwrite_unaligned */
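/*
 * Editorial note on the locking pattern above: the map is only
 * read-locked while entries are examined, and "version.main_timestamp"
 * snapshots the map's timestamp before it is unlocked around
 * vm_fault_copy().  If the timestamp is unchanged once the read lock is
 * retaken, the cached "entry" is still valid; otherwise the code falls
 * back to a fresh vm_map_lookup_entry() via the RetryLookup label.
 */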
10565 
10566 /*
10567  *	Routine: vm_map_copy_overwrite_aligned	[internal use only]
10568  *
10569  *	Description:
10570  *	Does all the vm_trickery possible for whole pages.
10571  *
10572  *	Implementation:
10573  *
10574  *	If there are no permanent objects in the destination,
10575  *	and the source and destination map entry zones match,
10576  *	and the destination map entry is not shared,
10577  *	then the map entries can be deleted and replaced
10578  *	with those from the copy.  The following code is the
10579  *	basic idea of what to do, but there are lots of annoying
10580  *	little details about getting protection and inheritance
10581  *	right.  Should add protection, inheritance, and sharing checks
10582  *	to the above pass and make sure that no wiring is involved.
10583  *
10584  *	Callers of this function must call vm_map_copy_require on
10585  *	previously created vm_map_copy_t or pass a newly created
10586  *	one to ensure that it hasn't been forged.
10587  */
10588 
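/*
 * Diagnostic counters (informational only, as far as this code is
 * concerned): they record how often the optimized entry-substitution
 * path below is abandoned in favor of the slow physical copy, and why.
 */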
10589 int vm_map_copy_overwrite_aligned_src_not_internal = 0;
10590 int vm_map_copy_overwrite_aligned_src_not_symmetric = 0;
10591 int vm_map_copy_overwrite_aligned_src_large = 0;
10592 
10593 static kern_return_t
10594 vm_map_copy_overwrite_aligned(
10595 	vm_map_t        dst_map,
10596 	vm_map_entry_t  tmp_entry,
10597 	vm_map_copy_t   copy,
10598 	vm_map_offset_t start,
10599 	__unused pmap_t pmap)
10600 {
10601 	vm_object_t     object;
10602 	vm_map_entry_t  copy_entry;
10603 	vm_map_size_t   copy_size;
10604 	vm_map_size_t   size;
10605 	vm_map_entry_t  entry;
10606 
10607 	while ((copy_entry = vm_map_copy_first_entry(copy))
10608 	    != vm_map_copy_to_entry(copy)) {
10609 		copy_size = (copy_entry->vme_end - copy_entry->vme_start);
10610 
10611 		entry = tmp_entry;
10612 		if (entry->is_sub_map) {
10613 			/* unnested when clipped earlier */
10614 			assert(!entry->use_pmap);
10615 		}
10616 		if (entry == vm_map_to_entry(dst_map)) {
10617 			vm_map_unlock(dst_map);
10618 			return KERN_INVALID_ADDRESS;
10619 		}
10620 		size = (entry->vme_end - entry->vme_start);
10621 		/*
10622 		 *	Make sure that no holes popped up in the
10623 		 *	address map, and that the protection is
10624 		 *	still valid, in case the map was unlocked
10625 		 *	earlier.
10626 		 */
10627 
10628 		if ((entry->vme_start != start) || ((entry->is_sub_map)
10629 		    && !entry->needs_copy)) {
10630 			vm_map_unlock(dst_map);
10631 			return KERN_INVALID_ADDRESS;
10632 		}
10633 		assert(entry != vm_map_to_entry(dst_map));
10634 
10635 		/*
10636 		 *	Check protection again
10637 		 */
10638 
10639 		if (!(entry->protection & VM_PROT_WRITE)) {
10640 			vm_map_unlock(dst_map);
10641 			return KERN_PROTECTION_FAILURE;
10642 		}
10643 
10644 		if (!vm_map_entry_is_overwritable(dst_map, entry)) {
10645 			vm_map_unlock(dst_map);
10646 			return KERN_PROTECTION_FAILURE;
10647 		}
10648 
10649 		/*
10650 		 *	If the entry is in transition, we must wait
10651 		 *	for it to exit that state.  Anything could happen
10652 		 *	when we unlock the map, so start over.
10653 		 */
10654 		if (entry->in_transition) {
10655 			/*
10656 			 * Say that we are waiting, and wait for entry.
10657 			 */
10658 			entry->needs_wakeup = TRUE;
10659 			vm_map_entry_wait(dst_map, THREAD_UNINT);
10660 
10661 			goto RetryLookup;
10662 		}
10663 
10664 		/*
10665 		 *	Adjust to source size first
10666 		 */
10667 
10668 		if (copy_size < size) {
10669 			if (entry->map_aligned &&
10670 			    !VM_MAP_PAGE_ALIGNED(entry->vme_start + copy_size,
10671 			    VM_MAP_PAGE_MASK(dst_map))) {
10672 				/* no longer map-aligned */
10673 				entry->map_aligned = FALSE;
10674 			}
10675 			vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
10676 			size = copy_size;
10677 		}
10678 
10679 		/*
10680 		 *	Adjust to destination size
10681 		 */
10682 
10683 		if (size < copy_size) {
10684 			vm_map_copy_clip_end(copy, copy_entry,
10685 			    copy_entry->vme_start + size);
10686 			copy_size = size;
10687 		}
10688 
10689 		assert((entry->vme_end - entry->vme_start) == size);
10690 		assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
10691 		assert((copy_entry->vme_end - copy_entry->vme_start) == size);
10692 
10693 		/*
10694 		 *	If the destination contains temporary unshared memory,
10695 		 *	we can perform the copy by throwing it away and
10696 		 *	installing the source data.
10697 		 */
10698 
10699 		object = VME_OBJECT(entry);
10700 		if ((!entry->is_shared &&
10701 		    ((object == VM_OBJECT_NULL) ||
10702 		    (object->internal && !object->true_share))) ||
10703 		    entry->needs_copy) {
10704 			vm_object_t     old_object = VME_OBJECT(entry);
10705 			vm_object_offset_t      old_offset = VME_OFFSET(entry);
10706 			vm_object_offset_t      offset;
10707 
10708 			/*
10709 			 * Ensure that the source and destination aren't
10710 			 * identical
10711 			 */
10712 			if (old_object == VME_OBJECT(copy_entry) &&
10713 			    old_offset == VME_OFFSET(copy_entry)) {
10714 				vm_map_copy_entry_unlink(copy, copy_entry);
10715 				vm_map_copy_entry_dispose(copy_entry);
10716 
10717 				if (old_object != VM_OBJECT_NULL) {
10718 					vm_object_deallocate(old_object);
10719 				}
10720 
10721 				start = tmp_entry->vme_end;
10722 				tmp_entry = tmp_entry->vme_next;
10723 				continue;
10724 			}
10725 
10726 #if XNU_TARGET_OS_OSX
10727 #define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */
10728 #define __TRADEOFF1_COPY_SIZE (128 * 1024)      /* 128 KB */
10729 			if (VME_OBJECT(copy_entry) != VM_OBJECT_NULL &&
10730 			    VME_OBJECT(copy_entry)->vo_size >= __TRADEOFF1_OBJ_SIZE &&
10731 			    copy_size <= __TRADEOFF1_COPY_SIZE) {
10732 				/*
10733 				 * Virtual vs. Physical copy tradeoff #1.
10734 				 *
10735 				 * Copying only a few pages out of a large
10736 				 * object:  do a physical copy instead of
10737 				 * a virtual copy, to avoid possibly keeping
10738 				 * the entire large object alive because of
10739 				 * those few copy-on-write pages.
10740 				 */
10741 				vm_map_copy_overwrite_aligned_src_large++;
10742 				goto slow_copy;
10743 			}
10744 #endif /* XNU_TARGET_OS_OSX */
10745 
10746 			if ((dst_map->pmap != kernel_pmap) &&
10747 			    (VME_ALIAS(entry) >= VM_MEMORY_MALLOC) &&
10748 			    (VME_ALIAS(entry) <= VM_MEMORY_MALLOC_MEDIUM)) {
10749 				vm_object_t new_object, new_shadow;
10750 
10751 				/*
10752 				 * We're about to map something over a mapping
10753 				 * established by malloc()...
10754 				 */
10755 				new_object = VME_OBJECT(copy_entry);
10756 				if (new_object != VM_OBJECT_NULL) {
10757 					vm_object_lock_shared(new_object);
10758 				}
10759 				while (new_object != VM_OBJECT_NULL &&
10760 #if XNU_TARGET_OS_OSX
10761 				    !new_object->true_share &&
10762 				    new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
10763 #endif /* XNU_TARGET_OS_OSX */
10764 				    new_object->internal) {
10765 					new_shadow = new_object->shadow;
10766 					if (new_shadow == VM_OBJECT_NULL) {
10767 						break;
10768 					}
10769 					vm_object_lock_shared(new_shadow);
10770 					vm_object_unlock(new_object);
10771 					new_object = new_shadow;
10772 				}
10773 				if (new_object != VM_OBJECT_NULL) {
10774 					if (!new_object->internal) {
10775 						/*
10776 						 * The new mapping is backed
10777 						 * by an external object.  We
10778 						 * don't want malloc'ed memory
10779 						 * to be replaced with such a
10780 						 * non-anonymous mapping, so
10781 						 * let's go off the optimized
10782 						 * path...
10783 						 */
10784 						vm_map_copy_overwrite_aligned_src_not_internal++;
10785 						vm_object_unlock(new_object);
10786 						goto slow_copy;
10787 					}
10788 #if XNU_TARGET_OS_OSX
10789 					if (new_object->true_share ||
10790 					    new_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
10791 						/*
10792 						 * Same if there's a "true_share"
10793 						 * object in the shadow chain, or
10794 						 * an object with a non-default
10795 						 * (SYMMETRIC) copy strategy.
10796 						 */
10797 						vm_map_copy_overwrite_aligned_src_not_symmetric++;
10798 						vm_object_unlock(new_object);
10799 						goto slow_copy;
10800 					}
10801 #endif /* XNU_TARGET_OS_OSX */
10802 					vm_object_unlock(new_object);
10803 				}
10804 				/*
10805 				 * The new mapping is still backed by
10806 				 * anonymous (internal) memory, so it's
10807 				 * OK to substitute it for the original
10808 				 * malloc() mapping.
10809 				 */
10810 			}
10811 
10812 			if (old_object != VM_OBJECT_NULL) {
10813 				assert(!entry->vme_permanent);
10814 				if (entry->is_sub_map) {
10815 					if (entry->use_pmap) {
10816 #ifndef NO_NESTED_PMAP
10817 						pmap_unnest(dst_map->pmap,
10818 						    (addr64_t)entry->vme_start,
10819 						    entry->vme_end - entry->vme_start);
10820 #endif  /* NO_NESTED_PMAP */
10821 						if (dst_map->mapped_in_other_pmaps) {
10822 							/* clean up parent */
10823 							/* map/maps */
10824 							vm_map_submap_pmap_clean(
10825 								dst_map, entry->vme_start,
10826 								entry->vme_end,
10827 								VME_SUBMAP(entry),
10828 								VME_OFFSET(entry));
10829 						}
10830 					} else {
10831 						vm_map_submap_pmap_clean(
10832 							dst_map, entry->vme_start,
10833 							entry->vme_end,
10834 							VME_SUBMAP(entry),
10835 							VME_OFFSET(entry));
10836 					}
10837 					vm_map_deallocate(VME_SUBMAP(entry));
10838 				} else {
10839 					if (dst_map->mapped_in_other_pmaps) {
10840 						vm_object_pmap_protect_options(
10841 							VME_OBJECT(entry),
10842 							VME_OFFSET(entry),
10843 							entry->vme_end
10844 							- entry->vme_start,
10845 							PMAP_NULL,
10846 							PAGE_SIZE,
10847 							entry->vme_start,
10848 							VM_PROT_NONE,
10849 							PMAP_OPTIONS_REMOVE);
10850 					} else {
10851 						pmap_remove_options(
10852 							dst_map->pmap,
10853 							(addr64_t)(entry->vme_start),
10854 							(addr64_t)(entry->vme_end),
10855 							PMAP_OPTIONS_REMOVE);
10856 					}
10857 					vm_object_deallocate(old_object);
10858 				}
10859 			}
10860 
10861 			if (entry->iokit_acct) {
10862 				/* keep using iokit accounting */
10863 				entry->use_pmap = FALSE;
10864 			} else {
10865 				/* use pmap accounting */
10866 				entry->use_pmap = TRUE;
10867 			}
10868 			assert(!entry->vme_permanent);
10869 			VME_OBJECT_SET(entry, VME_OBJECT(copy_entry), false, 0);
10870 			object = VME_OBJECT(entry);
10871 			entry->needs_copy = copy_entry->needs_copy;
10872 			entry->wired_count = 0;
10873 			entry->user_wired_count = 0;
10874 			offset = VME_OFFSET(copy_entry);
10875 			VME_OFFSET_SET(entry, offset);
10876 
10877 			vm_map_copy_entry_unlink(copy, copy_entry);
10878 			vm_map_copy_entry_dispose(copy_entry);
10879 
10880 			/*
10881 			 * We could try to push pages into the pmap at this point, BUT
10882 			 * this optimization only saved on average 2 us per page when ALL
10883 			 * the pages in the source were currently mapped and ALL the pages
10884 			 * in the dest were touched.  If fewer than 2/3 of the pages were
10885 			 * touched, this optimization actually cost more cycles.  It also
10886 			 * puts a lot of pressure on the pmap layer w.r.t. mapping structures.
10887 			 */
10888 
10889 			/*
10890 			 *	Set up for the next iteration.  The map
10891 			 *	has not been unlocked, so the next
10892 			 *	address should be at the end of this
10893 			 *	entry, and the next map entry should be
10894 			 *	the one following it.
10895 			 */
10896 
10897 			start = tmp_entry->vme_end;
10898 			tmp_entry = tmp_entry->vme_next;
10899 		} else {
10900 			vm_map_version_t        version;
10901 			vm_object_t             dst_object;
10902 			vm_object_offset_t      dst_offset;
10903 			kern_return_t           r;
10904 
10905 slow_copy:
10906 			if (entry->needs_copy) {
10907 				VME_OBJECT_SHADOW(entry,
10908 				    (entry->vme_end -
10909 				    entry->vme_start),
10910 				    vm_map_always_shadow(dst_map));
10911 				entry->needs_copy = FALSE;
10912 			}
10913 
10914 			dst_object = VME_OBJECT(entry);
10915 			dst_offset = VME_OFFSET(entry);
10916 
10917 			/*
10918 			 *	Take an object reference, and record
10919 			 *	the map version information so that the
10920 			 *	map can be safely unlocked.
10921 			 */
10922 
10923 			if (dst_object == VM_OBJECT_NULL) {
10924 				/*
10925 				 * We would usually have just taken the
10926 				 * optimized path above if the destination
10927 				 * object has not been allocated yet.  But we
10928 				 * now disable that optimization if the copy
10929 				 * entry's object is not backed by anonymous
10930 				 * memory to avoid replacing malloc'ed
10931 				 * (i.e. re-usable) anonymous memory with a
10932 				 * not-so-anonymous mapping.
10933 				 * So we have to handle this case here and
10934 				 * allocate a new VM object for this map entry.
10935 				 */
10936 				dst_object = vm_object_allocate(
10937 					entry->vme_end - entry->vme_start);
10938 				dst_offset = 0;
10939 				VME_OBJECT_SET(entry, dst_object, false, 0);
10940 				VME_OFFSET_SET(entry, dst_offset);
10941 				assert(entry->use_pmap);
10942 			}
10943 
10944 			vm_object_reference(dst_object);
10945 
10946 			/* account for unlock bumping up timestamp */
10947 			version.main_timestamp = dst_map->timestamp + 1;
10948 
10949 			vm_map_unlock(dst_map);
10950 
10951 			/*
10952 			 *	Copy as much as possible in one pass
10953 			 */
10954 
10955 			copy_size = size;
10956 			r = vm_fault_copy(
10957 				VME_OBJECT(copy_entry),
10958 				VME_OFFSET(copy_entry),
10959 				&copy_size,
10960 				dst_object,
10961 				dst_offset,
10962 				dst_map,
10963 				&version,
10964 				THREAD_UNINT );
10965 
10966 			/*
10967 			 *	Release the object reference
10968 			 */
10969 
10970 			vm_object_deallocate(dst_object);
10971 
10972 			/*
10973 			 *	If a hard error occurred, return it now
10974 			 */
10975 
10976 			if (r != KERN_SUCCESS) {
10977 				return r;
10978 			}
10979 
10980 			if (copy_size != 0) {
10981 				/*
10982 				 *	Dispose of the copied region
10983 				 */
10984 
10985 				vm_map_copy_clip_end(copy, copy_entry,
10986 				    copy_entry->vme_start + copy_size);
10987 				vm_map_copy_entry_unlink(copy, copy_entry);
10988 				vm_object_deallocate(VME_OBJECT(copy_entry));
10989 				vm_map_copy_entry_dispose(copy_entry);
10990 			}
10991 
10992 			/*
10993 			 *	Pick up in the destination map where we left off.
10994 			 *
10995 			 *	Use the version information to avoid a lookup
10996 			 *	in the normal case.
10997 			 */
10998 
10999 			start += copy_size;
11000 			vm_map_lock(dst_map);
11001 			if (version.main_timestamp == dst_map->timestamp &&
11002 			    copy_size != 0) {
11003 				/* We can safely use saved tmp_entry value */
11004 
11005 				if (tmp_entry->map_aligned &&
11006 				    !VM_MAP_PAGE_ALIGNED(
11007 					    start,
11008 					    VM_MAP_PAGE_MASK(dst_map))) {
11009 					/* no longer map-aligned */
11010 					tmp_entry->map_aligned = FALSE;
11011 				}
11012 				vm_map_clip_end(dst_map, tmp_entry, start);
11013 				tmp_entry = tmp_entry->vme_next;
11014 			} else {
11015 				/* Must do lookup of tmp_entry */
11016 
11017 RetryLookup:
11018 				if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
11019 					vm_map_unlock(dst_map);
11020 					return KERN_INVALID_ADDRESS;
11021 				}
11022 				if (tmp_entry->map_aligned &&
11023 				    !VM_MAP_PAGE_ALIGNED(
11024 					    start,
11025 					    VM_MAP_PAGE_MASK(dst_map))) {
11026 					/* no longer map-aligned */
11027 					tmp_entry->map_aligned = FALSE;
11028 				}
11029 				vm_map_clip_start(dst_map, tmp_entry, start);
11030 			}
11031 		}
11032 	}/* while */
11033 
11034 	return KERN_SUCCESS;
11035 }/* vm_map_copy_overwrite_aligned */
11036 
11037 /*
11038  *	Routine: vm_map_copyin_kernel_buffer [internal use only]
11039  *
11040  *	Description:
11041  *		Copy in data to a kernel buffer from space in the
11042  *		source map. The original space may be optionally
11043  *		deallocated.
11044  *
11045  *		If successful, returns a new copy object.
11046  */
11047 static kern_return_t
11048 vm_map_copyin_kernel_buffer(
11049 	vm_map_t        src_map,
11050 	vm_map_offset_t src_addr,
11051 	vm_map_size_t   len,
11052 	boolean_t       src_destroy,
11053 	vm_map_copy_t   *copy_result)
11054 {
11055 	kern_return_t kr;
11056 	vm_map_copy_t copy;
11057 	void *kdata;
11058 
11059 	if (len > msg_ool_size_small) {
11060 		return KERN_INVALID_ARGUMENT;
11061 	}
11062 
11063 	kdata = kalloc_data(len, Z_WAITOK);
11064 	if (kdata == NULL) {
11065 		return KERN_RESOURCE_SHORTAGE;
11066 	}
11067 	kr = copyinmap(src_map, src_addr, kdata, (vm_size_t)len);
11068 	if (kr != KERN_SUCCESS) {
11069 		kfree_data(kdata, len);
11070 		return kr;
11071 	}
11072 
11073 	copy = vm_map_copy_allocate(VM_MAP_COPY_KERNEL_BUFFER);
11074 	copy->cpy_kdata = kdata;
11075 	copy->size = len;
11076 	copy->offset = 0;
11077 
11078 	if (src_destroy) {
11079 		vmr_flags_t flags = VM_MAP_REMOVE_INTERRUPTIBLE;
11080 
11081 		if (src_map == kernel_map) {
11082 			flags |= VM_MAP_REMOVE_KUNWIRE;
11083 		}
11084 
11085 		(void)vm_map_remove_guard(src_map,
11086 		    vm_map_trunc_page(src_addr, VM_MAP_PAGE_MASK(src_map)),
11087 		    vm_map_round_page(src_addr + len, VM_MAP_PAGE_MASK(src_map)),
11088 		    flags, KMEM_GUARD_NONE);
11089 	}
11090 
11091 	*copy_result = copy;
11092 	return KERN_SUCCESS;
11093 }
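/*
 * Minimal usage sketch (hypothetical in-file caller, error handling
 * elided); vm_map_copyin_internal() uses this routine the same way for
 * small copies:
 *
 *	vm_map_copy_t copy;
 *
 *	if (vm_map_copyin_kernel_buffer(src_map, src_addr, len,
 *	    FALSE, &copy) == KERN_SUCCESS) {
 *		... hand "copy" to vm_map_copyout*() or release it
 *		with vm_map_copy_discard() ...
 *	}
 *
 * "len" must not exceed msg_ool_size_small, per the check at the top.
 */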
11094 
11095 /*
11096  *	Routine: vm_map_copyout_kernel_buffer	[internal use only]
11097  *
11098  *	Description:
11099  *		Copy out data from a kernel buffer into space in the
11100  *		destination map. The space may optionally be dynamically
11101  *		allocated.
11102  *
11103  *		If successful, consumes the copy object.
11104  *		Otherwise, the caller is responsible for it.
11105  *
11106  *		Callers of this function must call vm_map_copy_require on
11107  *		previously created vm_map_copy_t or pass a newly created
11108  *		one to ensure that it hasn't been forged.
11109  */
11110 static int vm_map_copyout_kernel_buffer_failures = 0;
11111 static kern_return_t
11112 vm_map_copyout_kernel_buffer(
11113 	vm_map_t                map,
11114 	vm_map_address_t        *addr,  /* IN/OUT */
11115 	vm_map_copy_t           copy,
11116 	vm_map_size_t           copy_size,
11117 	boolean_t               overwrite,
11118 	boolean_t               consume_on_success)
11119 {
11120 	kern_return_t kr = KERN_SUCCESS;
11121 	thread_t thread = current_thread();
11122 
11123 	assert(copy->size == copy_size);
11124 
11125 	/*
11126 	 * check for corrupted vm_map_copy structure
11127 	 */
11128 	if (copy_size > msg_ool_size_small || copy->offset) {
11129 		panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld",
11130 		    (long long)copy->size, (long long)copy->offset);
11131 	}
11132 
11133 	if (!overwrite) {
11134 		/*
11135 		 * Allocate space in the target map for the data
11136 		 */
11137 		vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
11138 
11139 		if (map == kernel_map) {
11140 			vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
11141 		}
11142 
11143 		*addr = 0;
11144 		kr = vm_map_enter(map,
11145 		    addr,
11146 		    vm_map_round_page(copy_size,
11147 		    VM_MAP_PAGE_MASK(map)),
11148 		    (vm_map_offset_t) 0,
11149 		    vmk_flags,
11150 		    VM_OBJECT_NULL,
11151 		    (vm_object_offset_t) 0,
11152 		    FALSE,
11153 		    VM_PROT_DEFAULT,
11154 		    VM_PROT_ALL,
11155 		    VM_INHERIT_DEFAULT);
11156 		if (kr != KERN_SUCCESS) {
11157 			return kr;
11158 		}
11159 #if KASAN
11160 		if (map->pmap == kernel_pmap) {
11161 			kasan_notify_address(*addr, copy->size);
11162 		}
11163 #endif
11164 	}
11165 
11166 	/*
11167 	 * Copyout the data from the kernel buffer to the target map.
11168 	 */
11169 	if (thread->map == map) {
11170 		/*
11171 		 * If the target map is the current map, just do
11172 		 * the copy.
11173 		 */
11174 		assert((vm_size_t)copy_size == copy_size);
11175 		if (copyout(copy->cpy_kdata, *addr, (vm_size_t)copy_size)) {
11176 			kr = KERN_INVALID_ADDRESS;
11177 		}
11178 	} else {
11179 		vm_map_t oldmap;
11180 
11181 		/*
11182 		 * If the target map is another map, assume the
11183 		 * target's address space identity for the duration
11184 		 * of the copy.
11185 		 */
11186 		vm_map_reference(map);
11187 		oldmap = vm_map_switch(map);
11188 
11189 		assert((vm_size_t)copy_size == copy_size);
11190 		if (copyout(copy->cpy_kdata, *addr, (vm_size_t)copy_size)) {
11191 			vm_map_copyout_kernel_buffer_failures++;
11192 			kr = KERN_INVALID_ADDRESS;
11193 		}
11194 
11195 		(void) vm_map_switch(oldmap);
11196 		vm_map_deallocate(map);
11197 	}
11198 
11199 	if (kr != KERN_SUCCESS) {
11200 		/* the copy failed, clean up */
11201 		if (!overwrite) {
11202 			/*
11203 			 * Deallocate the space we allocated in the target map.
11204 			 */
11205 			(void) vm_map_remove(map,
11206 			    vm_map_trunc_page(*addr,
11207 			    VM_MAP_PAGE_MASK(map)),
11208 			    vm_map_round_page((*addr +
11209 			    vm_map_round_page(copy_size,
11210 			    VM_MAP_PAGE_MASK(map))),
11211 			    VM_MAP_PAGE_MASK(map)));
11212 			*addr = 0;
11213 		}
11214 	} else {
11215 		/* copy was successful, discard the copy structure */
11216 		if (consume_on_success) {
11217 			kfree_data(copy->cpy_kdata, copy_size);
11218 			zfree_id(ZONE_ID_VM_MAP_COPY, copy);
11219 		}
11220 	}
11221 
11222 	return kr;
11223 }
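/*
 * Editorial note on the copyout path above: copyout() operates on the
 * current thread's address space, so when the target map is not the
 * caller's, the code temporarily assumes the target's address-space
 * identity via vm_map_switch() (holding a map reference across the
 * switch) and restores the original map afterwards.
 */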
11224 
11225 /*
11226  *	Routine:	vm_map_copy_insert      [internal use only]
11227  *
11228  *	Description:
11229  *		Link a copy chain ("copy") into a map at the
11230  *		specified location (after "where").
11231  *
11232  *		Callers of this function must call vm_map_copy_require on
11233  *		previously created vm_map_copy_t or pass a newly created
11234  *		one to ensure that it hasn't been forged.
11235  *	Side effects:
11236  *		The copy chain is destroyed.
11237  */
11238 static void
11239 vm_map_copy_insert(
11240 	vm_map_t        map,
11241 	vm_map_entry_t  after_where,
11242 	vm_map_copy_t   copy)
11243 {
11244 	vm_map_entry_t  entry;
11245 
11246 	while (vm_map_copy_first_entry(copy) != vm_map_copy_to_entry(copy)) {
11247 		entry = vm_map_copy_first_entry(copy);
11248 		vm_map_copy_entry_unlink(copy, entry);
11249 		vm_map_store_entry_link(map, after_where, entry,
11250 		    VM_MAP_KERNEL_FLAGS_NONE);
11251 		after_where = entry;
11252 	}
11253 	zfree_id(ZONE_ID_VM_MAP_COPY, copy);
11254 }
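/*
 * After vm_map_copy_insert() returns, the entries that were chained on
 * "copy" live directly in "map" (linked in order after "after_where"),
 * and the now-empty copy header has been freed.  Only the header is
 * destroyed here; the memory the entries describe is untouched.
 */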
11255 
11256 /*
11257  * Callers of this function must call vm_map_copy_require on
11258  * previously created vm_map_copy_t or pass a newly created
11259  * one to ensure that it hasn't been forged.
11260  */
11261 void
11262 vm_map_copy_remap(
11263 	vm_map_t        map,
11264 	vm_map_entry_t  where,
11265 	vm_map_copy_t   copy,
11266 	vm_map_offset_t adjustment,
11267 	vm_prot_t       cur_prot,
11268 	vm_prot_t       max_prot,
11269 	vm_inherit_t    inheritance)
11270 {
11271 	vm_map_entry_t  copy_entry, new_entry;
11272 
11273 	for (copy_entry = vm_map_copy_first_entry(copy);
11274 	    copy_entry != vm_map_copy_to_entry(copy);
11275 	    copy_entry = copy_entry->vme_next) {
11276 		/* get a new VM map entry for the map */
11277 		new_entry = vm_map_entry_create(map);
11278 		/* copy the "copy entry" to the new entry */
11279 		vm_map_entry_copy(map, new_entry, copy_entry);
11280 		/* adjust "start" and "end" */
11281 		new_entry->vme_start += adjustment;
11282 		new_entry->vme_end += adjustment;
11283 		/* clear some attributes */
11284 		new_entry->inheritance = inheritance;
11285 		new_entry->protection = cur_prot;
11286 		new_entry->max_protection = max_prot;
11287 		new_entry->behavior = VM_BEHAVIOR_DEFAULT;
11288 		/* take an extra reference on the entry's "object" */
11289 		if (new_entry->is_sub_map) {
11290 			assert(!new_entry->use_pmap); /* not nested */
11291 			vm_map_reference(VME_SUBMAP(new_entry));
11292 		} else {
11293 			vm_object_reference(VME_OBJECT(new_entry));
11294 		}
11295 		/* insert the new entry in the map */
11296 		vm_map_store_entry_link(map, where, new_entry,
11297 		    VM_MAP_KERNEL_FLAGS_NONE);
11298 		/* continue inserting the "copy entries" after the new entry */
11299 		where = new_entry;
11300 	}
11301 }
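/*
 * Contrast with vm_map_copy_insert() above: vm_map_copy_remap() clones
 * each copy entry into a freshly created map entry (taking an extra
 * reference on the underlying object or submap) and leaves "copy"
 * itself intact, so the caller may still discard or reuse it.
 */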
11302 
11303 
11304 /*
11305  * Returns true if *size matches (or is in the range of) copy->size.
11306  * Upon returning true, the *size field is updated with the actual size of the
11307  * copy object (may be different for VM_MAP_COPY_ENTRY_LIST types)
11308  */
11309 boolean_t
11310 vm_map_copy_validate_size(
11311 	vm_map_t                dst_map,
11312 	vm_map_copy_t           copy,
11313 	vm_map_size_t           *size)
11314 {
11315 	if (copy == VM_MAP_COPY_NULL) {
11316 		return FALSE;
11317 	}
11318 
11319 	/*
11320 	 * Assert that the vm_map_copy is coming from the right
11321 	 * zone and hasn't been forged
11322 	 */
11323 	vm_map_copy_require(copy);
11324 
11325 	vm_map_size_t copy_sz = copy->size;
11326 	vm_map_size_t sz = *size;
11327 	switch (copy->type) {
11328 	case VM_MAP_COPY_KERNEL_BUFFER:
11329 		if (sz == copy_sz) {
11330 			return TRUE;
11331 		}
11332 		break;
11333 	case VM_MAP_COPY_ENTRY_LIST:
11334 		/*
11335 		 * potential page-size rounding prevents us from exactly
11336 		 * validating this flavor of vm_map_copy, but we can at least
11337 		 * assert that it's within a range.
11338 		 */
11339 		if (copy_sz >= sz &&
11340 		    copy_sz <= vm_map_round_page(sz, VM_MAP_PAGE_MASK(dst_map))) {
11341 			*size = copy_sz;
11342 			return TRUE;
11343 		}
11344 		break;
11345 	default:
11346 		break;
11347 	}
11348 	return FALSE;
11349 }
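/*
 * Example (hypothetical numbers, 4K-page dst_map): a caller passing
 * *size == 0x1200 will accept an entry-list copy whose copy->size lies
 * in [0x1200, 0x2000] (i.e. up to vm_map_round_page(0x1200)); on
 * success, *size is updated to the actual copy->size.
 */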
11350 
11351 /*
11352  *	Routine:	vm_map_copyout_size
11353  *
11354  *	Description:
11355  *		Copy out a copy chain ("copy") into newly-allocated
11356  *		space in the destination map. Uses a prevalidated
11357  *		size for the copy object (vm_map_copy_validate_size).
11358  *
11359  *		If successful, consumes the copy object.
11360  *		Otherwise, the caller is responsible for it.
11361  */
11362 kern_return_t
11363 vm_map_copyout_size(
11364 	vm_map_t                dst_map,
11365 	vm_map_address_t        *dst_addr,      /* OUT */
11366 	vm_map_copy_t           copy,
11367 	vm_map_size_t           copy_size)
11368 {
11369 	return vm_map_copyout_internal(dst_map, dst_addr, copy, copy_size,
11370 	           TRUE,                     /* consume_on_success */
11371 	           VM_PROT_DEFAULT,
11372 	           VM_PROT_ALL,
11373 	           VM_INHERIT_DEFAULT);
11374 }
11375 
11376 /*
11377  *	Routine:	vm_map_copyout
11378  *
11379  *	Description:
11380  *		Copy out a copy chain ("copy") into newly-allocated
11381  *		space in the destination map.
11382  *
11383  *		If successful, consumes the copy object.
11384  *		Otherwise, the caller is responsible for it.
11385  */
11386 kern_return_t
11387 vm_map_copyout(
11388 	vm_map_t                dst_map,
11389 	vm_map_address_t        *dst_addr,      /* OUT */
11390 	vm_map_copy_t           copy)
11391 {
11392 	return vm_map_copyout_internal(dst_map, dst_addr, copy, copy ? copy->size : 0,
11393 	           TRUE,                     /* consume_on_success */
11394 	           VM_PROT_DEFAULT,
11395 	           VM_PROT_ALL,
11396 	           VM_INHERIT_DEFAULT);
11397 }
11398 
11399 kern_return_t
11400 vm_map_copyout_internal(
11401 	vm_map_t                dst_map,
11402 	vm_map_address_t        *dst_addr,      /* OUT */
11403 	vm_map_copy_t           copy,
11404 	vm_map_size_t           copy_size,
11405 	boolean_t               consume_on_success,
11406 	vm_prot_t               cur_protection,
11407 	vm_prot_t               max_protection,
11408 	vm_inherit_t            inheritance)
11409 {
11410 	vm_map_size_t           size;
11411 	vm_map_size_t           adjustment;
11412 	vm_map_offset_t         start;
11413 	vm_object_offset_t      vm_copy_start;
11414 	vm_map_entry_t          last;
11415 	vm_map_entry_t          entry;
11416 	vm_map_copy_t           original_copy;
11417 	kern_return_t           kr;
11418 	vm_map_kernel_flags_t   vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
11419 
11420 	/*
11421 	 *	Check for null copy object.
11422 	 */
11423 
11424 	if (copy == VM_MAP_COPY_NULL) {
11425 		*dst_addr = 0;
11426 		return KERN_SUCCESS;
11427 	}
11428 
11429 	/*
11430 	 * Assert that the vm_map_copy is coming from the right
11431 	 * zone and hasn't been forged
11432 	 */
11433 	vm_map_copy_require(copy);
11434 
11435 	if (copy->size != copy_size) {
11436 		*dst_addr = 0;
11437 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOUT_INTERNAL_SIZE_ERROR), KERN_FAILURE /* arg */);
11438 		return KERN_FAILURE;
11439 	}
11440 
11441 	/*
11442 	 *	Check for special kernel buffer allocated
11443 	 *	by new_ipc_kmsg_copyin.
11444 	 */
11445 
11446 	if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
11447 		kr = vm_map_copyout_kernel_buffer(dst_map, dst_addr,
11448 		    copy, copy_size, FALSE,
11449 		    consume_on_success);
11450 		if (kr) {
11451 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOUT_KERNEL_BUFFER_ERROR), kr /* arg */);
11452 		}
11453 		return kr;
11454 	}
11455 
11456 	original_copy = copy;
11457 	if (copy->cpy_hdr.page_shift != VM_MAP_PAGE_SHIFT(dst_map)) {
11458 		vm_map_copy_t target_copy;
11459 		vm_map_offset_t overmap_start, overmap_end, trimmed_start;
11460 
11461 		target_copy = VM_MAP_COPY_NULL;
11462 		DEBUG4K_ADJUST("adjusting...\n");
11463 		kr = vm_map_copy_adjust_to_target(
11464 			copy,
11465 			0, /* offset */
11466 			copy->size, /* size */
11467 			dst_map,
11468 			TRUE, /* copy */
11469 			&target_copy,
11470 			&overmap_start,
11471 			&overmap_end,
11472 			&trimmed_start);
11473 		if (kr != KERN_SUCCESS) {
11474 			DEBUG4K_COPY("adjust failed 0x%x\n", kr);
11475 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOUT_INTERNAL_ADJUSTING_ERROR), kr /* arg */);
11476 			return kr;
11477 		}
11478 		DEBUG4K_COPY("copy %p (%d 0x%llx 0x%llx) dst_map %p (%d) target_copy %p (%d 0x%llx 0x%llx) overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx\n", copy, copy->cpy_hdr.page_shift, copy->offset, (uint64_t)copy->size, dst_map, VM_MAP_PAGE_SHIFT(dst_map), target_copy, target_copy->cpy_hdr.page_shift, target_copy->offset, (uint64_t)target_copy->size, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start);
11479 		if (target_copy != copy) {
11480 			copy = target_copy;
11481 		}
11482 		copy_size = copy->size;
11483 	}
11484 
11485 	/*
11486 	 *	Find space for the data
11487 	 */
11488 
11489 	vm_copy_start = vm_map_trunc_page((vm_map_size_t)copy->offset,
11490 	    VM_MAP_COPY_PAGE_MASK(copy));
11491 	size = vm_map_round_page((vm_map_size_t)copy->offset + copy_size,
11492 	    VM_MAP_COPY_PAGE_MASK(copy))
11493 	    - vm_copy_start;
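	/*
	 * Illustrative arithmetic (hypothetical values, 4K pages): with
	 * copy->offset == 0x1800 and copy_size == 0x2000, vm_copy_start is
	 * 0x1000 and size is 0x4000 - 0x1000 = 0x3000, i.e. the three pages
	 * the unaligned range touches.  Further down, *dst_addr becomes
	 * start + (copy->offset - vm_copy_start) = start + 0x800, restoring
	 * the original intra-page offset.
	 */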
11494 
11495 	vm_map_kernel_flags_update_range_id(&vmk_flags, dst_map);
11496 
11497 	vm_map_lock(dst_map);
11498 	kr = vm_map_locate_space(dst_map, size, 0, vmk_flags,
11499 	    &start, &last);
11500 	if (kr != KERN_SUCCESS) {
11501 		vm_map_unlock(dst_map);
11502 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COPYOUT_INTERNAL_SPACE_ERROR), kr /* arg */);
11503 		return kr;
11504 	}
11505 
11506 	adjustment = start - vm_copy_start;
11507 	if (!consume_on_success) {
11508 		/*
11509 		 * We're not allowed to consume "copy", so we'll have to
11510 		 * copy its map entries into the destination map below.
11511 		 * No need to re-allocate map entries from the correct
11512 		 * (pageable or not) zone, since we'll get new map entries
11513 		 * during the transfer.
11514 		 * We'll also adjust the map entries's "start" and "end"
11515 		 * during the transfer, to keep "copy"'s entries consistent
11516 		 * with its "offset".
11517 		 */
11518 		goto after_adjustments;
11519 	}
11520 
11521 	/*
11522 	 *	Since we're going to just drop the map
11523 	 *	entries from the copy into the destination
11524 	 *	map, they must come from the same pool.
11525 	 */
11526 
11527 	if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
11528 		/*
11529 		 * Mismatches occur when dealing with the default
11530 		 * pager.
11531 		 */
11532 		vm_map_entry_t  next, new;
11533 
11534 		/*
11535 		 * Find the zone that the copies were allocated from
11536 		 */
11537 
11538 		entry = vm_map_copy_first_entry(copy);
11539 
11540 		/*
11541 		 * Reinitialize the copy so that vm_map_copy_entry_link
11542 		 * will work.
11543 		 */
11544 		vm_map_store_copy_reset(copy, entry);
11545 		copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
11546 
11547 		/*
11548 		 * Copy each entry.
11549 		 */
11550 		while (entry != vm_map_copy_to_entry(copy)) {
11551 			new = vm_map_copy_entry_create(copy);
11552 			vm_map_entry_copy_full(new, entry);
11553 			new->vme_no_copy_on_read = FALSE;
11554 			assert(!new->iokit_acct);
11555 			if (new->is_sub_map) {
11556 				/* clr address space specifics */
11557 				new->use_pmap = FALSE;
11558 			}
11559 			vm_map_copy_entry_link(copy,
11560 			    vm_map_copy_last_entry(copy),
11561 			    new);
11562 			next = entry->vme_next;
11563 			vm_map_entry_dispose(entry);
11564 			entry = next;
11565 		}
11566 	}
11567 
11568 	/*
11569 	 *	Adjust the addresses in the copy chain, and
11570 	 *	reset the region attributes.
11571 	 */
11572 
11573 	for (entry = vm_map_copy_first_entry(copy);
11574 	    entry != vm_map_copy_to_entry(copy);
11575 	    entry = entry->vme_next) {
11576 		if (VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT) {
11577 			/*
11578 			 * We're injecting this copy entry into a map that
11579 			 * has the standard page alignment, so clear
11580 			 * "map_aligned" (which might have been inherited
11581 			 * from the original map entry).
11582 			 */
11583 			entry->map_aligned = FALSE;
11584 		}
11585 
11586 		entry->vme_start += adjustment;
11587 		entry->vme_end += adjustment;
11588 
11589 		if (entry->map_aligned) {
11590 			assert(VM_MAP_PAGE_ALIGNED(entry->vme_start,
11591 			    VM_MAP_PAGE_MASK(dst_map)));
11592 			assert(VM_MAP_PAGE_ALIGNED(entry->vme_end,
11593 			    VM_MAP_PAGE_MASK(dst_map)));
11594 		}
11595 
11596 		entry->inheritance = VM_INHERIT_DEFAULT;
11597 		entry->protection = VM_PROT_DEFAULT;
11598 		entry->max_protection = VM_PROT_ALL;
11599 		entry->behavior = VM_BEHAVIOR_DEFAULT;
11600 
11601 		/*
11602 		 * If the entry is now wired,
11603 		 * map the pages into the destination map.
11604 		 */
11605 		if (entry->wired_count != 0) {
11606 			vm_map_offset_t va;
11607 			vm_object_offset_t       offset;
11608 			vm_object_t object;
11609 			vm_prot_t prot;
11610 			int     type_of_fault;
11611 			uint8_t object_lock_type = OBJECT_LOCK_EXCLUSIVE;
11612 
11613 			/* TODO4K would need to use actual page size */
11614 			assert(VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT);
11615 
11616 			object = VME_OBJECT(entry);
11617 			offset = VME_OFFSET(entry);
11618 			va = entry->vme_start;
11619 
11620 			pmap_pageable(dst_map->pmap,
11621 			    entry->vme_start,
11622 			    entry->vme_end,
11623 			    TRUE);
11624 
11625 			while (va < entry->vme_end) {
11626 				vm_page_t       m;
11627 				struct vm_object_fault_info fault_info = {};
11628 
11629 				/*
11630 				 * Look up the page in the object.
11631 				 * Assert that the page will be found in the
11632 				 * top object:
11633 				 * either
11634 				 *	the object was newly created by
11635 				 *	vm_object_copy_slowly, and has
11636 				 *	copies of all of the pages from
11637 				 *	the source object
11638 				 * or
11639 				 *	the object was moved from the old
11640 				 *	map entry; because the old map
11641 				 *	entry was wired, all of the pages
11642 				 *	were in the top-level object.
11643 				 *	(XXX not true if we wire pages for
11644 				 *	 reading)
11645 				 */
11646 				vm_object_lock(object);
11647 
11648 				m = vm_page_lookup(object, offset);
11649 				if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) ||
11650 				    m->vmp_absent) {
11651 					panic("vm_map_copyout: wiring %p", m);
11652 				}
11653 
11654 				prot = entry->protection;
11655 
11656 				if (override_nx(dst_map, VME_ALIAS(entry)) &&
11657 				    prot) {
11658 					prot |= VM_PROT_EXECUTE;
11659 				}
11660 
11661 				type_of_fault = DBG_CACHE_HIT_FAULT;
11662 
11663 				fault_info.user_tag = VME_ALIAS(entry);
11664 				fault_info.pmap_options = 0;
11665 				if (entry->iokit_acct ||
11666 				    (!entry->is_sub_map && !entry->use_pmap)) {
11667 					fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
11668 				}
11669 				if (entry->vme_xnu_user_debug &&
11670 				    !VM_PAGE_OBJECT(m)->code_signed) {
11671 					/*
11672 					 * Modified code-signed executable
11673 					 * region: this page does not belong
11674 					 * to a code-signed VM object, so it
11675 					 * must have been copied and should
11676 					 * therefore be typed XNU_USER_DEBUG
11677 					 * rather than XNU_USER_EXEC.
11678 					 */
11679 					fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
11680 				}
11681 
11682 				vm_fault_enter(m,
11683 				    dst_map->pmap,
11684 				    va,
11685 				    PAGE_SIZE, 0,
11686 				    prot,
11687 				    prot,
11688 				    VM_PAGE_WIRED(m),
11689 				    FALSE,            /* change_wiring */
11690 				    VM_KERN_MEMORY_NONE,            /* tag - not wiring */
11691 				    &fault_info,
11692 				    NULL,             /* need_retry */
11693 				    &type_of_fault,
11694 				    &object_lock_type); /*Exclusive mode lock. Will remain unchanged.*/
11695 
11696 				vm_object_unlock(object);
11697 
11698 				offset += PAGE_SIZE_64;
11699 				va += PAGE_SIZE;
11700 			}
11701 		}
11702 	}
11703 
11704 after_adjustments:
11705 
11706 	/*
11707 	 *	Correct the page alignment for the result
11708 	 */
11709 
11710 	*dst_addr = start + (copy->offset - vm_copy_start);
11711 
11712 #if KASAN
11713 	kasan_notify_address(*dst_addr, size);
11714 #endif
11715 
11716 	/*
11717 	 *	Update the hints and the map size
11718 	 */
11719 
11720 	if (consume_on_success) {
11721 		SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
11722 	} else {
11723 		SAVE_HINT_MAP_WRITE(dst_map, last);
11724 	}
11725 
11726 	dst_map->size += size;
11727 
11728 	/*
11729 	 *	Link in the copy
11730 	 */
11731 
11732 	if (consume_on_success) {
11733 		vm_map_copy_insert(dst_map, last, copy);
11734 		if (copy != original_copy) {
11735 			vm_map_copy_discard(original_copy);
11736 			original_copy = VM_MAP_COPY_NULL;
11737 		}
11738 	} else {
11739 		vm_map_copy_remap(dst_map, last, copy, adjustment,
11740 		    cur_protection, max_protection,
11741 		    inheritance);
11742 		if (copy != original_copy && original_copy != VM_MAP_COPY_NULL) {
11743 			vm_map_copy_discard(copy);
11744 			copy = original_copy;
11745 		}
11746 	}
11747 
11748 
11749 	vm_map_unlock(dst_map);
11750 
11751 	/*
11752 	 * XXX	If wiring_required, call vm_map_pageable
11753 	 */
11754 
11755 	return KERN_SUCCESS;
11756 }
11757 
11758 /*
11759  *	Routine:	vm_map_copyin
11760  *
11761  *	Description:
11762  *		see vm_map_copyin_common.  Exported via Unsupported.exports.
11763  *
11764  */
11765 
11766 #undef vm_map_copyin
11767 
11768 kern_return_t
11769 vm_map_copyin(
11770 	vm_map_t                        src_map,
11771 	vm_map_address_t        src_addr,
11772 	vm_map_size_t           len,
11773 	boolean_t                       src_destroy,
11774 	vm_map_copy_t           *copy_result)   /* OUT */
11775 {
11776 	return vm_map_copyin_common(src_map, src_addr, len, src_destroy,
11777 	           FALSE, copy_result, FALSE);
11778 }
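/*
 * Typical round trip (illustrative, following the semantics documented
 * for these routines; error handling elided):
 *
 *	vm_map_copy_t copy;
 *	vm_map_address_t dst_addr;
 *
 *	if (vm_map_copyin(src_map, src_addr, len, FALSE, &copy)
 *	    == KERN_SUCCESS &&
 *	    vm_map_copyout(dst_map, &dst_addr, copy) != KERN_SUCCESS) {
 *		vm_map_copy_discard(copy);	/* not consumed on failure */
 *	}
 *
 * vm_map_copyout() consumes "copy" on success; on failure the caller
 * remains responsible for it, as its description notes.
 */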
11779 
11780 /*
11781  *	Routine:	vm_map_copyin_common
11782  *
11783  *	Description:
11784  *		Copy the specified region (src_addr, len) from the
11785  *		source address space (src_map), possibly removing
11786  *		the region from the source address space (src_destroy).
11787  *
11788  *	Returns:
11789  *		A vm_map_copy_t object (copy_result), suitable for
11790  *		insertion into another address space (using vm_map_copyout),
11791  *		copying over another address space region (using
11792  *		vm_map_copy_overwrite).  If the copy is unused, it
11793  *		should be destroyed (using vm_map_copy_discard).
11794  *
11795  *	In/out conditions:
11796  *		The source map should not be locked on entry.
11797  */
11798 
11799 typedef struct submap_map {
11800 	vm_map_t        parent_map;
11801 	vm_map_offset_t base_start;
11802 	vm_map_offset_t base_end;
11803 	vm_map_size_t   base_len;
11804 	struct submap_map *next;
11805 } submap_map_t;
11806 
11807 kern_return_t
11808 vm_map_copyin_common(
11809 	vm_map_t        src_map,
11810 	vm_map_address_t src_addr,
11811 	vm_map_size_t   len,
11812 	boolean_t       src_destroy,
11813 	__unused boolean_t      src_volatile,
11814 	vm_map_copy_t   *copy_result,   /* OUT */
11815 	boolean_t       use_maxprot)
11816 {
11817 	int flags;
11818 
11819 	flags = 0;
11820 	if (src_destroy) {
11821 		flags |= VM_MAP_COPYIN_SRC_DESTROY;
11822 	}
11823 	if (use_maxprot) {
11824 		flags |= VM_MAP_COPYIN_USE_MAXPROT;
11825 	}
11826 	return vm_map_copyin_internal(src_map,
11827 	           src_addr,
11828 	           len,
11829 	           flags,
11830 	           copy_result);
11831 }
11832 kern_return_t
11833 vm_map_copyin_internal(
11834 	vm_map_t        src_map,
11835 	vm_map_address_t src_addr,
11836 	vm_map_size_t   len,
11837 	int             flags,
11838 	vm_map_copy_t   *copy_result)   /* OUT */
11839 {
11840 	vm_map_entry_t  tmp_entry;      /* Result of last map lookup --
11841 	                                 * in multi-level lookup, this
11842 	                                 * entry contains the actual
11843 	                                 * vm_object/offset.
11844 	                                 */
11845 	vm_map_entry_t  new_entry = VM_MAP_ENTRY_NULL;  /* Map entry for copy */
11846 
11847 	vm_map_offset_t src_start;      /* Start of current entry --
11848 	                                 * where copy is taking place now
11849 	                                 */
11850 	vm_map_offset_t src_end;        /* End of entire region to be
11851 	                                 * copied */
11852 	vm_map_offset_t src_base;
11853 	vm_map_t        base_map = src_map;
11854 	boolean_t       map_share = FALSE;
11855 	submap_map_t    *parent_maps = NULL;
11856 
11857 	vm_map_copy_t   copy;           /* Resulting copy */
11858 	vm_map_address_t copy_addr;
11859 	vm_map_size_t   copy_size;
11860 	boolean_t       src_destroy;
11861 	boolean_t       use_maxprot;
11862 	boolean_t       preserve_purgeable;
11863 	boolean_t       entry_was_shared;
11864 	vm_map_entry_t  saved_src_entry;
11865 
11866 	if (flags & ~VM_MAP_COPYIN_ALL_FLAGS) {
11867 		return KERN_INVALID_ARGUMENT;
11868 	}
11869 
11870 #if CONFIG_KERNEL_TAGGING
11871 	if (src_map->pmap == kernel_pmap) {
11872 		src_addr = vm_memtag_canonicalize_address(src_addr);
11873 	}
11874 #endif /* CONFIG_KERNEL_TAGGING */
11875 
11876 	src_destroy = (flags & VM_MAP_COPYIN_SRC_DESTROY) ? TRUE : FALSE;
11877 	use_maxprot = (flags & VM_MAP_COPYIN_USE_MAXPROT) ? TRUE : FALSE;
11878 	preserve_purgeable =
11879 	    (flags & VM_MAP_COPYIN_PRESERVE_PURGEABLE) ? TRUE : FALSE;
11880 
11881 	/*
11882 	 *	Check for copies of zero bytes.
11883 	 */
11884 
11885 	if (len == 0) {
11886 		*copy_result = VM_MAP_COPY_NULL;
11887 		return KERN_SUCCESS;
11888 	}
11889 
11890 	/*
11891 	 *	Check that the end address doesn't overflow
11892 	 */
11893 	if (__improbable(vm_map_range_overflows(src_map, src_addr, len))) {
11894 		return KERN_INVALID_ADDRESS;
11895 	}
11896 	src_end = src_addr + len;
11897 	if (src_end < src_addr) {
11898 		return KERN_INVALID_ADDRESS;
11899 	}
11900 
11901 	/*
11902 	 *	Compute (page aligned) start and end of region
11903 	 */
11904 	src_start = vm_map_trunc_page(src_addr,
11905 	    VM_MAP_PAGE_MASK(src_map));
11906 	src_end = vm_map_round_page(src_end,
11907 	    VM_MAP_PAGE_MASK(src_map));
11908 	if (src_end < src_addr) {
11909 		return KERN_INVALID_ADDRESS;
11910 	}
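	/*
	 * E.g. (hypothetical values, 4K pages): src_addr == 0x5800 and
	 * len == 0x1000 yield src_start == 0x5000 and src_end == 0x7000.
	 * The rounding can only wrap at the very top of the address space,
	 * which the overflow check above catches.
	 */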
11911 
11912 	/*
11913 	 * If the copy is sufficiently small, use a kernel buffer instead
11914 	 * of making a virtual copy.  The theory being that the cost of
11915 	 * setting up VM (and taking C-O-W faults) dominates the copy costs
11916 	 * for small regions.
11917 	 */
11918 	if ((len <= msg_ool_size_small) &&
11919 	    !use_maxprot &&
11920 	    !preserve_purgeable &&
11921 	    !(flags & VM_MAP_COPYIN_ENTRY_LIST) &&
11922 	    /*
11923 	     * Since the "msg_ool_size_small" threshold was increased and
11924 	     * vm_map_copyin_kernel_buffer() doesn't handle accesses beyond the
11925 	     * address space limits, we revert to doing a virtual copy if the
11926 	     * copied range goes beyond those limits.  Otherwise, mach_vm_read()
11927 	     * of the commpage would now fail when it used to work.
11928 	     */
11929 	    (src_start >= vm_map_min(src_map) &&
11930 	    src_start < vm_map_max(src_map) &&
11931 	    src_end >= vm_map_min(src_map) &&
11932 	    src_end < vm_map_max(src_map))) {
11933 		return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
11934 		           src_destroy, copy_result);
11935 	}
11936 
11937 	/*
11938 	 *	Allocate a header element for the list.
11939 	 *
11940 	 *	Use the start and end in the header to
11941 	 *	remember the endpoints prior to rounding.
11942 	 */
11943 
11944 	copy = vm_map_copy_allocate(VM_MAP_COPY_ENTRY_LIST);
11945 	copy->cpy_hdr.entries_pageable = TRUE;
11946 	copy->cpy_hdr.page_shift = (uint16_t)VM_MAP_PAGE_SHIFT(src_map);
11947 	copy->offset = src_addr;
11948 	copy->size = len;
11949 
11950 	new_entry = vm_map_copy_entry_create(copy);
11951 
11952 #define RETURN(x)                                               \
11953 	MACRO_BEGIN                                             \
11954 	vm_map_unlock(src_map);                                 \
11955 	if(src_map != base_map)                                 \
11956 	        vm_map_deallocate(src_map);                     \
11957 	if (new_entry != VM_MAP_ENTRY_NULL)                     \
11958 	        vm_map_copy_entry_dispose(new_entry);           \
11959 	vm_map_copy_discard(copy);                              \
11960 	{                                                       \
11961 	        submap_map_t	*_ptr;                          \
11962                                                                 \
11963 	        for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \
11964 	                parent_maps=parent_maps->next;          \
11965 	                if (_ptr->parent_map != base_map)       \
11966 	                        vm_map_deallocate(_ptr->parent_map);    \
11967 	                kfree_type(submap_map_t, _ptr);         \
11968 	        }                                               \
11969 	}                                                       \
11970 	MACRO_RETURN(x);                                        \
11971 	MACRO_END
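	/*
	 * The RETURN() macro centralizes error unwinding: it drops the
	 * current (possibly submap) lock, releases the extra reference
	 * taken on any non-base src_map, disposes of the pending
	 * "new_entry", discards the partial "copy", and frees the whole
	 * "parent_maps" chain built while descending through submaps.
	 */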
11972 
11973 	/*
11974 	 *	Find the beginning of the region.
11975 	 */
11976 
11977 	vm_map_lock(src_map);
11978 
11979 	/*
11980 	 * Lookup the original "src_addr" rather than the truncated
11981 	 * "src_start", in case "src_start" falls in a non-map-aligned
11982 	 * map entry *before* the map entry that contains "src_addr"...
11983 	 */
11984 	if (!vm_map_lookup_entry(src_map, src_addr, &tmp_entry)) {
11985 		RETURN(KERN_INVALID_ADDRESS);
11986 	}
11987 	if (!tmp_entry->is_sub_map) {
11988 		/*
11989 		 * ... but clip to the map-rounded "src_start" rather than
11990 		 * "src_addr" to preserve map-alignment.  We'll adjust the
11991 		 * first copy entry at the end, if needed.
11992 		 */
11993 		vm_map_clip_start(src_map, tmp_entry, src_start);
11994 	}
11995 	if (src_start < tmp_entry->vme_start) {
11996 		/*
11997 		 * Move "src_start" up to the start of the
11998 		 * first map entry to copy.
11999 		 */
12000 		src_start = tmp_entry->vme_start;
12001 	}
12002 	/* set for later submap fix-up */
12003 	copy_addr = src_start;
12004 
12005 	/*
12006 	 *	Go through entries until we get to the end.
12007 	 */
12008 
12009 	while (TRUE) {
12010 		vm_map_entry_t  src_entry = tmp_entry;  /* Top-level entry */
12011 		vm_map_size_t   src_size;               /* Size of source
12012 		                                         * map entry (in both
12013 		                                         * maps)
12014 		                                         */
12015 
12016 		vm_object_t             src_object;     /* Object to copy */
12017 		vm_object_offset_t      src_offset;
12018 
12019 		vm_object_t             new_copy_object;/* vm_object_copy_* result */
12020 
12021 		boolean_t       src_needs_copy;         /* Should source map
12022 		                                         * be made read-only
12023 		                                         * for copy-on-write?
12024 		                                         */
12025 
12026 		boolean_t       new_entry_needs_copy;   /* Will new entry be COW? */
12027 
12028 		boolean_t       was_wired;              /* Was source wired? */
12029 		boolean_t       saved_used_for_jit;     /* Saved used_for_jit. */
12030 		vm_map_version_t version;               /* Version before locks
12031 		                                         * dropped to make copy
12032 		                                         */
12033 		kern_return_t   result;                 /* Return value from
12034 		                                         * copy_strategically.
12035 		                                         */
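		/*
		 * Descend through any submaps covering "src_start".
		 * Each level pushes a submap_map_t on "parent_maps" so
		 * that, once this submap range has been copied, the
		 * traversal can resume in the parent map (see the pop
		 * loop after "CopySuccessful" below).
		 */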
12036 		while (tmp_entry->is_sub_map) {
12037 			vm_map_size_t submap_len;
12038 			submap_map_t *ptr;
12039 
12040 			ptr = kalloc_type(submap_map_t, Z_WAITOK);
12041 			ptr->next = parent_maps;
12042 			parent_maps = ptr;
12043 			ptr->parent_map = src_map;
12044 			ptr->base_start = src_start;
12045 			ptr->base_end = src_end;
12046 			submap_len = tmp_entry->vme_end - src_start;
12047 			if (submap_len > (src_end - src_start)) {
12048 				submap_len = src_end - src_start;
12049 			}
12050 			ptr->base_len = submap_len;
12051 
12052 			src_start -= tmp_entry->vme_start;
12053 			src_start += VME_OFFSET(tmp_entry);
12054 			src_end = src_start + submap_len;
12055 			src_map = VME_SUBMAP(tmp_entry);
12056 			vm_map_lock(src_map);
12057 			/* keep an outstanding reference for all maps in */
12058 			/* the chain of parent maps, except the base map */
12059 			vm_map_reference(src_map);
12060 			vm_map_unlock(ptr->parent_map);
12061 			if (!vm_map_lookup_entry(
12062 				    src_map, src_start, &tmp_entry)) {
12063 				RETURN(KERN_INVALID_ADDRESS);
12064 			}
12065 			map_share = TRUE;
12066 			if (!tmp_entry->is_sub_map) {
12067 				vm_map_clip_start(src_map, tmp_entry, src_start);
12068 			}
12069 			src_entry = tmp_entry;
12070 		}
12071 		/* we are now in the lowest level submap... */
12072 
12073 		if ((VME_OBJECT(tmp_entry) != VM_OBJECT_NULL) &&
12074 		    (VME_OBJECT(tmp_entry)->phys_contiguous)) {
12075 		/* This is not supported for now. In future  */
12076 		/* we will need to detect the phys_contig    */
12077 		/* condition and then upgrade copy_slowly    */
12078 		/* to do a physical copy from the device-mem */
12079 		/* based object. We can piggy-back off of    */
12080 		/* the was_wired boolean to set up the       */
12081 		/* proper handling */
12082 			RETURN(KERN_PROTECTION_FAILURE);
12083 		}
12084 		/*
12085 		 *	Create a new address map entry to hold the result.
12086 		 *	Fill in the fields from the appropriate source entries.
12087 		 *	We must unlock the source map to do this if we need
12088 		 *	to allocate a map entry.
12089 		 */
12090 		if (new_entry == VM_MAP_ENTRY_NULL) {
12091 			version.main_timestamp = src_map->timestamp;
12092 			vm_map_unlock(src_map);
12093 
12094 			new_entry = vm_map_copy_entry_create(copy);
12095 
12096 			vm_map_lock(src_map);
12097 			if ((version.main_timestamp + 1) != src_map->timestamp) {
12098 				if (!vm_map_lookup_entry(src_map, src_start,
12099 				    &tmp_entry)) {
12100 					RETURN(KERN_INVALID_ADDRESS);
12101 				}
12102 				if (!tmp_entry->is_sub_map) {
12103 					vm_map_clip_start(src_map, tmp_entry, src_start);
12104 				}
12105 				continue; /* restart w/ new tmp_entry */
12106 			}
12107 		}
12108 
12109 		/*
12110 		 *	Verify that the region can be read.
12111 		 */
12112 		if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
12113 		    !use_maxprot) ||
12114 		    (src_entry->max_protection & VM_PROT_READ) == 0) {
12115 			RETURN(KERN_PROTECTION_FAILURE);
12116 		}
12117 
12118 		/*
12119 		 *	Clip against the endpoints of the entire region.
12120 		 */
12121 
12122 		vm_map_clip_end(src_map, src_entry, src_end);
12123 
12124 		src_size = src_entry->vme_end - src_start;
12125 		src_object = VME_OBJECT(src_entry);
12126 		src_offset = VME_OFFSET(src_entry);
12127 		was_wired = (src_entry->wired_count != 0);
12128 
12129 		vm_map_entry_copy(src_map, new_entry, src_entry);
12130 		if (new_entry->is_sub_map) {
12131 			/* clr address space specifics */
12132 			new_entry->use_pmap = FALSE;
12133 		} else {
12134 			/*
12135 			 * We're dealing with a copy-on-write operation,
12136 			 * so the resulting mapping should not inherit the
12137 			 * original mapping's accounting settings.
12138 			 * "iokit_acct" should have been cleared in
12139 			 * vm_map_entry_copy().
12140 			 * "use_pmap" should be reset to its default (TRUE)
12141 			 * so that the new mapping gets accounted for in
12142 			 * the task's memory footprint.
12143 			 */
12144 			assert(!new_entry->iokit_acct);
12145 			new_entry->use_pmap = TRUE;
12146 		}
12147 
12148 		/*
12149 		 *	Attempt non-blocking copy-on-write optimizations.
12150 		 */
12151 
12152 		/*
12153 		 * If we are destroying the source, and the object
12154 		 * is internal, we could move the object reference
12155 		 * from the source to the copy.  The copy is
12156 		 * copy-on-write only if the source is.
12157 		 * We make another reference to the object, because
12158 		 * destroying the source entry will deallocate it.
12159 		 *
12160 		 * This memory transfer has to be atomic, (to prevent
12161 		 * the VM object from being shared or copied while
12162 		 * it's being moved here), so we could only do this
12163 		 * if we won't have to unlock the VM map until the
12164 		 * original mapping has been fully removed.
12165 		 */
12166 
12167 RestartCopy:
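		/*
		 * Try the symmetric copy-on-write fast path first:
		 * vm_object_copy_quickly() is attempted only when the
		 * source object is absent, or the entry is neither wired
		 * nor shared.  If it fails or does not apply, fall
		 * through to vm_object_copy_slowly() (physical copy),
		 * vm_object_copy_delayed() or
		 * vm_object_copy_strategically() further below.
		 */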
12168 		if ((src_object == VM_OBJECT_NULL ||
12169 		    (!was_wired && !map_share && !tmp_entry->is_shared
12170 		    && !(debug4k_no_cow_copyin && VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT))) &&
12171 		    vm_object_copy_quickly(
12172 			    VME_OBJECT(new_entry),
12173 			    src_offset,
12174 			    src_size,
12175 			    &src_needs_copy,
12176 			    &new_entry_needs_copy)) {
12177 			new_entry->needs_copy = new_entry_needs_copy;
12178 
12179 			/*
12180 			 *	Handle copy-on-write obligations
12181 			 */
12182 
12183 			if (src_needs_copy && !tmp_entry->needs_copy) {
12184 				vm_prot_t prot;
12185 
12186 				prot = src_entry->protection & ~VM_PROT_WRITE;
12187 
12188 				if (override_nx(src_map, VME_ALIAS(src_entry))
12189 				    && prot) {
12190 					prot |= VM_PROT_EXECUTE;
12191 				}
12192 
12193 				vm_object_pmap_protect(
12194 					src_object,
12195 					src_offset,
12196 					src_size,
12197 					(src_entry->is_shared ?
12198 					PMAP_NULL
12199 					: src_map->pmap),
12200 					VM_MAP_PAGE_SIZE(src_map),
12201 					src_entry->vme_start,
12202 					prot);
12203 
12204 				assert(tmp_entry->wired_count == 0);
12205 				tmp_entry->needs_copy = TRUE;
12206 			}
12207 
12208 			/*
12209 			 *	The map has never been unlocked, so it's safe
12210 			 *	to move to the next entry rather than doing
12211 			 *	another lookup.
12212 			 */
12213 
12214 			goto CopySuccessful;
12215 		}
12216 
12217 		entry_was_shared = tmp_entry->is_shared;
12218 
12219 		/*
12220 		 *	Take an object reference, so that we may
12221 		 *	release the map lock(s).
12222 		 */
12223 
12224 		assert(src_object != VM_OBJECT_NULL);
12225 		vm_object_reference(src_object);
12226 
12227 		/*
12228 		 *	Record the timestamp for later verification.
12229 		 *	Unlock the map.
12230 		 */
12231 
12232 		version.main_timestamp = src_map->timestamp;
12233 		vm_map_unlock(src_map); /* Increments timestamp once! */
12234 		saved_src_entry = src_entry;
12235 		tmp_entry = VM_MAP_ENTRY_NULL;
12236 		src_entry = VM_MAP_ENTRY_NULL;
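		/*
		 * From here until the map is re-locked and the timestamp
		 * re-checked, "saved_src_entry" must not be dereferenced:
		 * the map entry could be clipped, coalesced or freed
		 * while the map is unlocked.
		 */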
12237 
12238 		/*
12239 		 *	Perform the copy
12240 		 */
12241 
12242 		if (was_wired ||
12243 		    (src_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK &&
12244 		    !(flags & VM_MAP_COPYIN_FORK)) ||
12245 		    (debug4k_no_cow_copyin &&
12246 		    VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT)) {
12247 CopySlowly:
12248 			vm_object_lock(src_object);
12249 			result = vm_object_copy_slowly(
12250 				src_object,
12251 				src_offset,
12252 				src_size,
12253 				THREAD_UNINT,
12254 				&new_copy_object);
12255 			/* VME_OBJECT_SET will reset used_for_jit|tpro, so preserve it. */
12256 			saved_used_for_jit = new_entry->used_for_jit;
12257 			VME_OBJECT_SET(new_entry, new_copy_object, false, 0);
12258 			new_entry->used_for_jit = saved_used_for_jit;
12259 			VME_OFFSET_SET(new_entry,
12260 			    src_offset - vm_object_trunc_page(src_offset));
12261 			new_entry->needs_copy = FALSE;
12262 		} else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
12263 		    (entry_was_shared || map_share)) {
12264 			vm_object_t new_object;
12265 
12266 			vm_object_lock_shared(src_object);
12267 			new_object = vm_object_copy_delayed(
12268 				src_object,
12269 				src_offset,
12270 				src_size,
12271 				TRUE);
12272 			if (new_object == VM_OBJECT_NULL) {
12273 				goto CopySlowly;
12274 			}
12275 
12276 			VME_OBJECT_SET(new_entry, new_object, false, 0);
12277 			assert(new_entry->wired_count == 0);
12278 			new_entry->needs_copy = TRUE;
12279 			assert(!new_entry->iokit_acct);
12280 			assert(new_object->purgable == VM_PURGABLE_DENY);
12281 			assertf(new_entry->use_pmap, "src_map %p new_entry %p\n", src_map, new_entry);
12282 			result = KERN_SUCCESS;
12283 		} else {
12284 			vm_object_offset_t new_offset;
12285 			new_offset = VME_OFFSET(new_entry);
12286 			result = vm_object_copy_strategically(src_object,
12287 			    src_offset,
12288 			    src_size,
12289 			    (flags & VM_MAP_COPYIN_FORK),
12290 			    &new_copy_object,
12291 			    &new_offset,
12292 			    &new_entry_needs_copy);
12293 			/* VME_OBJECT_SET will reset used_for_jit, so preserve it. */
12294 			saved_used_for_jit = new_entry->used_for_jit;
12295 			VME_OBJECT_SET(new_entry, new_copy_object, false, 0);
12296 			new_entry->used_for_jit = saved_used_for_jit;
12297 			if (new_offset != VME_OFFSET(new_entry)) {
12298 				VME_OFFSET_SET(new_entry, new_offset);
12299 			}
12300 
12301 			new_entry->needs_copy = new_entry_needs_copy;
12302 		}
12303 
12304 		if (result == KERN_SUCCESS &&
12305 		    ((preserve_purgeable &&
12306 		    src_object->purgable != VM_PURGABLE_DENY) ||
12307 		    new_entry->used_for_jit)) {
12308 			/*
12309 			 * Purgeable objects should be COPY_NONE, true share;
12310 			 * this should be propagated to the copy.
12311 			 *
12312 			 * Also force mappings the pmap specially protects to
12313 			 * be COPY_NONE; trying to COW these mappings would
12314 			 * change the effective protections, which could have
12315 			 * side effects if the pmap layer relies on the
12316 			 * specified protections.
12317 			 */
12318 
12319 			vm_object_t     new_object;
12320 
12321 			new_object = VME_OBJECT(new_entry);
12322 			assert(new_object != src_object);
12323 			vm_object_lock(new_object);
12324 			assert(new_object->ref_count == 1);
12325 			assert(new_object->shadow == VM_OBJECT_NULL);
12326 			assert(new_object->vo_copy == VM_OBJECT_NULL);
12327 			assert(new_object->vo_owner == NULL);
12328 
12329 			new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
12330 
12331 			if (preserve_purgeable &&
12332 			    src_object->purgable != VM_PURGABLE_DENY) {
12333 				new_object->true_share = TRUE;
12334 
12335 				/* start as non-volatile with no owner... */
12336 				new_object->purgable = VM_PURGABLE_NONVOLATILE;
12337 				vm_purgeable_nonvolatile_enqueue(new_object, NULL);
12338 				/* ... and move to src_object's purgeable state */
12339 				if (src_object->purgable != VM_PURGABLE_NONVOLATILE) {
12340 					int state;
12341 					state = src_object->purgable;
12342 					vm_object_purgable_control(
12343 						new_object,
12344 						VM_PURGABLE_SET_STATE_FROM_KERNEL,
12345 						&state);
12346 				}
12347 				/* no pmap accounting for purgeable objects */
12348 				new_entry->use_pmap = FALSE;
12349 			}
12350 
12351 			vm_object_unlock(new_object);
12352 			new_object = VM_OBJECT_NULL;
12353 		}
12354 
12355 		if (result != KERN_SUCCESS &&
12356 		    result != KERN_MEMORY_RESTART_COPY) {
12357 			vm_map_lock(src_map);
12358 			RETURN(result);
12359 		}
12360 
12361 		/*
12362 		 *	Throw away the extra reference
12363 		 */
12364 
12365 		vm_object_deallocate(src_object);
12366 
12367 		/*
12368 		 *	Verify that the map has not substantially
12369 		 *	changed while the copy was being made.
12370 		 */
12371 
12372 		vm_map_lock(src_map);
12373 
12374 		if ((version.main_timestamp + 1) == src_map->timestamp) {
12375 			/* src_map hasn't changed: src_entry is still valid */
12376 			src_entry = saved_src_entry;
12377 			goto VerificationSuccessful;
12378 		}
12379 
12380 		/*
12381 		 *	Simple version comparison failed.
12382 		 *
12383 		 *	Retry the lookup and verify that the
12384 		 *	same object/offset are still present.
12385 		 *
12386 		 *	[Note: a memory manager that colludes with
12387 		 *	the calling task can detect that we have
12388 		 *	cheated.  While the map was unlocked, the
12389 		 *	mapping could have been changed and restored.]
12390 		 */
12391 
12392 		if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
12393 			if (result != KERN_MEMORY_RESTART_COPY) {
12394 				vm_object_deallocate(VME_OBJECT(new_entry));
12395 				VME_OBJECT_SET(new_entry, VM_OBJECT_NULL, false, 0);
12396 				/* reset accounting state */
12397 				new_entry->iokit_acct = FALSE;
12398 				new_entry->use_pmap = TRUE;
12399 			}
12400 			RETURN(KERN_INVALID_ADDRESS);
12401 		}
12402 
12403 		src_entry = tmp_entry;
12404 		vm_map_clip_start(src_map, src_entry, src_start);
12405 
12406 		if ((((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE) &&
12407 		    !use_maxprot) ||
12408 		    ((src_entry->max_protection & VM_PROT_READ) == 0)) {
12409 			goto VerificationFailed;
12410 		}
12411 
12412 		if (src_entry->vme_end < new_entry->vme_end) {
12413 			/*
12414 			 * This entry might have been shortened
12415 			 * (vm_map_clip_end) or been replaced with
12416 			 * an entry that ends closer to "src_start"
12417 			 * than before.
12418 			 * Adjust "new_entry" accordingly; copying
12419 			 * less memory would be correct but we also
12420 			 * redo the copy (see below) if the new entry
12421 			 * no longer points at the same object/offset.
12422 			 */
12423 			assert(VM_MAP_PAGE_ALIGNED(src_entry->vme_end,
12424 			    VM_MAP_COPY_PAGE_MASK(copy)));
12425 			new_entry->vme_end = src_entry->vme_end;
12426 			src_size = new_entry->vme_end - src_start;
12427 		} else if (src_entry->vme_end > new_entry->vme_end) {
12428 			/*
12429 			 * This entry might have been extended
12430 			 * (vm_map_entry_simplify() or coalesce)
12431 			 * or been replaced with an entry that ends farther
12432 			 * from "src_start" than before.
12433 			 *
12434 			 * We've called vm_object_copy_*() only on
12435 			 * the previous <start:end> range, so we can't
12436 			 * just extend new_entry.  We have to re-do
12437 			 * the copy based on the new entry as if it was
12438 			 * pointing at a different object/offset (see
12439 			 * "Verification failed" below).
12440 			 */
12441 		}
12442 
12443 		if ((VME_OBJECT(src_entry) != src_object) ||
12444 		    (VME_OFFSET(src_entry) != src_offset) ||
12445 		    (src_entry->vme_end > new_entry->vme_end)) {
12446 			/*
12447 			 *	Verification failed.
12448 			 *
12449 			 *	Start over with this top-level entry.
12450 			 */
12451 
12452 VerificationFailed:     ;
12453 
12454 			vm_object_deallocate(VME_OBJECT(new_entry));
12455 			tmp_entry = src_entry;
12456 			continue;
12457 		}
12458 
12459 		/*
12460 		 *	Verification succeeded.
12461 		 */
12462 
12463 VerificationSuccessful:;
12464 
12465 		if (result == KERN_MEMORY_RESTART_COPY) {
12466 			goto RestartCopy;
12467 		}
12468 
12469 		/*
12470 		 *	Copy succeeded.
12471 		 */
12472 
12473 CopySuccessful: ;
12474 
12475 		/*
12476 		 *	Link in the new copy entry.
12477 		 */
12478 
12479 		vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
12480 		    new_entry);
12481 
12482 		/*
12483 		 *	Determine whether the entire region
12484 		 *	has been copied.
12485 		 */
12486 		src_base = src_start;
12487 		src_start = new_entry->vme_end;
12488 		new_entry = VM_MAP_ENTRY_NULL;
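		/*
		 * If we've reached the end of the range in this submap,
		 * pop back up the "parent_maps" chain: simplify the
		 * submap entries we clipped, release our reference, and
		 * resume the traversal in the parent map.
		 */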
12489 		while ((src_start >= src_end) && (src_end != 0)) {
12490 			submap_map_t    *ptr;
12491 
12492 			if (src_map == base_map) {
12493 				/* back to the top */
12494 				break;
12495 			}
12496 
12497 			ptr = parent_maps;
12498 			assert(ptr != NULL);
12499 			parent_maps = parent_maps->next;
12500 
12501 			/* fix up the damage we did in that submap */
12502 			vm_map_simplify_range(src_map,
12503 			    src_base,
12504 			    src_end);
12505 
12506 			vm_map_unlock(src_map);
12507 			vm_map_deallocate(src_map);
12508 			vm_map_lock(ptr->parent_map);
12509 			src_map = ptr->parent_map;
12510 			src_base = ptr->base_start;
12511 			src_start = ptr->base_start + ptr->base_len;
12512 			src_end = ptr->base_end;
12513 			if (!vm_map_lookup_entry(src_map,
12514 			    src_start,
12515 			    &tmp_entry) &&
12516 			    (src_end > src_start)) {
12517 				RETURN(KERN_INVALID_ADDRESS);
12518 			}
12519 			kfree_type(submap_map_t, ptr);
12520 			if (parent_maps == NULL) {
12521 				map_share = FALSE;
12522 			}
12523 			src_entry = tmp_entry->vme_prev;
12524 		}
12525 
12526 		if ((VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) &&
12527 		    (src_start >= src_addr + len) &&
12528 		    (src_addr + len != 0)) {
12529 			/*
12530 			 * Stop copying now, even though we haven't reached
12531 			 * "src_end".  We'll adjust the end of the last copy
12532 			 * entry at the end, if needed.
12533 			 *
12534 			 * If src_map's alignment is different from the
12535 			 * system's page-alignment, there could be
12536 			 * extra non-map-aligned map entries between
12537 			 * the original (non-rounded) "src_addr + len"
12538 			 * and the rounded "src_end".
12539 			 * We do not want to copy those map entries since
12540 			 * they're not part of the copied range.
12541 			 */
12542 			break;
12543 		}
12544 
12545 		if ((src_start >= src_end) && (src_end != 0)) {
12546 			break;
12547 		}
12548 
12549 		/*
12550 		 *	Verify that there are no gaps in the region
12551 		 */
12552 
12553 		tmp_entry = src_entry->vme_next;
12554 		if ((tmp_entry->vme_start != src_start) ||
12555 		    (tmp_entry == vm_map_to_entry(src_map))) {
12556 			RETURN(KERN_INVALID_ADDRESS);
12557 		}
12558 	}
12559 
12560 	/*
12561 	 * If the source should be destroyed, do it now, since the
12562 	 * copy was successful.
12563 	 */
12564 	if (src_destroy) {
12565 		vmr_flags_t remove_flags = VM_MAP_REMOVE_NO_FLAGS;
12566 
12567 		if (src_map == kernel_map) {
12568 			remove_flags |= VM_MAP_REMOVE_KUNWIRE;
12569 		}
12570 		(void)vm_map_remove_and_unlock(src_map,
12571 		    vm_map_trunc_page(src_addr, VM_MAP_PAGE_MASK(src_map)),
12572 		    src_end,
12573 		    remove_flags,
12574 		    KMEM_GUARD_NONE);
12575 	} else {
12576 		/* fix up the damage we did in the base map */
12577 		vm_map_simplify_range(
12578 			src_map,
12579 			vm_map_trunc_page(src_addr,
12580 			VM_MAP_PAGE_MASK(src_map)),
12581 			vm_map_round_page(src_end,
12582 			VM_MAP_PAGE_MASK(src_map)));
12583 		vm_map_unlock(src_map);
12584 	}
12585 
12586 	tmp_entry = VM_MAP_ENTRY_NULL;
12587 
12588 	if (VM_MAP_PAGE_SHIFT(src_map) > PAGE_SHIFT &&
12589 	    VM_MAP_PAGE_SHIFT(src_map) != VM_MAP_COPY_PAGE_SHIFT(copy)) {
12590 		vm_map_offset_t original_start, original_offset, original_end;
12591 
12592 		assert(VM_MAP_COPY_PAGE_MASK(copy) == PAGE_MASK);
12593 
12594 		/* adjust alignment of first copy_entry's "vme_start" */
12595 		tmp_entry = vm_map_copy_first_entry(copy);
12596 		if (tmp_entry != vm_map_copy_to_entry(copy)) {
12597 			vm_map_offset_t adjustment;
12598 
12599 			original_start = tmp_entry->vme_start;
12600 			original_offset = VME_OFFSET(tmp_entry);
12601 
12602 			/* map-align the start of the first copy entry... */
12603 			adjustment = (tmp_entry->vme_start -
12604 			    vm_map_trunc_page(
12605 				    tmp_entry->vme_start,
12606 				    VM_MAP_PAGE_MASK(src_map)));
12607 			tmp_entry->vme_start -= adjustment;
12608 			VME_OFFSET_SET(tmp_entry,
12609 			    VME_OFFSET(tmp_entry) - adjustment);
12610 			copy_addr -= adjustment;
12611 			assert(tmp_entry->vme_start < tmp_entry->vme_end);
12612 			/* ... adjust for mis-aligned start of copy range */
12613 			adjustment =
12614 			    (vm_map_trunc_page(copy->offset,
12615 			    PAGE_MASK) -
12616 			    vm_map_trunc_page(copy->offset,
12617 			    VM_MAP_PAGE_MASK(src_map)));
12618 			if (adjustment) {
12619 				assert(page_aligned(adjustment));
12620 				assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
12621 				tmp_entry->vme_start += adjustment;
12622 				VME_OFFSET_SET(tmp_entry,
12623 				    (VME_OFFSET(tmp_entry) +
12624 				    adjustment));
12625 				copy_addr += adjustment;
12626 				assert(tmp_entry->vme_start < tmp_entry->vme_end);
12627 			}
12628 
12629 			/*
12630 			 * Assert that the adjustments haven't exposed
12631 			 * more than was originally copied...
12632 			 */
12633 			assert(tmp_entry->vme_start >= original_start);
12634 			assert(VME_OFFSET(tmp_entry) >= original_offset);
12635 			/*
12636 			 * ... and that it did not adjust outside of
12637 			 * a single 16K page.
12638 			 */
12639 			assert(vm_map_trunc_page(tmp_entry->vme_start,
12640 			    VM_MAP_PAGE_MASK(src_map)) ==
12641 			    vm_map_trunc_page(original_start,
12642 			    VM_MAP_PAGE_MASK(src_map)));
12643 		}
12644 
12645 		/* adjust alignment of last copy_entry's "vme_end" */
12646 		tmp_entry = vm_map_copy_last_entry(copy);
12647 		if (tmp_entry != vm_map_copy_to_entry(copy)) {
12648 			vm_map_offset_t adjustment;
12649 
12650 			original_end = tmp_entry->vme_end;
12651 
12652 			/* map-align the end of the last copy entry... */
12653 			tmp_entry->vme_end =
12654 			    vm_map_round_page(tmp_entry->vme_end,
12655 			    VM_MAP_PAGE_MASK(src_map));
12656 			/* ... adjust for mis-aligned end of copy range */
12657 			adjustment =
12658 			    (vm_map_round_page((copy->offset +
12659 			    copy->size),
12660 			    VM_MAP_PAGE_MASK(src_map)) -
12661 			    vm_map_round_page((copy->offset +
12662 			    copy->size),
12663 			    PAGE_MASK));
12664 			if (adjustment) {
12665 				assert(page_aligned(adjustment));
12666 				assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
12667 				tmp_entry->vme_end -= adjustment;
12668 				assert(tmp_entry->vme_start < tmp_entry->vme_end);
12669 			}
12670 
12671 			/*
12672 			 * Assert that the adjustments haven't exposed
12673 			 * more than was originally copied...
12674 			 */
12675 			assert(tmp_entry->vme_end <= original_end);
12676 			/*
12677 			 * ... and that it did not adjust outside of
12678 			 * a single 16K page.
12679 			 */
12680 			assert(vm_map_round_page(tmp_entry->vme_end,
12681 			    VM_MAP_PAGE_MASK(src_map)) ==
12682 			    vm_map_round_page(original_end,
12683 			    VM_MAP_PAGE_MASK(src_map)));
12684 		}
12685 	}
12686 
12687 	/* Fix-up start and end points in copy.  This is necessary */
12688 	/* when the various entries in the copy object were picked */
12689 	/* up from different sub-maps */
12690 
12691 	tmp_entry = vm_map_copy_first_entry(copy);
12692 	copy_size = 0; /* compute actual size */
12693 	while (tmp_entry != vm_map_copy_to_entry(copy)) {
12694 		assert(VM_MAP_PAGE_ALIGNED(
12695 			    copy_addr + (tmp_entry->vme_end -
12696 			    tmp_entry->vme_start),
12697 			    MIN(VM_MAP_COPY_PAGE_MASK(copy), PAGE_MASK)));
12698 		assert(VM_MAP_PAGE_ALIGNED(
12699 			    copy_addr,
12700 			    MIN(VM_MAP_COPY_PAGE_MASK(copy), PAGE_MASK)));
12701 
12702 		/*
12703 		 * The copy_entries will be injected directly into the
12704 		 * destination map and might not be "map aligned" there...
12705 		 */
12706 		tmp_entry->map_aligned = FALSE;
12707 
12708 		tmp_entry->vme_end = copy_addr +
12709 		    (tmp_entry->vme_end - tmp_entry->vme_start);
12710 		tmp_entry->vme_start = copy_addr;
12711 		assert(tmp_entry->vme_start < tmp_entry->vme_end);
12712 		copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
12713 		copy_size += tmp_entry->vme_end - tmp_entry->vme_start;
12714 		tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
12715 	}
12716 
12717 	if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT &&
12718 	    copy_size < copy->size) {
12719 		/*
12720 		 * The actual size of the VM map copy is smaller than what
12721 		 * was requested by the caller.  This must be because some
12722 		 * PAGE_SIZE-sized pages are missing at the end of the last
12723 		 * VM_MAP_PAGE_SIZE(src_map)-sized chunk of the range.
12724 		 * The caller might not have been aware of those missing
12725 		 * pages and might not want to be aware of it, which is
12726 		 * fine as long as they don't try to access (and crash on)
12727 		 * those missing pages.
12728 		 * Let's adjust the size of the "copy", to avoid failing
12729 		 * in vm_map_copyout() or vm_map_copy_overwrite().
12730 		 */
12731 		assert(vm_map_round_page(copy_size,
12732 		    VM_MAP_PAGE_MASK(src_map)) ==
12733 		    vm_map_round_page(copy->size,
12734 		    VM_MAP_PAGE_MASK(src_map)));
12735 		copy->size = copy_size;
12736 	}
12737 
12738 	*copy_result = copy;
12739 	return KERN_SUCCESS;
12740 
12741 #undef  RETURN
12742 }
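/*
 * Illustrative caller-side sketch (an assumption about usage, not code
 * from this function): callers typically reach vm_map_copyin_internal()
 * through vm_map_copyin() and hand the result to vm_map_copyout(),
 * roughly:
 *
 *	vm_map_copy_t copy;
 *	kern_return_t kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *	}
 */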
12743 
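/*
 * vm_map_copy_extract:
 *
 * Extract the entries covering the given address range into a new
 * VM map copy, by way of vm_map_remap_extract().  "do_copy" selects
 * between copying and sharing the extracted range.  "cur_prot" and
 * "max_prot" are IN/OUT: on input they specify the protections the
 * caller requires, on output they report the protections actually
 * obtained.
 */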
12744 kern_return_t
12745 vm_map_copy_extract(
12746 	vm_map_t                src_map,
12747 	vm_map_address_t        src_addr,
12748 	vm_map_size_t           len,
12749 	boolean_t               do_copy,
12750 	vm_map_copy_t           *copy_result,   /* OUT */
12751 	vm_prot_t               *cur_prot,      /* IN/OUT */
12752 	vm_prot_t               *max_prot,      /* IN/OUT */
12753 	vm_inherit_t            inheritance,
12754 	vm_map_kernel_flags_t   vmk_flags)
12755 {
12756 	vm_map_copy_t   copy;
12757 	kern_return_t   kr;
12758 	vm_prot_t required_cur_prot, required_max_prot;
12759 
12760 	/*
12761 	 *	Check for copies of zero bytes.
12762 	 */
12763 
12764 	if (len == 0) {
12765 		*copy_result = VM_MAP_COPY_NULL;
12766 		return KERN_SUCCESS;
12767 	}
12768 
12769 	/*
12770 	 *	Check that the end address doesn't overflow
12771 	 */
12772 	if (src_addr + len < src_addr) {
12773 		return KERN_INVALID_ADDRESS;
12774 	}
12775 	if (__improbable(vm_map_range_overflows(src_map, src_addr, len))) {
12776 		return KERN_INVALID_ADDRESS;
12777 	}
12778 
12779 	if (VM_MAP_PAGE_SIZE(src_map) < PAGE_SIZE) {
12780 		DEBUG4K_SHARE("src_map %p src_addr 0x%llx src_end 0x%llx\n", src_map, (uint64_t)src_addr, (uint64_t)(src_addr + len));
12781 	}
12782 
12783 	required_cur_prot = *cur_prot;
12784 	required_max_prot = *max_prot;
12785 
12786 	/*
12787 	 *	Allocate a header element for the list.
12788 	 *
12789 	 *	Use the start and end in the header to
12790 	 *	remember the endpoints prior to rounding.
12791 	 */
12792 
12793 	copy = vm_map_copy_allocate(VM_MAP_COPY_ENTRY_LIST);
12794 	copy->cpy_hdr.entries_pageable = vmk_flags.vmkf_copy_pageable;
12795 	copy->offset = 0;
12796 	copy->size = len;
12797 
12798 	kr = vm_map_remap_extract(src_map,
12799 	    src_addr,
12800 	    len,
12801 	    do_copy,             /* copy */
12802 	    copy,
12803 	    cur_prot,            /* IN/OUT */
12804 	    max_prot,            /* IN/OUT */
12805 	    inheritance,
12806 	    vmk_flags);
12807 	if (kr != KERN_SUCCESS) {
12808 		vm_map_copy_discard(copy);
12809 		return kr;
12810 	}
12811 	if (required_cur_prot != VM_PROT_NONE) {
12812 		assert((*cur_prot & required_cur_prot) == required_cur_prot);
12813 		assert((*max_prot & required_max_prot) == required_max_prot);
12814 	}
12815 
12816 	*copy_result = copy;
12817 	return KERN_SUCCESS;
12818 }
12819 
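/*
 * vm_map_fork_share:
 *
 * Handle a VM_INHERIT_SHARE entry during vm_map_fork(): make sure the
 * parent entry points at a shareable object (allocating one if it was
 * missing, or shadowing it if a deferred symmetric copy is pending),
 * switch a symmetric object to the asymmetric copy-delay strategy,
 * then clone the entry into the child map with both entries marked
 * "is_shared".
 */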
12820 static void
12821 vm_map_fork_share(
12822 	vm_map_t        old_map,
12823 	vm_map_entry_t  old_entry,
12824 	vm_map_t        new_map)
12825 {
12826 	vm_object_t     object;
12827 	vm_map_entry_t  new_entry;
12828 
12829 	/*
12830 	 *	New sharing code.  New map entry
12831 	 *	references original object.  Internal
12832 	 *	objects use asynchronous copy algorithm for
12833 	 *	future copies.  First make sure we have
12834 	 *	the right object.  If we need a shadow,
12835 	 *	or someone else already has one, then
12836 	 *	make a new shadow and share it.
12837 	 */
12838 
12839 	if (!old_entry->is_sub_map) {
12840 		object = VME_OBJECT(old_entry);
12841 	}
12842 
12843 	if (old_entry->is_sub_map) {
12844 		assert(old_entry->wired_count == 0);
12845 #ifndef NO_NESTED_PMAP
12846 #if !PMAP_FORK_NEST
12847 		if (old_entry->use_pmap) {
12848 			kern_return_t   result;
12849 
12850 			result = pmap_nest(new_map->pmap,
12851 			    (VME_SUBMAP(old_entry))->pmap,
12852 			    (addr64_t)old_entry->vme_start,
12853 			    (uint64_t)(old_entry->vme_end - old_entry->vme_start));
12854 			if (result) {
12855 				panic("vm_map_fork_share: pmap_nest failed!");
12856 			}
12857 		}
12858 #endif /* !PMAP_FORK_NEST */
12859 #endif  /* NO_NESTED_PMAP */
12860 	} else if (object == VM_OBJECT_NULL) {
12861 		object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end -
12862 		    old_entry->vme_start));
12863 		VME_OFFSET_SET(old_entry, 0);
12864 		VME_OBJECT_SET(old_entry, object, false, 0);
12865 		old_entry->use_pmap = TRUE;
12866 //		assert(!old_entry->needs_copy);
12867 	} else if (object->copy_strategy !=
12868 	    MEMORY_OBJECT_COPY_SYMMETRIC) {
12869 		/*
12870 		 *	We are already using an asymmetric
12871 		 *	copy, and therefore we already have
12872 		 *	the right object.
12873 		 */
12874 
12875 		assert(!old_entry->needs_copy);
12876 	} else if (old_entry->needs_copy ||       /* case 1 */
12877 	    object->shadowed ||                 /* case 2 */
12878 	    (!object->true_share &&             /* case 3 */
12879 	    !old_entry->is_shared &&
12880 	    (object->vo_size >
12881 	    (vm_map_size_t)(old_entry->vme_end -
12882 	    old_entry->vme_start)))) {
12883 		/*
12884 		 *	We need to create a shadow.
12885 		 *	There are three cases here.
12886 		 *	In the first case, we need to
12887 		 *	complete a deferred symmetrical
12888 		 *	copy that we participated in.
12889 		 *	In the second and third cases,
12890 		 *	we need to create the shadow so
12891 		 *	that changes that we make to the
12892 		 *	object do not interfere with
12893 		 *	any symmetrical copies which
12894 		 *	have occurred (case 2) or which
12895 		 *	might occur (case 3).
12896 		 *
12897 		 *	The first case is when we had
12898 		 *	deferred shadow object creation
12899 		 *	via the entry->needs_copy mechanism.
12900 		 *	This mechanism only works when
12901 		 *	only one entry points to the source
12902 		 *	object, and we are about to create
12903 		 *	a second entry pointing to the
12904 		 *	same object. The problem is that
12905 		 *	there is no way of mapping from
12906 		 *	an object to the entries pointing
12907 		 *	to it. (Deferred shadow creation
12908 		 *	works with one entry because it occurs
12909 		 *	at fault time, and we walk from the
12910 		 *	entry to the object when handling
12911 		 *	the fault.)
12912 		 *
12913 		 *	The second case is when the object
12914 		 *	to be shared has already been copied
12915 		 *	with a symmetric copy, but we point
12916 		 *	directly to the object without
12917 		 *	needs_copy set in our entry. (This
12918 		 *	can happen because different ranges
12919 		 *	of an object can be pointed to by
12920 		 *	different entries. In particular,
12921 		 *	a single entry pointing to an object
12922 		 *	can be split by a call to vm_inherit,
12923 		 *	which, combined with task_create, can
12924 		 *	result in the different entries
12925 		 *	having different needs_copy values.)
12926 		 *	The shadowed flag in the object allows
12927 		 *	us to detect this case. The problem
12928 		 *	with this case is that if this object
12929 		 *	has or will have shadows, then we
12930 		 *	must not perform an asymmetric copy
12931 		 *	of this object, since such a copy
12932 		 *	allows the object to be changed, which
12933 		 *	will break the previous symmetrical
12934 		 *	copies (which rely upon the object
12935 		 *	not changing). In a sense, the shadowed
12936 		 *	flag says "don't change this object".
12937 		 *	We fix this by creating a shadow
12938 		 *	object for this object, and sharing
12939 		 *	that. This works because we are free
12940 		 *	to change the shadow object (and thus
12941 		 *	to use an asymmetric copy strategy);
12942 		 *	this is also semantically correct,
12943 		 *	since this object is temporary, and
12944 		 *	therefore a copy of the object is
12945 		 *	as good as the object itself. (This
12946 		 *	is not true for permanent objects,
12947 		 *	since the pager needs to see changes,
12948 		 *	which won't happen if the changes
12949 		 *	are made to a copy.)
12950 		 *
12951 		 *	The third case is when the object
12952 		 *	to be shared has parts sticking
12953 		 *	outside of the entry we're working
12954 		 *	with, and thus may in the future
12955 		 *	be subject to a symmetrical copy.
12956 		 *	(This is a preemptive version of
12957 		 *	case 2.)
12958 		 */
12959 		VME_OBJECT_SHADOW(old_entry,
12960 		    (vm_map_size_t) (old_entry->vme_end -
12961 		    old_entry->vme_start),
12962 		    vm_map_always_shadow(old_map));
12963 
12964 		/*
12965 		 *	If we're making a shadow for other than
12966 		 *	copy on write reasons, then we have
12967 		 *	to remove write permission.
12968 		 */
12969 
12970 		if (!old_entry->needs_copy &&
12971 		    (old_entry->protection & VM_PROT_WRITE)) {
12972 			vm_prot_t prot;
12973 
12974 			assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, old_entry->protection));
12975 
12976 			prot = old_entry->protection & ~VM_PROT_WRITE;
12977 
12978 			assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, prot));
12979 
12980 			if (override_nx(old_map, VME_ALIAS(old_entry)) && prot) {
12981 				prot |= VM_PROT_EXECUTE;
12982 			}
12983 
12984 
12985 			if (old_map->mapped_in_other_pmaps) {
12986 				vm_object_pmap_protect(
12987 					VME_OBJECT(old_entry),
12988 					VME_OFFSET(old_entry),
12989 					(old_entry->vme_end -
12990 					old_entry->vme_start),
12991 					PMAP_NULL,
12992 					PAGE_SIZE,
12993 					old_entry->vme_start,
12994 					prot);
12995 			} else {
12996 				pmap_protect(old_map->pmap,
12997 				    old_entry->vme_start,
12998 				    old_entry->vme_end,
12999 				    prot);
13000 			}
13001 		}
13002 
13003 		old_entry->needs_copy = FALSE;
13004 		object = VME_OBJECT(old_entry);
13005 	}
13006 
13007 
13008 	/*
13009 	 *	If object was using a symmetric copy strategy,
13010 	 *	change its copy strategy to the default
13011 	 *	asymmetric copy strategy, which is copy_delay
13012 	 *	in the non-norma case and copy_call in the
13013 	 *	norma case. Bump the reference count for the
13014 	 *	new entry.
13015 	 */
13016 
13017 	if (old_entry->is_sub_map) {
13018 		vm_map_reference(VME_SUBMAP(old_entry));
13019 	} else {
13020 		vm_object_lock(object);
13021 		vm_object_reference_locked(object);
13022 		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
13023 			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
13024 		}
13025 		vm_object_unlock(object);
13026 	}
13027 
13028 	/*
13029 	 *	Clone the entry, using object ref from above.
13030 	 *	Mark both entries as shared.
13031 	 */
13032 
13033 	new_entry = vm_map_entry_create(new_map); /* Never the kernel map or descendants */
13034 	vm_map_entry_copy(old_map, new_entry, old_entry);
13035 	old_entry->is_shared = TRUE;
13036 	new_entry->is_shared = TRUE;
13037 
13038 	/*
13039 	 * We're dealing with a shared mapping, so the resulting mapping
13040 	 * should inherit some of the original mapping's accounting settings.
13041 	 * "iokit_acct" should have been cleared in vm_map_entry_copy().
13042 	 * "use_pmap" should stay the same as before (if it hasn't been reset
13043 	 * to TRUE when we cleared "iokit_acct").
13044 	 */
13045 	assert(!new_entry->iokit_acct);
13046 
13047 	/*
13048 	 *	If the old entry's inheritance is VM_INHERIT_NONE,
13049 	 *	the new entry is for a corpse fork; remove the
13050 	 *	write permission from the new entry.
13051 	 */
13052 	if (old_entry->inheritance == VM_INHERIT_NONE) {
13053 		new_entry->protection &= ~VM_PROT_WRITE;
13054 		new_entry->max_protection &= ~VM_PROT_WRITE;
13055 	}
13056 
13057 	/*
13058 	 *	Insert the entry into the new map -- we
13059 	 *	know we're inserting at the end of the new
13060 	 *	map.
13061 	 */
13062 
13063 	vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), new_entry,
13064 	    VM_MAP_KERNEL_FLAGS_NONE);
13065 
13066 	/*
13067 	 *	Update the physical map
13068 	 */
13069 
13070 	if (old_entry->is_sub_map) {
13071 		/* Bill Angell pmap support goes here */
13072 	} else {
13073 		pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
13074 		    old_entry->vme_end - old_entry->vme_start,
13075 		    old_entry->vme_start);
13076 	}
13077 }
13078 
13079 static boolean_t
13080 vm_map_fork_copy(
13081 	vm_map_t        old_map,
13082 	vm_map_entry_t  *old_entry_p,
13083 	vm_map_t        new_map,
13084 	int             vm_map_copyin_flags)
13085 {
13086 	vm_map_entry_t old_entry = *old_entry_p;
13087 	vm_map_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
13088 	vm_map_offset_t start = old_entry->vme_start;
13089 	vm_map_copy_t copy;
13090 	vm_map_entry_t last = vm_map_last_entry(new_map);
13091 
13092 	vm_map_unlock(old_map);
13093 	/*
13094 	 *	Use maxprot version of copyin because we
13095 	 *	care about whether this memory can ever
13096 	 *	be accessed, not just whether it's accessible
13097 	 *	right now.
13098 	 */
13099 	vm_map_copyin_flags |= VM_MAP_COPYIN_USE_MAXPROT;
13100 	if (vm_map_copyin_internal(old_map, start, entry_size,
13101 	    vm_map_copyin_flags, &copy)
13102 	    != KERN_SUCCESS) {
13103 		/*
13104 		 *	The map might have changed while it
13105 		 *	was unlocked, check it again.  Skip
13106 		 *	any blank space or permanently
13107 		 *	unreadable region.
13108 		 */
13109 		vm_map_lock(old_map);
13110 		if (!vm_map_lookup_entry(old_map, start, &last) ||
13111 		    (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
13112 			last = last->vme_next;
13113 		}
13114 		*old_entry_p = last;
13115 
13116 		/*
13117 		 * XXX	For some error returns, want to
13118 		 * XXX	skip to the next element.  Note
13119 		 *	that INVALID_ADDRESS and
13120 		 *	PROTECTION_FAILURE are handled above.
13121 		 */
13122 
13123 		return FALSE;
13124 	}
13125 
13126 	/*
13127 	 * Assert that the vm_map_copy is coming from the right
13128 	 * zone and hasn't been forged
13129 	 */
13130 	vm_map_copy_require(copy);
13131 
13132 	/*
13133 	 *	Insert the copy into the new map
13134 	 */
13135 	vm_map_copy_insert(new_map, last, copy);
13136 
13137 	/*
13138 	 *	Pick up the traversal at the end of
13139 	 *	the copied region.
13140 	 */
13141 
13142 	vm_map_lock(old_map);
13143 	start += entry_size;
13144 	if (!vm_map_lookup_entry(old_map, start, &last)) {
13145 		last = last->vme_next;
13146 	} else {
13147 		if (last->vme_start == start) {
13148 			/*
13149 			 * No need to clip here and we don't
13150 			 * want to cause any unnecessary
13151 			 * unnesting...
13152 			 */
13153 		} else {
13154 			vm_map_clip_start(old_map, last, start);
13155 		}
13156 	}
13157 	*old_entry_p = last;
13158 
13159 	return TRUE;
13160 }
13161 
13162 #if PMAP_FORK_NEST
13163 #define PMAP_FORK_NEST_DEBUG 0
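/*
 * vm_map_fork_unnest:
 *
 * Undo, in the child's pmap, any pre-nesting (see pmap_fork_nest())
 * of the [start, end) range: only the portion that intersects the
 * [pre_nested_start, pre_nested_end) range is unnested, after being
 * expanded to the pmap's nesting granularity.
 */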
13164 static inline void
13165 vm_map_fork_unnest(
13166 	pmap_t new_pmap,
13167 	vm_map_offset_t pre_nested_start,
13168 	vm_map_offset_t pre_nested_end,
13169 	vm_map_offset_t start,
13170 	vm_map_offset_t end)
13171 {
13172 	kern_return_t kr;
13173 	vm_map_offset_t nesting_mask, start_unnest, end_unnest;
13174 
13175 	assertf(pre_nested_start <= pre_nested_end,
13176 	    "pre_nested start 0x%llx end 0x%llx",
13177 	    (uint64_t)pre_nested_start, (uint64_t)pre_nested_end);
13178 	assertf(start <= end,
13179 	    "start 0x%llx end 0x%llx",
13180 	    (uint64_t) start, (uint64_t)end);
13181 
13182 	if (pre_nested_start == pre_nested_end) {
13183 		/* nothing was pre-nested: done */
13184 		return;
13185 	}
13186 	if (end <= pre_nested_start) {
13187 		/* fully before pre-nested range: done */
13188 		return;
13189 	}
13190 	if (start >= pre_nested_end) {
13191 		/* fully after pre-nested range: done */
13192 		return;
13193 	}
13194 	/* ignore parts of range outside of pre_nested range */
13195 	if (start < pre_nested_start) {
13196 		start = pre_nested_start;
13197 	}
13198 	if (end > pre_nested_end) {
13199 		end = pre_nested_end;
13200 	}
13201 	nesting_mask = pmap_shared_region_size_min(new_pmap) - 1;
13202 	start_unnest = start & ~nesting_mask;
13203 	end_unnest = (end + nesting_mask) & ~nesting_mask;
13204 	kr = pmap_unnest(new_pmap,
13205 	    (addr64_t)start_unnest,
13206 	    (uint64_t)(end_unnest - start_unnest));
13207 #if PMAP_FORK_NEST_DEBUG
13208 	printf("PMAP_FORK_NEST %s:%d new_pmap %p 0x%llx:0x%llx -> pmap_unnest 0x%llx:0x%llx kr 0x%x\n", __FUNCTION__, __LINE__, new_pmap, (uint64_t)start, (uint64_t)end, (uint64_t)start_unnest, (uint64_t)end_unnest, kr);
13209 #endif /* PMAP_FORK_NEST_DEBUG */
13210 	assertf(kr == KERN_SUCCESS,
13211 	    "0x%llx 0x%llx pmap_unnest(%p, 0x%llx, 0x%llx) -> 0x%x",
13212 	    (uint64_t)start, (uint64_t)end, new_pmap,
13213 	    (uint64_t)start_unnest, (uint64_t)(end_unnest - start_unnest),
13214 	    kr);
13215 }
13216 #endif /* PMAP_FORK_NEST */
13217 
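/*
 * vm_map_inherit_limits:
 *
 * Propagate the parent map's resource limits (address-space size,
 * data size, user wire limit) and reserved regions to a newly
 * created child map.
 */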
13218 void
13219 vm_map_inherit_limits(vm_map_t new_map, const struct _vm_map *old_map)
13220 {
13221 	new_map->size_limit = old_map->size_limit;
13222 	new_map->data_limit = old_map->data_limit;
13223 	new_map->user_wire_limit = old_map->user_wire_limit;
13224 	new_map->reserved_regions = old_map->reserved_regions;
13225 }
13226 
13227 /*
13228  *	vm_map_fork:
13229  *
13230  *	Create and return a new map based on the old
13231  *	map, according to the inheritance values on the
13232  *	regions in that map and the options.
13233  *
13234  *	The source map must not be locked.
13235  */
13236 vm_map_t
13237 vm_map_fork(
13238 	ledger_t        ledger,
13239 	vm_map_t        old_map,
13240 	int             options)
13241 {
13242 	pmap_t          new_pmap;
13243 	vm_map_t        new_map;
13244 	vm_map_entry_t  old_entry;
13245 	vm_map_size_t   new_size = 0, entry_size;
13246 	vm_map_entry_t  new_entry;
13247 	boolean_t       src_needs_copy;
13248 	boolean_t       new_entry_needs_copy;
13249 	boolean_t       pmap_is64bit;
13250 	int             vm_map_copyin_flags;
13251 	vm_inherit_t    old_entry_inheritance;
13252 	int             map_create_options;
13253 	kern_return_t   footprint_collect_kr;
13254 
13255 	if (options & ~(VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
13256 	    VM_MAP_FORK_PRESERVE_PURGEABLE |
13257 	    VM_MAP_FORK_CORPSE_FOOTPRINT)) {
13258 		/* unsupported option */
13259 		return VM_MAP_NULL;
13260 	}
13261 
13262 	pmap_is64bit =
13263 #if defined(__i386__) || defined(__x86_64__)
13264 	    old_map->pmap->pm_task_map != TASK_MAP_32BIT;
13265 #elif defined(__arm64__)
13266 	    old_map->pmap->is_64bit;
13267 #else
13268 #error Unknown architecture.
13269 #endif
13270 
13271 	unsigned int pmap_flags = 0;
13272 	pmap_flags |= pmap_is64bit ? PMAP_CREATE_64BIT : 0;
13273 #if defined(HAS_APPLE_PAC)
13274 	pmap_flags |= old_map->pmap->disable_jop ? PMAP_CREATE_DISABLE_JOP : 0;
13275 #endif
13276 #if CONFIG_ROSETTA
13277 	pmap_flags |= old_map->pmap->is_rosetta ? PMAP_CREATE_ROSETTA : 0;
13278 #endif
13279 #if PMAP_CREATE_FORCE_4K_PAGES
13280 	if (VM_MAP_PAGE_SIZE(old_map) == FOURK_PAGE_SIZE &&
13281 	    PAGE_SIZE != FOURK_PAGE_SIZE) {
13282 		pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES;
13283 	}
13284 #endif /* PMAP_CREATE_FORCE_4K_PAGES */
13285 	new_pmap = pmap_create_options(ledger, (vm_map_size_t) 0, pmap_flags);
13286 	if (new_pmap == NULL) {
13287 		return VM_MAP_NULL;
13288 	}
13289 
13290 	vm_map_reference(old_map);
13291 	vm_map_lock(old_map);
13292 
13293 	map_create_options = 0;
13294 	if (old_map->hdr.entries_pageable) {
13295 		map_create_options |= VM_MAP_CREATE_PAGEABLE;
13296 	}
13297 	if (options & VM_MAP_FORK_CORPSE_FOOTPRINT) {
13298 		map_create_options |= VM_MAP_CREATE_CORPSE_FOOTPRINT;
13299 		footprint_collect_kr = KERN_SUCCESS;
13300 	}
13301 	new_map = vm_map_create_options(new_pmap,
13302 	    old_map->min_offset,
13303 	    old_map->max_offset,
13304 	    map_create_options);
13305 
13306 	/* inherit cs_enforcement */
13307 	vm_map_cs_enforcement_set(new_map, old_map->cs_enforcement);
13308 
13309 	vm_map_lock(new_map);
13310 	vm_commit_pagezero_status(new_map);
13311 	/* inherit the parent map's page size */
13312 	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(old_map));
13313 
13314 	/* inherit the parent rlimits */
13315 	vm_map_inherit_limits(new_map, old_map);
13316 
13317 #if CONFIG_MAP_RANGES
13318 	/* inherit the parent map's VM ranges */
13319 	vm_map_range_fork(new_map, old_map);
13320 #endif
13321 
13322 #if CODE_SIGNING_MONITOR
13323 	/* Prepare the monitor for the fork */
13324 	csm_fork_prepare(old_map->pmap, new_pmap);
13325 #endif
13326 
13327 #if PMAP_FORK_NEST
13328 	/*
13329 	 * Pre-nest the shared region's pmap.
13330 	 */
13331 	vm_map_offset_t pre_nested_start = 0, pre_nested_end = 0;
13332 	pmap_fork_nest(old_map->pmap, new_pmap,
13333 	    &pre_nested_start, &pre_nested_end);
13334 #if PMAP_FORK_NEST_DEBUG
13335 	printf("PMAP_FORK_NEST %s:%d old %p new %p pre_nested start 0x%llx end 0x%llx\n", __FUNCTION__, __LINE__, old_map->pmap, new_pmap, (uint64_t)pre_nested_start, (uint64_t)pre_nested_end);
13336 #endif /* PMAP_FORK_NEST_DEBUG */
13337 #endif /* PMAP_FORK_NEST */
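	/*
	 * Any pre-nested range that the loop below does not keep nested
	 * in the child (i.e. anything not covered by a "use_pmap"
	 * submap entry) is undone via vm_map_fork_unnest().
	 */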
13338 
13339 	for (old_entry = vm_map_first_entry(old_map); old_entry != vm_map_to_entry(old_map);) {
13340 		/*
13341 		 * Abort any corpse collection if the system is shutting down.
13342 		 */
13343 		if ((options & VM_MAP_FORK_CORPSE_FOOTPRINT) &&
13344 		    get_system_inshutdown()) {
13345 #if PMAP_FORK_NEST
13346 			new_entry = vm_map_last_entry(new_map);
13347 			if (new_entry == vm_map_to_entry(new_map)) {
13348 				/* unnest all that was pre-nested */
13349 				vm_map_fork_unnest(new_pmap,
13350 				    pre_nested_start, pre_nested_end,
13351 				    vm_map_min(new_map), vm_map_max(new_map));
13352 			} else if (new_entry->vme_end < vm_map_max(new_map)) {
13353 				/* unnest hole at the end, if pre-nested */
13354 				vm_map_fork_unnest(new_pmap,
13355 				    pre_nested_start, pre_nested_end,
13356 				    new_entry->vme_end, vm_map_max(new_map));
13357 			}
13358 #endif /* PMAP_FORK_NEST */
13359 			vm_map_corpse_footprint_collect_done(new_map);
13360 			vm_map_unlock(new_map);
13361 			vm_map_unlock(old_map);
13362 			vm_map_deallocate(new_map);
13363 			vm_map_deallocate(old_map);
13364 			printf("Aborting corpse map due to system shutdown\n");
13365 			return VM_MAP_NULL;
13366 		}
13367 
13368 		entry_size = old_entry->vme_end - old_entry->vme_start;
13369 
13370 #if PMAP_FORK_NEST
13371 		/*
13372 		 * Undo any unnecessary pre-nesting.
13373 		 */
13374 		vm_map_offset_t prev_end;
13375 		if (old_entry == vm_map_first_entry(old_map)) {
13376 			prev_end = vm_map_min(old_map);
13377 		} else {
13378 			prev_end = old_entry->vme_prev->vme_end;
13379 		}
13380 		if (prev_end < old_entry->vme_start) {
13381 			/* unnest hole before this entry, if pre-nested */
13382 			vm_map_fork_unnest(new_pmap,
13383 			    pre_nested_start, pre_nested_end,
13384 			    prev_end, old_entry->vme_start);
13385 		}
13386 		if (old_entry->is_sub_map && old_entry->use_pmap) {
13387 			/* keep this entry nested in the child */
13388 #if PMAP_FORK_NEST_DEBUG
13389 			printf("PMAP_FORK_NEST %s:%d new_pmap %p keeping 0x%llx:0x%llx nested\n", __FUNCTION__, __LINE__, new_pmap, (uint64_t)old_entry->vme_start, (uint64_t)old_entry->vme_end);
13390 #endif /* PMAP_FORK_NEST_DEBUG */
13391 		} else {
13392 			/* undo nesting for this entry, if pre-nested */
13393 			vm_map_fork_unnest(new_pmap,
13394 			    pre_nested_start, pre_nested_end,
13395 			    old_entry->vme_start, old_entry->vme_end);
13396 		}
13397 #endif /* PMAP_FORK_NEST */
13398 
13399 		old_entry_inheritance = old_entry->inheritance;
13400 		/*
13401 		 * If the caller used the VM_MAP_FORK_SHARE_IF_INHERIT_NONE option,
13402 		 * share VM_INHERIT_NONE entries that are not backed by a
13403 		 * device pager.
13404 		 */
13405 		if (old_entry_inheritance == VM_INHERIT_NONE &&
13406 		    (options & VM_MAP_FORK_SHARE_IF_INHERIT_NONE) &&
13407 		    (old_entry->protection & VM_PROT_READ) &&
13408 		    !(!old_entry->is_sub_map &&
13409 		    VME_OBJECT(old_entry) != NULL &&
13410 		    VME_OBJECT(old_entry)->pager != NULL &&
13411 		    is_device_pager_ops(
13412 			    VME_OBJECT(old_entry)->pager->mo_pager_ops))) {
13413 			old_entry_inheritance = VM_INHERIT_SHARE;
13414 		}
13415 
13416 		if (old_entry_inheritance != VM_INHERIT_NONE &&
13417 		    (options & VM_MAP_FORK_CORPSE_FOOTPRINT) &&
13418 		    footprint_collect_kr == KERN_SUCCESS) {
13419 			/*
13420 			 * The corpse won't have old_map->pmap to query
13421 			 * footprint information, so collect that data now
13422 			 * and store it in new_map->vmmap_corpse_footprint
13423 			 * for later autopsy.
13424 			 */
13425 			footprint_collect_kr =
13426 			    vm_map_corpse_footprint_collect(old_map,
13427 			    old_entry,
13428 			    new_map);
13429 		}
13430 
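		/*
		 * Dispatch on inheritance: NONE entries are skipped,
		 * SHARE entries are cloned via vm_map_fork_share(), and
		 * COPY entries get a symmetric copy-on-write copy inline,
		 * falling back to vm_map_fork_copy() when the quick path
		 * does not apply.
		 */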
13431 		switch (old_entry_inheritance) {
13432 		case VM_INHERIT_NONE:
13433 			break;
13434 
13435 		case VM_INHERIT_SHARE:
13436 			vm_map_fork_share(old_map, old_entry, new_map);
13437 			new_size += entry_size;
13438 			break;
13439 
13440 		case VM_INHERIT_COPY:
13441 
13442 			/*
13443 			 *	Inline the copy_quickly case;
13444 			 *	upon failure, fall back on call
13445 			 *	to vm_map_fork_copy.
13446 			 */
13447 
13448 			if (old_entry->is_sub_map) {
13449 				break;
13450 			}
13451 			if ((old_entry->wired_count != 0) ||
13452 			    ((VME_OBJECT(old_entry) != NULL) &&
13453 			    (VME_OBJECT(old_entry)->true_share))) {
13454 				goto slow_vm_map_fork_copy;
13455 			}
13456 
13457 			new_entry = vm_map_entry_create(new_map); /* never the kernel map or descendants */
13458 			vm_map_entry_copy(old_map, new_entry, old_entry);
13459 			if (old_entry->vme_permanent) {
13460 				/* inherit "permanent" on fork() */
13461 				new_entry->vme_permanent = TRUE;
13462 			}
13463 
13464 			if (new_entry->used_for_jit == TRUE && new_map->jit_entry_exists == FALSE) {
13465 				new_map->jit_entry_exists = TRUE;
13466 			}
13467 
13468 			if (new_entry->is_sub_map) {
13469 				/* clear address space specifics */
13470 				new_entry->use_pmap = FALSE;
13471 			} else {
13472 				/*
13473 				 * We're dealing with a copy-on-write operation,
13474 				 * so the resulting mapping should not inherit
13475 				 * the original mapping's accounting settings.
13476 				 * "iokit_acct" should have been cleared in
13477 				 * vm_map_entry_copy().
13478 				 * "use_pmap" should be reset to its default
13479 				 * (TRUE) so that the new mapping gets
13480 				 * accounted for in the task's memory footprint.
13481 				 */
13482 				assert(!new_entry->iokit_acct);
13483 				new_entry->use_pmap = TRUE;
13484 			}
13485 
13486 			if (!vm_object_copy_quickly(
13487 				    VME_OBJECT(new_entry),
13488 				    VME_OFFSET(old_entry),
13489 				    (old_entry->vme_end -
13490 				    old_entry->vme_start),
13491 				    &src_needs_copy,
13492 				    &new_entry_needs_copy)) {
13493 				vm_map_entry_dispose(new_entry);
13494 				goto slow_vm_map_fork_copy;
13495 			}
13496 
13497 			/*
13498 			 *	Handle copy-on-write obligations
13499 			 */
13500 
13501 			if (src_needs_copy && !old_entry->needs_copy) {
13502 				vm_prot_t prot;
13503 
13504 				assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, old_entry->protection));
13505 
13506 				prot = old_entry->protection & ~VM_PROT_WRITE;
13507 
13508 				if (override_nx(old_map, VME_ALIAS(old_entry))
13509 				    && prot) {
13510 					prot |= VM_PROT_EXECUTE;
13511 				}
13512 
13513 				assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, prot));
13514 
13515 				vm_object_pmap_protect(
13516 					VME_OBJECT(old_entry),
13517 					VME_OFFSET(old_entry),
13518 					(old_entry->vme_end -
13519 					old_entry->vme_start),
13520 					((old_entry->is_shared
13521 					|| old_map->mapped_in_other_pmaps)
13522 					? PMAP_NULL :
13523 					old_map->pmap),
13524 					VM_MAP_PAGE_SIZE(old_map),
13525 					old_entry->vme_start,
13526 					prot);
13527 
13528 				assert(old_entry->wired_count == 0);
13529 				old_entry->needs_copy = TRUE;
13530 			}
13531 			new_entry->needs_copy = new_entry_needs_copy;
13532 
13533 			/*
13534 			 *	Insert the entry at the end
13535 			 *	of the map.
13536 			 */
13537 
13538 			vm_map_store_entry_link(new_map,
13539 			    vm_map_last_entry(new_map),
13540 			    new_entry,
13541 			    VM_MAP_KERNEL_FLAGS_NONE);
13542 			new_size += entry_size;
13543 			break;
13544 
13545 slow_vm_map_fork_copy:
13546 			vm_map_copyin_flags = VM_MAP_COPYIN_FORK;
13547 			if (options & VM_MAP_FORK_PRESERVE_PURGEABLE) {
13548 				vm_map_copyin_flags |=
13549 				    VM_MAP_COPYIN_PRESERVE_PURGEABLE;
13550 			}
13551 			if (vm_map_fork_copy(old_map,
13552 			    &old_entry,
13553 			    new_map,
13554 			    vm_map_copyin_flags)) {
13555 				new_size += entry_size;
13556 			}
13557 			continue;
13558 		}
13559 		old_entry = old_entry->vme_next;
13560 	}
13561 
13562 #if PMAP_FORK_NEST
13563 	new_entry = vm_map_last_entry(new_map);
13564 	if (new_entry == vm_map_to_entry(new_map)) {
13565 		/* unnest all that was pre-nested */
13566 		vm_map_fork_unnest(new_pmap,
13567 		    pre_nested_start, pre_nested_end,
13568 		    vm_map_min(new_map), vm_map_max(new_map));
13569 	} else if (new_entry->vme_end < vm_map_max(new_map)) {
13570 		/* unnest hole at the end, if pre-nested */
13571 		vm_map_fork_unnest(new_pmap,
13572 		    pre_nested_start, pre_nested_end,
13573 		    new_entry->vme_end, vm_map_max(new_map));
13574 	}
13575 #endif /* PMAP_FORK_NEST */
13576 
13577 #if defined(__arm64__)
13578 	pmap_insert_commpage(new_map->pmap);
13579 #endif /* __arm64__ */
13580 
13581 	new_map->size = new_size;
13582 
13583 	if (options & VM_MAP_FORK_CORPSE_FOOTPRINT) {
13584 		vm_map_corpse_footprint_collect_done(new_map);
13585 	}
13586 
13587 	/* Propagate JIT entitlement for the pmap layer. */
13588 	if (pmap_get_jit_entitled(old_map->pmap)) {
13589 		/* Tell the pmap that it supports JIT. */
13590 		pmap_set_jit_entitled(new_map->pmap);
13591 	}
13592 
13593 	/* Propagate TPRO settings for the pmap layer */
13594 	if (pmap_get_tpro(old_map->pmap)) {
13595 		/* Tell the pmap that it supports TPRO */
13596 		pmap_set_tpro(new_map->pmap);
13597 	}
13598 
13599 	vm_map_unlock(new_map);
13600 	vm_map_unlock(old_map);
13601 	vm_map_deallocate(old_map);
13602 
13603 	return new_map;
13604 }
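
/*
 * Illustrative userspace sketch (editorial addition, not part of the
 * original source): the VM_INHERIT_* values handled by the switch above
 * are what mach_vm_inherit() sets on a range, and they determine what
 * fork(2) does with it.
 */
#if 0 /* example only, never compiled */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <unistd.h>

static void
demo_inherit_share(void)
{
	mach_vm_address_t addr = 0;

	/* One page inherited as VM_INHERIT_SHARE: parent and child share
	 * the same physical memory (the vm_map_fork_share() path). */
	mach_vm_allocate(mach_task_self(), &addr, PAGE_SIZE,
	    VM_FLAGS_ANYWHERE);
	mach_vm_inherit(mach_task_self(), addr, PAGE_SIZE,
	    VM_INHERIT_SHARE);
	*(volatile int *)addr = 0;
	if (fork() == 0) {
		*(volatile int *)addr = 1;      /* visible to the parent */
		_exit(0);
	}
	/* With the default VM_INHERIT_COPY, the child would instead get a
	 * copy-on-write copy and its store would remain private. */
}
#endif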
13605 
13606 /*
13607  * vm_map_exec:
13608  *
13609  *      Setup the "new_map" with the proper execution environment according
13610  *	to the type of executable (platform, 64bit, chroot environment).
13611  *	Map the comm page and shared region, etc...
13612  */
13613 kern_return_t
13614 vm_map_exec(
13615 	vm_map_t        new_map,
13616 	task_t          task,
13617 	boolean_t       is64bit,
13618 	void            *fsroot,
13619 	cpu_type_t      cpu,
13620 	cpu_subtype_t   cpu_subtype,
13621 	boolean_t       reslide,
13622 	boolean_t       is_driverkit,
13623 	uint32_t        rsr_version)
13624 {
13625 	SHARED_REGION_TRACE_DEBUG(
13626 		("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): ->\n",
13627 		(void *)VM_KERNEL_ADDRPERM(current_task()),
13628 		(void *)VM_KERNEL_ADDRPERM(new_map),
13629 		(void *)VM_KERNEL_ADDRPERM(task),
13630 		(void *)VM_KERNEL_ADDRPERM(fsroot),
13631 		cpu,
13632 		cpu_subtype));
13633 	(void) vm_commpage_enter(new_map, task, is64bit);
13634 
13635 	(void) vm_shared_region_enter(new_map, task, is64bit, fsroot, cpu, cpu_subtype, reslide, is_driverkit, rsr_version);
13636 
13637 	SHARED_REGION_TRACE_DEBUG(
13638 		("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): <-\n",
13639 		(void *)VM_KERNEL_ADDRPERM(current_task()),
13640 		(void *)VM_KERNEL_ADDRPERM(new_map),
13641 		(void *)VM_KERNEL_ADDRPERM(task),
13642 		(void *)VM_KERNEL_ADDRPERM(fsroot),
13643 		cpu,
13644 		cpu_subtype));
13645 
13646 	/*
13647 	 * Some devices have region(s) of memory that shouldn't get allocated by
13648 	 * user processes. The following code creates dummy vm_map_entry_t's for each
13649 	 * of the regions that need to be reserved, to prevent any allocations in
13650 	 * those regions.
13651 	 */
13652 	kern_return_t kr = KERN_FAILURE;
13653 	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT();
13654 	vmk_flags.vmkf_beyond_max = true;
13655 
13656 	const struct vm_reserved_region *regions = NULL;
13657 	size_t num_regions = ml_get_vm_reserved_regions(is64bit, &regions);
13658 	assert((num_regions == 0) || (num_regions > 0 && regions != NULL));
13659 
13660 	for (size_t i = 0; i < num_regions; ++i) {
13661 		vm_map_offset_t address = regions[i].vmrr_addr;
13662 
13663 		kr = vm_map_enter(
13664 			new_map,
13665 			&address,
13666 			regions[i].vmrr_size,
13667 			(vm_map_offset_t)0,
13668 			vmk_flags,
13669 			VM_OBJECT_NULL,
13670 			(vm_object_offset_t)0,
13671 			FALSE,
13672 			VM_PROT_NONE,
13673 			VM_PROT_NONE,
13674 			VM_INHERIT_COPY);
13675 
13676 		if (kr != KERN_SUCCESS) {
13677 			panic("Failed to reserve %s region in user map %p %d", regions[i].vmrr_name, new_map, kr);
13678 		}
13679 	}
13680 
13681 	new_map->reserved_regions = (num_regions ? TRUE : FALSE);
13682 
13683 	return KERN_SUCCESS;
13684 }
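
/*
 * Illustrative sketch (editorial addition): the effect of the
 * reserved-region entries created above, as seen from userspace.  The
 * address below is hypothetical; real reserved ranges come from
 * ml_get_vm_reserved_regions() and are device-specific.
 */
#if 0 /* example only, never compiled */
mach_vm_address_t addr = 0x300000000ULL;  /* hypothetical reserved address */
kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr, PAGE_SIZE,
    VM_FLAGS_FIXED);
/* If a reserved region covers this range, the fixed allocation fails:
 * the permanent VM_PROT_NONE entry installed above already occupies
 * the space. */
#endif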
13685 
13686 uint64_t vm_map_lookup_and_lock_object_copy_slowly_count = 0;
13687 uint64_t vm_map_lookup_and_lock_object_copy_slowly_size = 0;
13688 uint64_t vm_map_lookup_and_lock_object_copy_slowly_max = 0;
13689 uint64_t vm_map_lookup_and_lock_object_copy_slowly_restart = 0;
13690 uint64_t vm_map_lookup_and_lock_object_copy_slowly_error = 0;
13691 uint64_t vm_map_lookup_and_lock_object_copy_strategically_count = 0;
13692 uint64_t vm_map_lookup_and_lock_object_copy_strategically_size = 0;
13693 uint64_t vm_map_lookup_and_lock_object_copy_strategically_max = 0;
13694 uint64_t vm_map_lookup_and_lock_object_copy_strategically_restart = 0;
13695 uint64_t vm_map_lookup_and_lock_object_copy_strategically_error = 0;
13696 uint64_t vm_map_lookup_and_lock_object_copy_shadow_count = 0;
13697 uint64_t vm_map_lookup_and_lock_object_copy_shadow_size = 0;
13698 uint64_t vm_map_lookup_and_lock_object_copy_shadow_max = 0;
13699 /*
13700  *	vm_map_lookup_and_lock_object:
13701  *
13702  *	Finds the VM object, offset, and
13703  *	protection for a given virtual address in the
13704  *	specified map, assuming a page fault of the
13705  *	type specified.
13706  *
13707  *	Returns the (object, offset, protection) for
13708  *	this address, whether it is wired down, and whether
13709  *	this map has the only reference to the data in question.
13710  *	In order to later verify this lookup, a "version"
13711  *	is returned.
13712  *	If contended != NULL, *contended will be set to
13713  *	true iff the thread had to spin or block to acquire
13714  *	an exclusive lock.
13715  *
13716  *	The map MUST be locked by the caller and WILL be
13717  *	locked on exit.  In order to guarantee the
13718  *	existence of the returned object, it is returned
13719  *	locked.
13720  *
13721  *	If a lookup is requested with "write protection"
13722  *	specified, the map may be changed to perform virtual
13723  *	copying operations, although the data referenced will
13724  *	remain the same.
13725  */
13726 kern_return_t
13727 vm_map_lookup_and_lock_object(
13728 	vm_map_t                *var_map,       /* IN/OUT */
13729 	vm_map_offset_t         vaddr,
13730 	vm_prot_t               fault_type,
13731 	int                     object_lock_type,
13732 	vm_map_version_t        *out_version,   /* OUT */
13733 	vm_object_t             *object,        /* OUT */
13734 	vm_object_offset_t      *offset,        /* OUT */
13735 	vm_prot_t               *out_prot,      /* OUT */
13736 	boolean_t               *wired,         /* OUT */
13737 	vm_object_fault_info_t  fault_info,     /* OUT */
13738 	vm_map_t                *real_map,      /* OUT */
13739 	bool                    *contended)     /* OUT */
13740 {
13741 	vm_map_entry_t                  entry;
13742 	vm_map_t                        map = *var_map;
13743 	vm_map_t                        old_map = *var_map;
13744 	vm_map_t                        cow_sub_map_parent = VM_MAP_NULL;
13745 	vm_map_offset_t                 cow_parent_vaddr = 0;
13746 	vm_map_offset_t                 old_start = 0;
13747 	vm_map_offset_t                 old_end = 0;
13748 	vm_prot_t                       prot;
13749 	boolean_t                       mask_protections;
13750 	boolean_t                       force_copy;
13751 	boolean_t                       no_force_copy_if_executable;
13752 	boolean_t                       submap_needed_copy;
13753 	vm_prot_t                       original_fault_type;
13754 	vm_map_size_t                   fault_page_mask;
13755 
13756 	/*
13757 	 * VM_PROT_MASK means that the caller wants us to use "fault_type"
13758 	 * as a mask against the mapping's actual protections, not as an
13759 	 * absolute value.
13760 	 */
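	/*
	 * For example, with VM_PROT_IS_MASK set, a caller requesting
	 * (VM_PROT_READ | VM_PROT_WRITE) against a read-only mapping
	 * proceeds as a read fault instead of failing outright (see the
	 * mask_protections check further below).
	 */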
13761 	mask_protections = (fault_type & VM_PROT_IS_MASK) ? TRUE : FALSE;
13762 	force_copy = (fault_type & VM_PROT_COPY) ? TRUE : FALSE;
13763 	no_force_copy_if_executable = (fault_type & VM_PROT_COPY_FAIL_IF_EXECUTABLE) ? TRUE : FALSE;
13764 	fault_type &= VM_PROT_ALL;
13765 	original_fault_type = fault_type;
13766 	if (contended) {
13767 		*contended = false;
13768 	}
13769 
13770 	*real_map = map;
13771 
13772 	fault_page_mask = MIN(VM_MAP_PAGE_MASK(map), PAGE_MASK);
13773 	vaddr = VM_MAP_TRUNC_PAGE(vaddr, fault_page_mask);
13774 
13775 RetryLookup:
13776 	fault_type = original_fault_type;
13777 
13778 	/*
13779 	 *	If the map has an interesting hint, try it before calling
13780 	 *	the full-blown lookup routine.
13781 	 */
13782 	entry = map->hint;
13783 
13784 	if ((entry == vm_map_to_entry(map)) ||
13785 	    (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
13786 		vm_map_entry_t  tmp_entry;
13787 
13788 		/*
13789 		 *	Entry was either not a valid hint, or the vaddr
13790 		 *	was not contained in the entry, so do a full lookup.
13791 		 */
13792 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
13793 			if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) {
13794 				vm_map_unlock(cow_sub_map_parent);
13795 			}
13796 			if ((*real_map != map)
13797 			    && (*real_map != cow_sub_map_parent)) {
13798 				vm_map_unlock(*real_map);
13799 			}
13800 			return KERN_INVALID_ADDRESS;
13801 		}
13802 
13803 		entry = tmp_entry;
13804 	}
13805 	if (map == old_map) {
13806 		old_start = entry->vme_start;
13807 		old_end = entry->vme_end;
13808 	}
13809 
13810 	/*
13811 	 *	Handle submaps.  Drop lock on upper map, submap is
13812 	 *	returned locked.
13813 	 */
13814 
13815 	submap_needed_copy = FALSE;
13816 submap_recurse:
13817 	if (entry->is_sub_map) {
13818 		vm_map_offset_t         local_vaddr;
13819 		vm_map_offset_t         end_delta;
13820 		vm_map_offset_t         start_delta;
13821 		vm_map_offset_t         top_entry_saved_start;
13822 		vm_object_offset_t      top_entry_saved_offset;
13823 		vm_map_entry_t          submap_entry, saved_submap_entry;
13824 		vm_object_offset_t      submap_entry_offset;
13825 		vm_object_size_t        submap_entry_size;
13826 		vm_prot_t               subentry_protection;
13827 		vm_prot_t               subentry_max_protection;
13828 		boolean_t               subentry_no_copy_on_read;
13829 		boolean_t               subentry_permanent;
13830 		boolean_t               subentry_csm_associated;
13831 #if __arm64e__
13832 		boolean_t               subentry_used_for_tpro;
13833 #endif /* __arm64e__ */
13834 		boolean_t               mapped_needs_copy = FALSE;
13835 		vm_map_version_t        version;
13836 
13837 		assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
13838 		    "map %p (%d) entry %p submap %p (%d)\n",
13839 		    map, VM_MAP_PAGE_SHIFT(map), entry,
13840 		    VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
13841 
13842 		local_vaddr = vaddr;
13843 		top_entry_saved_start = entry->vme_start;
13844 		top_entry_saved_offset = VME_OFFSET(entry);
13845 
13846 		if ((entry->use_pmap &&
13847 		    !((fault_type & VM_PROT_WRITE) ||
13848 		    force_copy))) {
13849 			/* if real_map equals map we unlock below */
13850 			if ((*real_map != map) &&
13851 			    (*real_map != cow_sub_map_parent)) {
13852 				vm_map_unlock(*real_map);
13853 			}
13854 			*real_map = VME_SUBMAP(entry);
13855 		}
13856 
13857 		if (entry->needs_copy &&
13858 		    ((fault_type & VM_PROT_WRITE) ||
13859 		    force_copy)) {
13860 			if (!mapped_needs_copy) {
13861 				if (vm_map_lock_read_to_write(map)) {
13862 					vm_map_lock_read(map);
13863 					*real_map = map;
13864 					goto RetryLookup;
13865 				}
13866 				vm_map_lock_read(VME_SUBMAP(entry));
13867 				*var_map = VME_SUBMAP(entry);
13868 				cow_sub_map_parent = map;
13869 				/* reset base to map before cow object */
13870 				/* this is the map which will accept   */
13871 				/* the new cow object */
13872 				old_start = entry->vme_start;
13873 				old_end = entry->vme_end;
13874 				cow_parent_vaddr = vaddr;
13875 				mapped_needs_copy = TRUE;
13876 			} else {
13877 				vm_map_lock_read(VME_SUBMAP(entry));
13878 				*var_map = VME_SUBMAP(entry);
13879 				if ((cow_sub_map_parent != map) &&
13880 				    (*real_map != map)) {
13881 					vm_map_unlock(map);
13882 				}
13883 			}
13884 		} else {
13885 			if (entry->needs_copy) {
13886 				submap_needed_copy = TRUE;
13887 			}
13888 			vm_map_lock_read(VME_SUBMAP(entry));
13889 			*var_map = VME_SUBMAP(entry);
13890 			/* Leave the map locked if it is the  */
13891 			/* target cow sub_map parent above;   */
13892 			/* otherwise, just follow the maps    */
13893 			/* down to the object.  Here we       */
13894 			/* unlock, knowing we are not revisiting it. */
13895 			if ((*real_map != map) && (map != cow_sub_map_parent)) {
13896 				vm_map_unlock_read(map);
13897 			}
13898 		}
13899 
13900 		entry = NULL;
13901 		map = *var_map;
13902 
13903 		/* calculate the offset in the submap for vaddr */
13904 		local_vaddr = (local_vaddr - top_entry_saved_start) + top_entry_saved_offset;
13905 		assertf(VM_MAP_PAGE_ALIGNED(local_vaddr, fault_page_mask),
13906 		    "local_vaddr 0x%llx entry->vme_start 0x%llx fault_page_mask 0x%llx\n",
13907 		    (uint64_t)local_vaddr, (uint64_t)top_entry_saved_start, (uint64_t)fault_page_mask);
13908 
13909 RetrySubMap:
13910 		if (!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
13911 			if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) {
13912 				vm_map_unlock(cow_sub_map_parent);
13913 			}
13914 			if ((*real_map != map)
13915 			    && (*real_map != cow_sub_map_parent)) {
13916 				vm_map_unlock(*real_map);
13917 			}
13918 			*real_map = map;
13919 			return KERN_INVALID_ADDRESS;
13920 		}
13921 
13922 		/* find the attenuated shadow of the underlying object */
13923 		/* on our target map */
13924 
13925 		/* In plain English: the submap object may extend beyond   */
13926 		/* the region mapped by the entry, or may fill only a      */
13927 		/* portion of it.  For our purposes, we only care about    */
13928 		/* the case where the object doesn't fill the entry.  Then */
13929 		/* the area ultimately clipped in the top map only needs   */
13930 		/* to be as big as the portion of the underlying entry     */
13931 		/* that is actually mapped. */
13932 		start_delta = submap_entry->vme_start > top_entry_saved_offset ?
13933 		    submap_entry->vme_start - top_entry_saved_offset : 0;
13934 
13935 		end_delta =
13936 		    (top_entry_saved_offset + start_delta + (old_end - old_start)) <=
13937 		    submap_entry->vme_end ?
13938 		    0 : (top_entry_saved_offset +
13939 		    (old_end - old_start))
13940 		    - submap_entry->vme_end;
13941 
13942 		old_start += start_delta;
13943 		old_end -= end_delta;
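		/*
		 * Worked example (illustrative numbers): if the top entry
		 * maps the submap at offset 0x4000 and the submap entry
		 * starts at 0x6000, then start_delta = 0x2000 and the
		 * [old_start, old_end) range shrinks accordingly; end_delta
		 * is nonzero only if the submap entry ends before the range
		 * the top entry maps.
		 */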
13944 
13945 		if (submap_entry->is_sub_map) {
13946 			entry = submap_entry;
13947 			vaddr = local_vaddr;
13948 			goto submap_recurse;
13949 		}
13950 
13951 		if (((fault_type & VM_PROT_WRITE) ||
13952 		    force_copy)
13953 		    && cow_sub_map_parent) {
13954 			vm_object_t     sub_object, copy_object;
13955 			vm_object_offset_t copy_offset;
13956 			vm_map_offset_t local_start;
13957 			vm_map_offset_t local_end;
13958 			boolean_t       object_copied = FALSE;
13959 			vm_object_offset_t object_copied_offset = 0;
13960 			boolean_t       object_copied_needs_copy = FALSE;
13961 			kern_return_t   kr = KERN_SUCCESS;
13962 
13963 			if (vm_map_lock_read_to_write(map)) {
13964 				vm_map_lock_read(map);
13965 				old_start -= start_delta;
13966 				old_end += end_delta;
13967 				goto RetrySubMap;
13968 			}
13969 
13970 
13971 			sub_object = VME_OBJECT(submap_entry);
13972 			if (sub_object == VM_OBJECT_NULL) {
13973 				sub_object =
13974 				    vm_object_allocate(
13975 					(vm_map_size_t)
13976 					(submap_entry->vme_end -
13977 					submap_entry->vme_start));
13978 				VME_OBJECT_SET(submap_entry, sub_object, false, 0);
13979 				VME_OFFSET_SET(submap_entry, 0);
13980 				assert(!submap_entry->is_sub_map);
13981 				assert(submap_entry->use_pmap);
13982 			}
13983 			local_start =  local_vaddr -
13984 			    (cow_parent_vaddr - old_start);
13985 			local_end = local_vaddr +
13986 			    (old_end - cow_parent_vaddr);
13987 			vm_map_clip_start(map, submap_entry, local_start);
13988 			vm_map_clip_end(map, submap_entry, local_end);
13989 			if (submap_entry->is_sub_map) {
13990 				/* unnesting was done when clipping */
13991 				assert(!submap_entry->use_pmap);
13992 			}
13993 
13994 			/* This is the COW case, let's connect */
13995 			/* an entry in our space to the underlying */
13996 			/* object in the submap, bypassing the  */
13997 			/* submap. */
13998 			submap_entry_offset = VME_OFFSET(submap_entry);
13999 			submap_entry_size = submap_entry->vme_end - submap_entry->vme_start;
14000 
14001 			if ((submap_entry->wired_count != 0 ||
14002 			    sub_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) &&
14003 			    (submap_entry->protection & VM_PROT_EXECUTE) &&
14004 			    no_force_copy_if_executable) {
14005 //				printf("FBDP map %p entry %p start 0x%llx end 0x%llx wired %d strat %d\n", map, submap_entry, (uint64_t)local_start, (uint64_t)local_end, submap_entry->wired_count, sub_object->copy_strategy);
14006 				if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) {
14007 					vm_map_unlock(cow_sub_map_parent);
14008 				}
14009 				if ((*real_map != map)
14010 				    && (*real_map != cow_sub_map_parent)) {
14011 					vm_map_unlock(*real_map);
14012 				}
14013 				*real_map = map;
14014 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUBMAP_NO_COW_ON_EXECUTABLE), 0 /* arg */);
14015 				vm_map_lock_write_to_read(map);
14016 				kr = KERN_PROTECTION_FAILURE;
14017 				DTRACE_VM4(submap_no_copy_executable,
14018 				    vm_map_t, map,
14019 				    vm_object_offset_t, submap_entry_offset,
14020 				    vm_object_size_t, submap_entry_size,
14021 				    int, kr);
14022 				return kr;
14023 			}
14024 
14025 			if (submap_entry->wired_count != 0) {
14026 				vm_object_reference(sub_object);
14027 
14028 				assertf(VM_MAP_PAGE_ALIGNED(VME_OFFSET(submap_entry), VM_MAP_PAGE_MASK(map)),
14029 				    "submap_entry %p offset 0x%llx\n",
14030 				    submap_entry, VME_OFFSET(submap_entry));
14031 
14032 				DTRACE_VM6(submap_copy_slowly,
14033 				    vm_map_t, cow_sub_map_parent,
14034 				    vm_map_offset_t, vaddr,
14035 				    vm_map_t, map,
14036 				    vm_object_size_t, submap_entry_size,
14037 				    int, submap_entry->wired_count,
14038 				    int, sub_object->copy_strategy);
14039 
14040 				saved_submap_entry = submap_entry;
14041 				version.main_timestamp = map->timestamp;
14042 				vm_map_unlock(map); /* Increments timestamp by 1 */
14043 				submap_entry = VM_MAP_ENTRY_NULL;
14044 
14045 				vm_object_lock(sub_object);
14046 				kr = vm_object_copy_slowly(sub_object,
14047 				    submap_entry_offset,
14048 				    submap_entry_size,
14049 				    FALSE,
14050 				    &copy_object);
14051 				object_copied = TRUE;
14052 				object_copied_offset = 0;
14053 				/* 4k: account for extra offset in physical page */
14054 				object_copied_offset += submap_entry_offset - vm_object_trunc_page(submap_entry_offset);
14055 				object_copied_needs_copy = FALSE;
14056 				vm_object_deallocate(sub_object);
14057 
14058 				vm_map_lock(map);
14059 
14060 				if (kr != KERN_SUCCESS &&
14061 				    kr != KERN_MEMORY_RESTART_COPY) {
14062 					if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) {
14063 						vm_map_unlock(cow_sub_map_parent);
14064 					}
14065 					if ((*real_map != map)
14066 					    && (*real_map != cow_sub_map_parent)) {
14067 						vm_map_unlock(*real_map);
14068 					}
14069 					*real_map = map;
14070 					vm_object_deallocate(copy_object);
14071 					copy_object = VM_OBJECT_NULL;
14072 					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUBMAP_COPY_SLOWLY_FAILED), 0 /* arg */);
14073 					vm_map_lock_write_to_read(map);
14074 					DTRACE_VM4(submap_copy_error_slowly,
14075 					    vm_object_t, sub_object,
14076 					    vm_object_offset_t, submap_entry_offset,
14077 					    vm_object_size_t, submap_entry_size,
14078 					    int, kr);
14079 					vm_map_lookup_and_lock_object_copy_slowly_error++;
14080 					return kr;
14081 				}
14082 
14083 				if ((kr == KERN_SUCCESS) &&
14084 				    (version.main_timestamp + 1) == map->timestamp) {
14085 					submap_entry = saved_submap_entry;
14086 				} else {
14087 					saved_submap_entry = NULL;
14088 					old_start -= start_delta;
14089 					old_end += end_delta;
14090 					vm_object_deallocate(copy_object);
14091 					copy_object = VM_OBJECT_NULL;
14092 					vm_map_lock_write_to_read(map);
14093 					vm_map_lookup_and_lock_object_copy_slowly_restart++;
14094 					goto RetrySubMap;
14095 				}
14096 				vm_map_lookup_and_lock_object_copy_slowly_count++;
14097 				vm_map_lookup_and_lock_object_copy_slowly_size += submap_entry_size;
14098 				if (submap_entry_size > vm_map_lookup_and_lock_object_copy_slowly_max) {
14099 					vm_map_lookup_and_lock_object_copy_slowly_max = submap_entry_size;
14100 				}
14101 			} else if (sub_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
14102 				submap_entry_offset = VME_OFFSET(submap_entry);
14103 				copy_object = VM_OBJECT_NULL;
14104 				object_copied_offset = submap_entry_offset;
14105 				object_copied_needs_copy = FALSE;
14106 				DTRACE_VM6(submap_copy_strategically,
14107 				    vm_map_t, cow_sub_map_parent,
14108 				    vm_map_offset_t, vaddr,
14109 				    vm_map_t, map,
14110 				    vm_object_size_t, submap_entry_size,
14111 				    int, submap_entry->wired_count,
14112 				    int, sub_object->copy_strategy);
14113 				kr = vm_object_copy_strategically(
14114 					sub_object,
14115 					submap_entry_offset,
14116 					submap_entry->vme_end - submap_entry->vme_start,
14117 					false, /* forking */
14118 					&copy_object,
14119 					&object_copied_offset,
14120 					&object_copied_needs_copy);
14121 				if (kr == KERN_MEMORY_RESTART_COPY) {
14122 					old_start -= start_delta;
14123 					old_end += end_delta;
14124 					vm_object_deallocate(copy_object);
14125 					copy_object = VM_OBJECT_NULL;
14126 					vm_map_lock_write_to_read(map);
14127 					vm_map_lookup_and_lock_object_copy_strategically_restart++;
14128 					goto RetrySubMap;
14129 				}
14130 				if (kr != KERN_SUCCESS) {
14131 					if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) {
14132 						vm_map_unlock(cow_sub_map_parent);
14133 					}
14134 					if ((*real_map != map)
14135 					    && (*real_map != cow_sub_map_parent)) {
14136 						vm_map_unlock(*real_map);
14137 					}
14138 					*real_map = map;
14139 					vm_object_deallocate(copy_object);
14140 					copy_object = VM_OBJECT_NULL;
14141 					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUBMAP_COPY_STRAT_FAILED), 0 /* arg */);
14142 					vm_map_lock_write_to_read(map);
14143 					DTRACE_VM4(submap_copy_error_strategically,
14144 					    vm_object_t, sub_object,
14145 					    vm_object_offset_t, submap_entry_offset,
14146 					    vm_object_size_t, submap_entry_size,
14147 					    int, kr);
14148 					vm_map_lookup_and_lock_object_copy_strategically_error++;
14149 					return kr;
14150 				}
14151 				assert(copy_object != VM_OBJECT_NULL);
14152 				assert(copy_object != sub_object);
14153 				object_copied = TRUE;
14154 				vm_map_lookup_and_lock_object_copy_strategically_count++;
14155 				vm_map_lookup_and_lock_object_copy_strategically_size += submap_entry_size;
14156 				if (submap_entry_size > vm_map_lookup_and_lock_object_copy_strategically_max) {
14157 					vm_map_lookup_and_lock_object_copy_strategically_max = submap_entry_size;
14158 				}
14159 			} else {
14160 				/* set up shadow object */
14161 				object_copied = FALSE;
14162 				copy_object = sub_object;
14163 				vm_object_lock(sub_object);
14164 				vm_object_reference_locked(sub_object);
14165 				sub_object->shadowed = TRUE;
14166 				vm_object_unlock(sub_object);
14167 
14168 				assert(submap_entry->wired_count == 0);
14169 				submap_entry->needs_copy = TRUE;
14170 
14171 				prot = submap_entry->protection;
14172 				assert(!pmap_has_prot_policy(map->pmap, submap_entry->translated_allow_execute, prot));
14173 				prot = prot & ~VM_PROT_WRITE;
14174 				assert(!pmap_has_prot_policy(map->pmap, submap_entry->translated_allow_execute, prot));
14175 
14176 				if (override_nx(old_map,
14177 				    VME_ALIAS(submap_entry))
14178 				    && prot) {
14179 					prot |= VM_PROT_EXECUTE;
14180 				}
14181 
14182 				vm_object_pmap_protect(
14183 					sub_object,
14184 					VME_OFFSET(submap_entry),
14185 					submap_entry->vme_end -
14186 					submap_entry->vme_start,
14187 					(submap_entry->is_shared
14188 					|| map->mapped_in_other_pmaps) ?
14189 					PMAP_NULL : map->pmap,
14190 					VM_MAP_PAGE_SIZE(map),
14191 					submap_entry->vme_start,
14192 					prot);
14193 				vm_map_lookup_and_lock_object_copy_shadow_count++;
14194 				vm_map_lookup_and_lock_object_copy_shadow_size += submap_entry_size;
14195 				if (submap_entry_size > vm_map_lookup_and_lock_object_copy_shadow_max) {
14196 					vm_map_lookup_and_lock_object_copy_shadow_max = submap_entry_size;
14197 				}
14198 			}
14199 
14200 			/*
14201 			 * Adjust the fault offset to the submap entry.
14202 			 */
14203 			copy_offset = (local_vaddr -
14204 			    submap_entry->vme_start +
14205 			    VME_OFFSET(submap_entry));
14206 
14207 			/* This works differently from the */
14208 			/* normal submap case.  We go back */
14209 			/* to the parent of the cow map    */
14210 			/* and clip out the target portion */
14211 			/* of the sub_map, substituting    */
14212 			/* the new copy object.            */
14213 
14214 			subentry_protection = submap_entry->protection;
14215 			subentry_max_protection = submap_entry->max_protection;
14216 			subentry_no_copy_on_read = submap_entry->vme_no_copy_on_read;
14217 			subentry_permanent = submap_entry->vme_permanent;
14218 			subentry_csm_associated = submap_entry->csm_associated;
14219 #if __arm64e__
14220 			subentry_used_for_tpro = submap_entry->used_for_tpro;
14221 #endif // __arm64e__
14222 			vm_map_unlock(map);
14223 			submap_entry = NULL; /* not valid after map unlock */
14224 
14225 			local_start = old_start;
14226 			local_end = old_end;
14227 			map = cow_sub_map_parent;
14228 			*var_map = cow_sub_map_parent;
14229 			vaddr = cow_parent_vaddr;
14230 			cow_sub_map_parent = NULL;
14231 
14232 			if (!vm_map_lookup_entry(map,
14233 			    vaddr, &entry)) {
14234 				if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) {
14235 					vm_map_unlock(cow_sub_map_parent);
14236 				}
14237 				if ((*real_map != map)
14238 				    && (*real_map != cow_sub_map_parent)) {
14239 					vm_map_unlock(*real_map);
14240 				}
14241 				*real_map = map;
14242 				vm_object_deallocate(
14243 					copy_object);
14244 				copy_object = VM_OBJECT_NULL;
14245 				vm_map_lock_write_to_read(map);
14246 				DTRACE_VM4(submap_lookup_post_unlock,
14247 				    uint64_t, (uint64_t)entry->vme_start,
14248 				    uint64_t, (uint64_t)entry->vme_end,
14249 				    vm_map_offset_t, vaddr,
14250 				    int, object_copied);
14251 				return KERN_INVALID_ADDRESS;
14252 			}
14253 
14254 			/* clip out the portion of space */
14255 			/* mapped by the sub map which   */
14256 			/* corresponds to the underlying */
14257 			/* object */
14258 
14259 			/*
14260 			 * Clip (and unnest) the smallest nested chunk
14261 			 * possible around the faulting address...
14262 			 */
14263 			local_start = vaddr & ~(pmap_shared_region_size_min(map->pmap) - 1);
14264 			local_end = local_start + pmap_shared_region_size_min(map->pmap);
14265 			/*
14266 			 * ... but don't go beyond the "old_start" to "old_end"
14267 			 * range, to avoid spanning over another VM region
14268 			 * with a possibly different VM object and/or offset.
14269 			 */
14270 			if (local_start < old_start) {
14271 				local_start = old_start;
14272 			}
14273 			if (local_end > old_end) {
14274 				local_end = old_end;
14275 			}
14276 			/*
14277 			 * Adjust copy_offset to the start of the range.
14278 			 */
14279 			copy_offset -= (vaddr - local_start);
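			/*
			 * Illustrative numbers: with a (hypothetical) 32MB
			 * nesting granule, a fault at 0x183f0000 gives
			 * local_start = 0x18000000 and local_end =
			 * 0x1a000000, before both are clamped to
			 * [old_start, old_end) above.
			 */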
14280 
14281 			vm_map_clip_start(map, entry, local_start);
14282 			vm_map_clip_end(map, entry, local_end);
14283 			if (entry->is_sub_map) {
14284 				/* unnesting was done when clipping */
14285 				assert(!entry->use_pmap);
14286 			}
14287 
14288 			/* substitute copy object for */
14289 			/* shared map entry           */
14290 			vm_map_deallocate(VME_SUBMAP(entry));
14291 			assert(!entry->iokit_acct);
14292 			entry->use_pmap = TRUE;
14293 			VME_OBJECT_SET(entry, copy_object, false, 0);
14294 
14295 			/* propagate the submap entry's protections */
14296 			if (entry->protection != VM_PROT_READ) {
14297 				/*
14298 				 * Someone has already altered the top entry's
14299 				 * protections via vm_protect(VM_PROT_COPY).
14300 				 * Respect these new values and ignore the
14301 				 * submap entry's protections.
14302 				 */
14303 			} else {
14304 				/*
14305 				 * Regular copy-on-write: propagate the submap
14306 				 * entry's protections to the top map entry.
14307 				 */
14308 				entry->protection |= subentry_protection;
14309 			}
14310 			entry->max_protection |= subentry_max_protection;
14311 			/* propagate some attributes from subentry */
14312 			entry->vme_no_copy_on_read = subentry_no_copy_on_read;
14313 			entry->vme_permanent = subentry_permanent;
14314 			entry->csm_associated = subentry_csm_associated;
14315 #if __arm64e__
14316 			/* propagate TPRO iff the destination map has TPRO enabled */
14317 			if (subentry_used_for_tpro && vm_map_tpro(map)) {
14318 				entry->used_for_tpro = subentry_used_for_tpro;
14319 			}
14320 #endif /* __arm64e */
14321 			if ((entry->protection & VM_PROT_WRITE) &&
14322 			    (entry->protection & VM_PROT_EXECUTE) &&
14323 #if XNU_TARGET_OS_OSX
14324 			    map->pmap != kernel_pmap &&
14325 			    (vm_map_cs_enforcement(map)
14326 #if __arm64__
14327 			    || !VM_MAP_IS_EXOTIC(map)
14328 #endif /* __arm64__ */
14329 			    ) &&
14330 #endif /* XNU_TARGET_OS_OSX */
14331 #if CODE_SIGNING_MONITOR
14332 			    (csm_address_space_exempt(map->pmap) != KERN_SUCCESS) &&
14333 #endif
14334 			    !(entry->used_for_jit) &&
14335 			    VM_MAP_POLICY_WX_STRIP_X(map)) {
14336 				DTRACE_VM3(cs_wx,
14337 				    uint64_t, (uint64_t)entry->vme_start,
14338 				    uint64_t, (uint64_t)entry->vme_end,
14339 				    vm_prot_t, entry->protection);
14340 				printf("CODE SIGNING: %d[%s] %s:%d(0x%llx,0x%llx,0x%x) can't have both write and exec at the same time\n",
14341 				    proc_selfpid(),
14342 				    (get_bsdtask_info(current_task())
14343 				    ? proc_name_address(get_bsdtask_info(current_task()))
14344 				    : "?"),
14345 				    __FUNCTION__, __LINE__,
14346 #if DEVELOPMENT || DEBUG
14347 				    (uint64_t)entry->vme_start,
14348 				    (uint64_t)entry->vme_end,
14349 #else /* DEVELOPMENT || DEBUG */
14350 				    (uint64_t)0,
14351 				    (uint64_t)0,
14352 #endif /* DEVELOPMENT || DEBUG */
14353 				    entry->protection);
14354 				entry->protection &= ~VM_PROT_EXECUTE;
14355 			}
14356 
14357 			if (object_copied) {
14358 				VME_OFFSET_SET(entry, local_start - old_start + object_copied_offset);
14359 				entry->needs_copy = object_copied_needs_copy;
14360 				entry->is_shared = FALSE;
14361 			} else {
14362 				assert(VME_OBJECT(entry) != VM_OBJECT_NULL);
14363 				assert(VME_OBJECT(entry)->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
14364 				assert(entry->wired_count == 0);
14365 				VME_OFFSET_SET(entry, copy_offset);
14366 				entry->needs_copy = TRUE;
14367 				if (map != old_map) {
14368 					entry->is_shared = TRUE;
14369 				}
14370 			}
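			/*
			 * The entry now maps a private copy rather than the
			 * shared submap, so "share" inheritance no longer
			 * applies; downgrade it to "copy".
			 */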
14371 			if (entry->inheritance == VM_INHERIT_SHARE) {
14372 				entry->inheritance = VM_INHERIT_COPY;
14373 			}
14374 
14375 			vm_map_lock_write_to_read(map);
14376 		} else {
14377 			if ((cow_sub_map_parent)
14378 			    && (cow_sub_map_parent != *real_map)
14379 			    && (cow_sub_map_parent != map)) {
14380 				vm_map_unlock(cow_sub_map_parent);
14381 			}
14382 			entry = submap_entry;
14383 			vaddr = local_vaddr;
14384 		}
14385 	}
14386 
14387 	/*
14388 	 *	Check whether this task is allowed to have
14389 	 *	this page.
14390 	 */
14391 
14392 	prot = entry->protection;
14393 
14394 	if (override_nx(old_map, VME_ALIAS(entry)) && prot) {
14395 		/*
14396 		 * HACK -- if not a stack, then allow execution
14397 		 */
14398 		prot |= VM_PROT_EXECUTE;
14399 	}
14400 
14401 #if __arm64e__
14402 	/*
14403 	 * If the entry we're dealing with is TPRO and we have a write
14404 	 * fault, inject VM_PROT_WRITE into protections. This allows us
14405 	 * to maintain RO permissions when not marked as TPRO.
14406 	 */
14407 	if (entry->used_for_tpro && (fault_type & VM_PROT_WRITE)) {
14408 		prot |= VM_PROT_WRITE;
14409 	}
14410 #endif /* __arm64e__ */
14411 	if (mask_protections) {
14412 		fault_type &= prot;
14413 		if (fault_type == VM_PROT_NONE) {
14414 			goto protection_failure;
14415 		}
14416 	}
14417 	if (((fault_type & prot) != fault_type)
14418 #if __arm64__
14419 	    /* prefetch abort in execute-only page */
14420 	    && !(prot == VM_PROT_EXECUTE && fault_type == (VM_PROT_READ | VM_PROT_EXECUTE))
14421 #elif defined(__x86_64__)
14422 	    /* Consider the UEXEC bit when handling an EXECUTE fault */
14423 	    && !((fault_type & VM_PROT_EXECUTE) && !(prot & VM_PROT_EXECUTE) && (prot & VM_PROT_UEXEC))
14424 #endif
14425 	    ) {
14426 protection_failure:
14427 		if (*real_map != map) {
14428 			vm_map_unlock(*real_map);
14429 		}
14430 		*real_map = map;
14431 
14432 		if ((fault_type & VM_PROT_EXECUTE) && prot) {
14433 			log_stack_execution_failure((addr64_t)vaddr, prot);
14434 		}
14435 
14436 		DTRACE_VM2(prot_fault, int, 1, (uint64_t *), NULL);
14437 		DTRACE_VM3(prot_fault_detailed, vm_prot_t, fault_type, vm_prot_t, prot, void *, vaddr);
14438 		/*
14439 		 * Noisy (esp. internally) and can be inferred from CrashReports. So OFF for now.
14440 		 *
14441 		 * ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PROTECTION_FAILURE), 0);
14442 		 */
14443 		return KERN_PROTECTION_FAILURE;
14444 	}
14445 
14446 	/*
14447 	 *	If this page is not pageable, we have to get
14448 	 *	it for all possible accesses.
14449 	 */
14450 
14451 	*wired = (entry->wired_count != 0);
14452 	if (*wired) {
14453 		fault_type = prot;
14454 	}
14455 
14456 	/*
14457 	 *	If the entry was copy-on-write, we either shadow it now or demote the allowed access.
14458 	 */
14459 
14460 	if (entry->needs_copy) {
14461 		/*
14462 		 *	If we want to write the page, we may as well
14463 		 *	handle that now since we've got the map locked.
14464 		 *
14465 		 *	If we don't need to write the page, we just
14466 		 *	demote the permissions allowed.
14467 		 */
14468 
14469 		if ((fault_type & VM_PROT_WRITE) || *wired || force_copy) {
14470 			/*
14471 			 *	Make a new object, and place it in the
14472 			 *	object chain.  Note that no new references
14473 			 *	have appeared -- one just moved from the
14474 			 *	map to the new object.
14475 			 */
14476 
14477 			if (vm_map_lock_read_to_write(map)) {
14478 				vm_map_lock_read(map);
14479 				goto RetryLookup;
14480 			}
14481 
14482 			if (VME_OBJECT(entry)->shadowed == FALSE) {
14483 				vm_object_lock(VME_OBJECT(entry));
14484 				VME_OBJECT(entry)->shadowed = TRUE;
14485 				vm_object_unlock(VME_OBJECT(entry));
14486 			}
14487 			VME_OBJECT_SHADOW(entry,
14488 			    (vm_map_size_t) (entry->vme_end -
14489 			    entry->vme_start),
14490 			    vm_map_always_shadow(map));
14491 			entry->needs_copy = FALSE;
14492 
14493 			vm_map_lock_write_to_read(map);
14494 		}
14495 		if ((fault_type & VM_PROT_WRITE) == 0 && *wired == 0) {
14496 			/*
14497 			 *	We're attempting to read a copy-on-write
14498 			 *	page -- don't allow writes.
14499 			 */
14500 
14501 			prot &= (~VM_PROT_WRITE);
14502 		}
14503 	}
14504 
14505 	if (submap_needed_copy && (prot & VM_PROT_WRITE)) {
14506 		/*
14507 		 * We went through a "needs_copy" submap without triggering
14508 		 * a copy, so granting write access to the page would bypass
14509 		 * that submap's "needs_copy".
14510 		 */
14511 		assert(!(fault_type & VM_PROT_WRITE));
14512 		assert(!*wired);
14513 		assert(!force_copy);
14514 		// printf("FBDP %d[%s] submap_needed_copy for %p 0x%llx\n", proc_selfpid(), proc_name_address(current_task()->bsd_info), map, vaddr);
14515 		prot &= ~VM_PROT_WRITE;
14516 	}
14517 
14518 	/*
14519 	 *	Create an object if necessary.
14520 	 */
14521 	if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
14522 		if (vm_map_lock_read_to_write(map)) {
14523 			vm_map_lock_read(map);
14524 			goto RetryLookup;
14525 		}
14526 
14527 		VME_OBJECT_SET(entry,
14528 		    vm_object_allocate(
14529 			    (vm_map_size_t)(entry->vme_end -
14530 			    entry->vme_start)), false, 0);
14531 		VME_OFFSET_SET(entry, 0);
14532 		assert(entry->use_pmap);
14533 		vm_map_lock_write_to_read(map);
14534 	}
14535 
14536 	/*
14537 	 *	Return the object/offset from this entry.  If the entry
14538 	 *	was copy-on-write or empty, it has been fixed up.  Also
14539 	 *	return the protection.
14540 	 */
14541 
14542 	*offset = (vaddr - entry->vme_start) + VME_OFFSET(entry);
14543 	*object = VME_OBJECT(entry);
14544 	*out_prot = prot;
14545 	KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_MAP_LOOKUP_OBJECT), VM_KERNEL_UNSLIDE_OR_PERM(*object), (unsigned long) VME_ALIAS(entry), 0, 0);
14546 
14547 	if (fault_info) {
14548 		fault_info->interruptible = THREAD_UNINT; /* for now... */
14549 		/* ... the caller will change "interruptible" if needed */
14550 		fault_info->cluster_size = 0;
14551 		fault_info->user_tag = VME_ALIAS(entry);
14552 		fault_info->pmap_options = 0;
14553 		if (entry->iokit_acct ||
14554 		    (!entry->is_sub_map && !entry->use_pmap)) {
14555 			fault_info->pmap_options |= PMAP_OPTIONS_ALT_ACCT;
14556 		}
14557 		fault_info->behavior = entry->behavior;
14558 		fault_info->lo_offset = VME_OFFSET(entry);
14559 		fault_info->hi_offset =
14560 		    (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
14561 		fault_info->no_cache  = entry->no_cache;
14562 		fault_info->stealth = FALSE;
14563 		fault_info->io_sync = FALSE;
14564 		if (entry->used_for_jit ||
14565 #if CODE_SIGNING_MONITOR
14566 		    (csm_address_space_exempt(map->pmap) == KERN_SUCCESS) ||
14567 #endif
14568 		    entry->vme_resilient_codesign) {
14569 			fault_info->cs_bypass = TRUE;
14570 		} else {
14571 			fault_info->cs_bypass = FALSE;
14572 		}
14573 		fault_info->csm_associated = FALSE;
14574 #if CODE_SIGNING_MONITOR
14575 		if (entry->csm_associated) {
14576 			/*
14577 			 * The pmap layer will validate this page
14578 			 * before allowing it to be executed from.
14579 			 */
14580 			fault_info->csm_associated = TRUE;
14581 		}
14582 #endif
14583 		fault_info->mark_zf_absent = FALSE;
14584 		fault_info->batch_pmap_op = FALSE;
14585 		fault_info->resilient_media = entry->vme_resilient_media;
14586 		fault_info->fi_xnu_user_debug = entry->vme_xnu_user_debug;
14587 		fault_info->no_copy_on_read = entry->vme_no_copy_on_read;
14588 #if __arm64e__
14589 		fault_info->fi_used_for_tpro = entry->used_for_tpro;
14590 #else /* __arm64e__ */
14591 		fault_info->fi_used_for_tpro = FALSE;
14592 #endif
14593 		if (entry->translated_allow_execute) {
14594 			fault_info->pmap_options |= PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE;
14595 		}
14596 	}
14597 
14598 	/*
14599 	 *	Lock the object to prevent it from disappearing
14600 	 */
14601 	if (object_lock_type == OBJECT_LOCK_EXCLUSIVE) {
14602 		if (contended == NULL) {
14603 			vm_object_lock(*object);
14604 		} else {
14605 			*contended = vm_object_lock_check_contended(*object);
14606 		}
14607 	} else {
14608 		vm_object_lock_shared(*object);
14609 	}
14610 
14611 	/*
14612 	 *	Save the version number
14613 	 */
14614 
14615 	out_version->main_timestamp = map->timestamp;
14616 
14617 	return KERN_SUCCESS;
14618 }
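
/*
 * Illustrative caller sketch (editorial addition), in the style of the
 * fault handler: the map is read-locked across the lookup, the object
 * comes back locked, and "real_map" may differ from "map" when the
 * lookup descended into a submap.  "map" and "vaddr" are assumed to be
 * in scope.
 */
#if 0 /* example only, never compiled */
vm_map_version_t version;
vm_object_t object;
vm_object_offset_t offset;
vm_prot_t prot;
boolean_t wired;
vm_map_t real_map;
kern_return_t kr;

vm_map_lock_read(map);
kr = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
    OBJECT_LOCK_EXCLUSIVE, &version, &object, &offset, &prot, &wired,
    NULL /* fault_info */, &real_map, NULL /* contended */);
if (kr == KERN_SUCCESS) {
	/* ... resolve the fault against (object, offset) ... */
	vm_object_unlock(object);
	if (real_map != map) {
		vm_map_unlock(real_map);
	}
}
/* On success and failure alike, the (possibly updated) map is still
 * locked here; the caller drops that lock itself. */
vm_map_unlock_read(map);
#endif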
14619 
14620 
14621 /*
14622  *	vm_map_verify:
14623  *
14624  *	Verifies that the map in question has not changed
14625  *	since the given version. The map has to be locked
14626  *	("shared" mode is fine) before calling this function
14627  *	and it will be returned locked too.
14628  */
14629 boolean_t
14630 vm_map_verify(
14631 	vm_map_t                map,
14632 	vm_map_version_t        *version)       /* REF */
14633 {
14634 	boolean_t       result;
14635 
14636 	vm_map_lock_assert_held(map);
14637 	result = (map->timestamp == version->main_timestamp);
14638 
14639 	return result;
14640 }
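
/*
 * Illustrative sketch (editorial addition) of the lookup/verify
 * handshake.  A write unlock (vm_map_unlock()) bumps the map's
 * timestamp, so a version captured at lookup time reveals whether the
 * map may have changed while we worked without the lock.
 */
#if 0 /* example only, never compiled */
for (;;) {
	vm_map_lock_read(map);
	/* ... vm_map_lookup_and_lock_object(..., &version, ...) ... */
	vm_map_unlock_read(map);

	/* ... work that cannot be done with the map lock held ... */

	vm_map_lock_read(map);
	if (vm_map_verify(map, &version)) {
		break;  /* map unchanged: the lookup results still hold */
	}
	vm_map_unlock_read(map);  /* map changed: redo the lookup */
}
#endif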
14641 
14642 /*
14643  *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
14644  *	Goes away after regular vm_region_recurse function migrates to
14645  *	64 bits
14646  *	vm_region_recurse: A form of vm_region which follows the
14647  *	submaps in a target map
14648  *
14649  */
14650 
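/*
 * Illustrative userspace usage (editorial addition): the classic
 * vmmap-style walk over a task's regions via mach_vm_region_recurse(),
 * descending into submaps such as the shared region.
 */
#if 0 /* example only, never compiled */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

static void
dump_regions(task_t task)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 0;
	natural_t depth = 0;
	vm_region_submap_info_data_64_t info;
	mach_msg_type_number_t count;
	kern_return_t kr;

	for (;;) {
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(task, &addr, &size, &depth,
		    (vm_region_recurse_info_t)&info, &count);
		if (kr != KERN_SUCCESS) {
			break;
		}
		if (info.is_submap) {
			depth++;        /* descend into the submap */
			continue;
		}
		printf("0x%llx-0x%llx depth=%u prot=0x%x\n",
		    (unsigned long long)addr,
		    (unsigned long long)(addr + size),
		    depth, info.protection);
		addr += size;
	}
}
#endif
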
14651 kern_return_t
14652 vm_map_region_recurse_64(
14653 	vm_map_t                 map,
14654 	vm_map_offset_t *address,               /* IN/OUT */
14655 	vm_map_size_t           *size,                  /* OUT */
14656 	natural_t               *nesting_depth, /* IN/OUT */
14657 	vm_region_submap_info_64_t      submap_info,    /* IN/OUT */
14658 	mach_msg_type_number_t  *count) /* IN/OUT */
14659 {
14660 	mach_msg_type_number_t  original_count;
14661 	vm_region_extended_info_data_t  extended;
14662 	vm_map_entry_t                  tmp_entry;
14663 	vm_map_offset_t                 user_address;
14664 	unsigned int                    user_max_depth;
14665 
14666 	/*
14667 	 * "curr_entry" is the VM map entry preceding or including the
14668 	 * address we're looking for.
14669 	 * "curr_map" is the map or sub-map containing "curr_entry".
14670 	 * "curr_address" is the equivalent of the top map's "user_address"
14671 	 * in the current map.
14672 	 * "curr_offset" is the cumulative offset of "curr_map" in the
14673 	 * target task's address space.
14674 	 * "curr_depth" is the depth of "curr_map" in the chain of
14675 	 * sub-maps.
14676 	 *
14677 	 * "curr_max_below" and "curr_max_above" limit the range (around
14678 	 * "curr_address") we should take into account in the current (sub)map.
14679 	 * They limit the range to what's visible through the map entries
14680 	 * we've traversed from the top map to the current map.
14681 	 *
14682 	 */
14683 	vm_map_entry_t                  curr_entry;
14684 	vm_map_address_t                curr_address;
14685 	vm_map_offset_t                 curr_offset;
14686 	vm_map_t                        curr_map;
14687 	unsigned int                    curr_depth;
14688 	vm_map_offset_t                 curr_max_below, curr_max_above;
14689 	vm_map_offset_t                 curr_skip;
14690 
14691 	/*
14692 	 * "next_" is the same as "curr_" but for the VM region immediately
14693 	 * after the address we're looking for.  We need to keep track of this
14694 	 * too because we want to return info about that region if the
14695 	 * address we're looking for is not mapped.
14696 	 */
14697 	vm_map_entry_t                  next_entry;
14698 	vm_map_offset_t                 next_offset;
14699 	vm_map_offset_t                 next_address;
14700 	vm_map_t                        next_map;
14701 	unsigned int                    next_depth;
14702 	vm_map_offset_t                 next_max_below, next_max_above;
14703 	vm_map_offset_t                 next_skip;
14704 
14705 	boolean_t                       look_for_pages;
14706 	vm_region_submap_short_info_64_t short_info;
14707 	boolean_t                       do_region_footprint;
14708 	int                             effective_page_size, effective_page_shift;
14709 	boolean_t                       submap_needed_copy;
14710 
14711 	if (map == VM_MAP_NULL) {
14712 		/* no address space to work on */
14713 		return KERN_INVALID_ARGUMENT;
14714 	}
14715 
14716 	effective_page_shift = vm_self_region_page_shift(map);
14717 	effective_page_size = (1 << effective_page_shift);
14718 
14719 	if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) {
14720 		/*
14721 		 * "info" structure is not big enough and
14722 		 * would overflow
14723 		 */
14724 		return KERN_INVALID_ARGUMENT;
14725 	}
14726 
14727 	do_region_footprint = task_self_region_footprint();
14728 	original_count = *count;
14729 
14730 	if (original_count < VM_REGION_SUBMAP_INFO_V0_COUNT_64) {
14731 		*count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
14732 		look_for_pages = FALSE;
14733 		short_info = (vm_region_submap_short_info_64_t) submap_info;
14734 		submap_info = NULL;
14735 	} else {
14736 		look_for_pages = TRUE;
14737 		*count = VM_REGION_SUBMAP_INFO_V0_COUNT_64;
14738 		short_info = NULL;
14739 
14740 		if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
14741 			*count = VM_REGION_SUBMAP_INFO_V1_COUNT_64;
14742 		}
14743 		if (original_count >= VM_REGION_SUBMAP_INFO_V2_COUNT_64) {
14744 			*count = VM_REGION_SUBMAP_INFO_V2_COUNT_64;
14745 		}
14746 	}
14747 
14748 	user_address = *address;
14749 	user_max_depth = *nesting_depth;
14750 	submap_needed_copy = FALSE;
14751 
14752 	if (not_in_kdp) {
14753 		vm_map_lock_read(map);
14754 	}
14755 
14756 recurse_again:
14757 	curr_entry = NULL;
14758 	curr_map = map;
14759 	curr_address = user_address;
14760 	curr_offset = 0;
14761 	curr_skip = 0;
14762 	curr_depth = 0;
14763 	curr_max_above = ((vm_map_offset_t) -1) - curr_address;
14764 	curr_max_below = curr_address;
14765 
14766 	next_entry = NULL;
14767 	next_map = NULL;
14768 	next_address = 0;
14769 	next_offset = 0;
14770 	next_skip = 0;
14771 	next_depth = 0;
14772 	next_max_above = (vm_map_offset_t) -1;
14773 	next_max_below = (vm_map_offset_t) -1;
14774 
14775 	for (;;) {
14776 		if (vm_map_lookup_entry(curr_map,
14777 		    curr_address,
14778 		    &tmp_entry)) {
14779 			/* tmp_entry contains the address we're looking for */
14780 			curr_entry = tmp_entry;
14781 		} else {
14782 			vm_map_offset_t skip;
14783 			/*
14784 			 * The address is not mapped.  "tmp_entry" is the
14785 			 * map entry preceding the address.  We want the next
14786 			 * one, if it exists.
14787 			 */
14788 			curr_entry = tmp_entry->vme_next;
14789 
14790 			if (curr_entry == vm_map_to_entry(curr_map) ||
14791 			    (curr_entry->vme_start >=
14792 			    curr_address + curr_max_above)) {
14793 				/* no next entry at this level: stop looking */
14794 				if (not_in_kdp) {
14795 					vm_map_unlock_read(curr_map);
14796 				}
14797 				curr_entry = NULL;
14798 				curr_map = NULL;
14799 				curr_skip = 0;
14800 				curr_offset = 0;
14801 				curr_depth = 0;
14802 				curr_max_above = 0;
14803 				curr_max_below = 0;
14804 				break;
14805 			}
14806 
14807 			/* adjust current address and offset */
14808 			skip = curr_entry->vme_start - curr_address;
14809 			curr_address = curr_entry->vme_start;
14810 			curr_skip += skip;
14811 			curr_offset += skip;
14812 			curr_max_above -= skip;
14813 			curr_max_below = 0;
14814 		}
14815 
14816 		/*
14817 		 * Is the next entry at this level closer to the address (or
14818 		 * deeper in the submap chain) than the one we had
14819 		 * so far ?
14820 		 */
14821 		tmp_entry = curr_entry->vme_next;
14822 		if (tmp_entry == vm_map_to_entry(curr_map)) {
14823 			/* no next entry at this level */
14824 		} else if (tmp_entry->vme_start >=
14825 		    curr_address + curr_max_above) {
14826 			/*
14827 			 * tmp_entry is beyond the scope of what we mapped of
14828 			 * this submap in the upper level: ignore it.
14829 			 */
14830 		} else if ((next_entry == NULL) ||
14831 		    (tmp_entry->vme_start + curr_offset <=
14832 		    next_entry->vme_start + next_offset)) {
14833 			/*
14834 			 * We didn't have a "next_entry" or this one is
14835 			 * closer to the address we're looking for:
14836 			 * use this "tmp_entry" as the new "next_entry".
14837 			 */
14838 			if (next_entry != NULL) {
14839 				/* unlock the last "next_map" */
14840 				if (next_map != curr_map && not_in_kdp) {
14841 					vm_map_unlock_read(next_map);
14842 				}
14843 			}
14844 			next_entry = tmp_entry;
14845 			next_map = curr_map;
14846 			next_depth = curr_depth;
14847 			next_address = next_entry->vme_start;
14848 			next_skip = curr_skip;
14849 			next_skip += (next_address - curr_address);
14850 			next_offset = curr_offset;
14851 			next_offset += (next_address - curr_address);
14852 			next_max_above = MIN(next_max_above, curr_max_above);
14853 			next_max_above = MIN(next_max_above,
14854 			    next_entry->vme_end - next_address);
14855 			next_max_below = MIN(next_max_below, curr_max_below);
14856 			next_max_below = MIN(next_max_below,
14857 			    next_address - next_entry->vme_start);
14858 		}
14859 
14860 		/*
14861 		 * "curr_max_{above,below}" allow us to keep track of the
14862 		 * portion of the submap that is actually mapped at this level:
14863 		 * the rest of that submap is irrelevant to us, since it's not
14864 		 * mapped here.
14865 		 * The relevant portion of the map runs from
14866 		 * "VME_OFFSET(curr_entry)" up to the size of "curr_entry".
14867 		 */
14868 		curr_max_above = MIN(curr_max_above,
14869 		    curr_entry->vme_end - curr_address);
14870 		curr_max_below = MIN(curr_max_below,
14871 		    curr_address - curr_entry->vme_start);
14872 
14873 		if (!curr_entry->is_sub_map ||
14874 		    curr_depth >= user_max_depth) {
14875 			/*
14876 			 * We hit a leaf map or we reached the maximum depth
14877 			 * we could, so stop looking.  Keep the current map
14878 			 * locked.
14879 			 */
14880 			break;
14881 		}
14882 
14883 		/*
14884 		 * Get down to the next submap level.
14885 		 */
14886 
14887 		if (curr_entry->needs_copy) {
14888 			/* everything below this is effectively copy-on-write */
14889 			submap_needed_copy = TRUE;
14890 		}
14891 
14892 		/*
14893 		 * Lock the next level and unlock the current level,
14894 		 * unless we need to keep it locked to access the "next_entry"
14895 		 * later.
14896 		 */
14897 		if (not_in_kdp) {
14898 			vm_map_lock_read(VME_SUBMAP(curr_entry));
14899 		}
14900 		if (curr_map == next_map) {
14901 			/* keep "next_map" locked in case we need it */
14902 		} else {
14903 			/* release this map */
14904 			if (not_in_kdp) {
14905 				vm_map_unlock_read(curr_map);
14906 			}
14907 		}
14908 
14909 		/*
14910 		 * Adjust the offset.  "curr_entry" maps the submap
14911 		 * at relative address "curr_entry->vme_start" in the
14912 		 * curr_map but skips the first "VME_OFFSET(curr_entry)"
14913 		 * bytes of the submap.
14914 		 * "curr_offset" always represents the offset of a virtual
14915 		 * address in the curr_map relative to the absolute address
14916 		 * space (i.e. the top-level VM map).
14917 		 */
14918 		curr_offset +=
14919 		    (VME_OFFSET(curr_entry) - curr_entry->vme_start);
14920 		curr_address = user_address + curr_offset;
14921 		/* switch to the submap */
14922 		curr_map = VME_SUBMAP(curr_entry);
14923 		curr_depth++;
14924 		curr_entry = NULL;
14925 	}
14926 
14927 // LP64todo: all the current tools are 32bit, obviously never worked for 64b
14928 // so probably should be a real 32b ID vs. ptr.
14929 // Current users just check for equality
14930 
14931 	if (curr_entry == NULL) {
14932 		/* no VM region contains the address... */
14933 
14934 		if (do_region_footprint && /* we want footprint numbers */
14935 		    next_entry == NULL && /* & there are no more regions */
14936 		    /* & we haven't already provided our fake region: */
14937 		    user_address <= vm_map_last_entry(map)->vme_end) {
14938 			ledger_amount_t ledger_resident, ledger_compressed;
14939 
14940 			/*
14941 			 * Add a fake memory region to account for
14942 			 * purgeable and/or ledger-tagged memory that
14943 			 * counts towards this task's memory footprint,
14944 			 * i.e. the resident/compressed pages of non-volatile
14945 			 * objects owned by that task.
14946 			 */
14947 			task_ledgers_footprint(map->pmap->ledger,
14948 			    &ledger_resident,
14949 			    &ledger_compressed);
14950 			if (ledger_resident + ledger_compressed == 0) {
14951 				/* no purgeable memory usage to report */
14952 				return KERN_INVALID_ADDRESS;
14953 			}
14954 			/* fake region to show nonvolatile footprint */
14955 			if (look_for_pages) {
14956 				submap_info->protection = VM_PROT_DEFAULT;
14957 				submap_info->max_protection = VM_PROT_DEFAULT;
14958 				submap_info->inheritance = VM_INHERIT_DEFAULT;
14959 				submap_info->offset = 0;
14960 				submap_info->user_tag = -1;
14961 				submap_info->pages_resident = (unsigned int) (ledger_resident / effective_page_size);
14962 				submap_info->pages_shared_now_private = 0;
14963 				submap_info->pages_swapped_out = (unsigned int) (ledger_compressed / effective_page_size);
14964 				submap_info->pages_dirtied = submap_info->pages_resident;
14965 				submap_info->ref_count = 1;
14966 				submap_info->shadow_depth = 0;
14967 				submap_info->external_pager = 0;
14968 				submap_info->share_mode = SM_PRIVATE;
14969 				if (submap_needed_copy) {
14970 					submap_info->share_mode = SM_COW;
14971 				}
14972 				submap_info->is_submap = 0;
14973 				submap_info->behavior = VM_BEHAVIOR_DEFAULT;
14974 				submap_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
14975 				submap_info->user_wired_count = 0;
14976 				submap_info->pages_reusable = 0;
14977 			} else {
14978 				short_info->user_tag = -1;
14979 				short_info->offset = 0;
14980 				short_info->protection = VM_PROT_DEFAULT;
14981 				short_info->inheritance = VM_INHERIT_DEFAULT;
14982 				short_info->max_protection = VM_PROT_DEFAULT;
14983 				short_info->behavior = VM_BEHAVIOR_DEFAULT;
14984 				short_info->user_wired_count = 0;
14985 				short_info->is_submap = 0;
14986 				short_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
14987 				short_info->external_pager = 0;
14988 				short_info->shadow_depth = 0;
14989 				short_info->share_mode = SM_PRIVATE;
14990 				if (submap_needed_copy) {
14991 					short_info->share_mode = SM_COW;
14992 				}
14993 				short_info->ref_count = 1;
14994 			}
14995 			*nesting_depth = 0;
14996 			*size = (vm_map_size_t) (ledger_resident + ledger_compressed);
14997 //			*address = user_address;
14998 			*address = vm_map_last_entry(map)->vme_end;
14999 			return KERN_SUCCESS;
15000 		}
15001 
15002 		if (next_entry == NULL) {
15003 			/* ... and no VM region follows it either */
15004 			return KERN_INVALID_ADDRESS;
15005 		}
15006 		/* ... gather info about the next VM region */
15007 		curr_entry = next_entry;
15008 		curr_map = next_map;    /* still locked ... */
15009 		curr_address = next_address;
15010 		curr_skip = next_skip;
15011 		curr_offset = next_offset;
15012 		curr_depth = next_depth;
15013 		curr_max_above = next_max_above;
15014 		curr_max_below = next_max_below;
15015 	} else {
15016 		/* we won't need "next_entry" after all */
15017 		if (next_entry != NULL) {
15018 			/* release "next_map" */
15019 			if (next_map != curr_map && not_in_kdp) {
15020 				vm_map_unlock_read(next_map);
15021 			}
15022 		}
15023 	}
15024 	next_entry = NULL;
15025 	next_map = NULL;
15026 	next_offset = 0;
15027 	next_skip = 0;
15028 	next_depth = 0;
15029 	next_max_below = -1;
15030 	next_max_above = -1;
15031 
15032 	if (curr_entry->is_sub_map &&
15033 	    curr_depth < user_max_depth) {
15034 		/*
15035 		 * We're not as deep as we could be:  we must have
15036 		 * gone back up after not finding anything mapped
15037 		 * below the original top-level map entry's range.
15038 		 * Let's move "curr_address" forward and recurse again.
15039 		 */
15040 		user_address = curr_address;
15041 		goto recurse_again;
15042 	}
15043 
15044 	*nesting_depth = curr_depth;
15045 	*size = curr_max_above + curr_max_below;
15046 	*address = user_address + curr_skip - curr_max_below;
15047 
15048 	if (look_for_pages) {
15049 		submap_info->user_tag = VME_ALIAS(curr_entry);
15050 		submap_info->offset = VME_OFFSET(curr_entry);
15051 		submap_info->protection = curr_entry->protection;
15052 		submap_info->inheritance = curr_entry->inheritance;
15053 		submap_info->max_protection = curr_entry->max_protection;
15054 		submap_info->behavior = curr_entry->behavior;
15055 		submap_info->user_wired_count = curr_entry->user_wired_count;
15056 		submap_info->is_submap = curr_entry->is_sub_map;
15057 		if (curr_entry->is_sub_map) {
15058 			submap_info->object_id = VM_OBJECT_ID(VME_SUBMAP(curr_entry));
15059 		} else {
15060 			submap_info->object_id = VM_OBJECT_ID(VME_OBJECT(curr_entry));
15061 		}
15062 	} else {
15063 		short_info->user_tag = VME_ALIAS(curr_entry);
15064 		short_info->offset = VME_OFFSET(curr_entry);
15065 		short_info->protection = curr_entry->protection;
15066 		short_info->inheritance = curr_entry->inheritance;
15067 		short_info->max_protection = curr_entry->max_protection;
15068 		short_info->behavior = curr_entry->behavior;
15069 		short_info->user_wired_count = curr_entry->user_wired_count;
15070 		short_info->is_submap = curr_entry->is_sub_map;
15071 		if (curr_entry->is_sub_map) {
15072 			short_info->object_id = VM_OBJECT_ID(VME_SUBMAP(curr_entry));
15073 		} else {
15074 			short_info->object_id = VM_OBJECT_ID(VME_OBJECT(curr_entry));
15075 		}
15076 	}
15077 
15078 	extended.pages_resident = 0;
15079 	extended.pages_swapped_out = 0;
15080 	extended.pages_shared_now_private = 0;
15081 	extended.pages_dirtied = 0;
15082 	extended.pages_reusable = 0;
15083 	extended.external_pager = 0;
15084 	extended.shadow_depth = 0;
15085 	extended.share_mode = SM_EMPTY;
15086 	extended.ref_count = 0;
15087 
15088 	if (not_in_kdp) {
15089 		if (!curr_entry->is_sub_map) {
15090 			vm_map_offset_t range_start, range_end;
15091 			range_start = MAX((curr_address - curr_max_below),
15092 			    curr_entry->vme_start);
15093 			range_end = MIN((curr_address + curr_max_above),
15094 			    curr_entry->vme_end);
15095 			vm_map_region_walk(curr_map,
15096 			    range_start,
15097 			    curr_entry,
15098 			    (VME_OFFSET(curr_entry) +
15099 			    (range_start -
15100 			    curr_entry->vme_start)),
15101 			    range_end - range_start,
15102 			    &extended,
15103 			    look_for_pages, VM_REGION_EXTENDED_INFO_COUNT);
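			/*
			 * An externally-paged object whose only two
			 * references are this mapping and (presumably)
			 * its pager is not really shared: report it
			 * as private.
			 */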
15104 			if (extended.external_pager &&
15105 			    extended.ref_count == 2 &&
15106 			    extended.share_mode == SM_SHARED) {
15107 				extended.share_mode = SM_PRIVATE;
15108 			}
15109 			if (submap_needed_copy) {
15110 				extended.share_mode = SM_COW;
15111 			}
15112 		} else {
15113 			if (curr_entry->use_pmap) {
15114 				extended.share_mode = SM_TRUESHARED;
15115 			} else {
15116 				extended.share_mode = SM_PRIVATE;
15117 			}
15118 			extended.ref_count = os_ref_get_count_raw(&VME_SUBMAP(curr_entry)->map_refcnt);
15119 		}
15120 	}
15121 
15122 	if (look_for_pages) {
15123 		submap_info->pages_resident = extended.pages_resident;
15124 		submap_info->pages_swapped_out = extended.pages_swapped_out;
15125 		submap_info->pages_shared_now_private =
15126 		    extended.pages_shared_now_private;
15127 		submap_info->pages_dirtied = extended.pages_dirtied;
15128 		submap_info->external_pager = extended.external_pager;
15129 		submap_info->shadow_depth = extended.shadow_depth;
15130 		submap_info->share_mode = extended.share_mode;
15131 		submap_info->ref_count = extended.ref_count;
15132 
15133 		if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
15134 			submap_info->pages_reusable = extended.pages_reusable;
15135 		}
15136 		if (original_count >= VM_REGION_SUBMAP_INFO_V2_COUNT_64) {
15137 			if (curr_entry->is_sub_map) {
15138 				submap_info->object_id_full = (vm_object_id_t)VM_KERNEL_ADDRPERM(VME_SUBMAP(curr_entry));
15139 			} else if (VME_OBJECT(curr_entry)) {
15140 				submap_info->object_id_full = (vm_object_id_t)VM_KERNEL_ADDRPERM(VME_OBJECT(curr_entry));
15141 			} else {
15142 				submap_info->object_id_full = 0ull;
15143 			}
15144 		}
15145 	} else {
15146 		short_info->external_pager = extended.external_pager;
15147 		short_info->shadow_depth = extended.shadow_depth;
15148 		short_info->share_mode = extended.share_mode;
15149 		short_info->ref_count = extended.ref_count;
15150 	}
15151 
15152 	if (not_in_kdp) {
15153 		vm_map_unlock_read(curr_map);
15154 	}
15155 
15156 	return KERN_SUCCESS;
15157 }
15158 
15159 /*
15160  *	vm_region:
15161  *
15162  *	User call to obtain information about a region in
15163  *	a task's address map.  Several flavors are supported:
15164  *	basic (32-bit and 64-bit), extended and "top" info.
15165  *
15166  *	XXX The reserved and behavior fields cannot be filled
15167  *	    in until the vm merge from the IK is completed, and
15168  *	    vm_reserve is implemented.
15169  */
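/*
 * For illustration, a user-space caller typically reaches this routine
 * through the mach_vm_region() MIG wrapper, along these lines
 * (user-space sketch; "addr" is whatever address the caller wants to
 * probe):
 *
 *	mach_vm_address_t addr = ...;
 *	mach_vm_size_t size = 0;
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name = MACH_PORT_NULL;
 *
 *	kern_return_t kr = mach_vm_region(mach_task_self(), &addr,
 *	    &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
 *	    &count, &object_name);
 *
 * On success, "addr" and "size" are updated to describe the region
 * found at or after the requested address.
 */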
15170 
15171 kern_return_t
15172 vm_map_region(
15173 	vm_map_t                 map,
15174 	vm_map_offset_t *address,               /* IN/OUT */
15175 	vm_map_size_t           *size,                  /* OUT */
15176 	vm_region_flavor_t       flavor,                /* IN */
15177 	vm_region_info_t         info,                  /* OUT */
15178 	mach_msg_type_number_t  *count, /* IN/OUT */
15179 	mach_port_t             *object_name)           /* OUT */
15180 {
15181 	vm_map_entry_t          tmp_entry;
15182 	vm_map_entry_t          entry;
15183 	vm_map_offset_t         start;
15184 
15185 	if (map == VM_MAP_NULL) {
15186 		return KERN_INVALID_ARGUMENT;
15187 	}
15188 
15189 	switch (flavor) {
15190 	case VM_REGION_BASIC_INFO:
15191 		/* legacy for old 32-bit objects info */
15192 	{
15193 		vm_region_basic_info_t  basic;
15194 
15195 		if (*count < VM_REGION_BASIC_INFO_COUNT) {
15196 			return KERN_INVALID_ARGUMENT;
15197 		}
15198 
15199 		basic = (vm_region_basic_info_t) info;
15200 		*count = VM_REGION_BASIC_INFO_COUNT;
15201 
15202 		vm_map_lock_read(map);
15203 
15204 		start = *address;
15205 		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
15206 			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
15207 				vm_map_unlock_read(map);
15208 				return KERN_INVALID_ADDRESS;
15209 			}
15210 		} else {
15211 			entry = tmp_entry;
15212 		}
15213 
15214 		start = entry->vme_start;
15215 
15216 		basic->offset = (uint32_t)VME_OFFSET(entry);
15217 		basic->protection = entry->protection;
15218 		basic->inheritance = entry->inheritance;
15219 		basic->max_protection = entry->max_protection;
15220 		basic->behavior = entry->behavior;
15221 		basic->user_wired_count = entry->user_wired_count;
15222 		basic->reserved = entry->is_sub_map;
15223 		*address = start;
15224 		*size = (entry->vme_end - start);
15225 
15226 		if (object_name) {
15227 			*object_name = IP_NULL;
15228 		}
15229 		if (entry->is_sub_map) {
15230 			basic->shared = FALSE;
15231 		} else {
15232 			basic->shared = entry->is_shared;
15233 		}
15234 
15235 		vm_map_unlock_read(map);
15236 		return KERN_SUCCESS;
15237 	}
15238 
15239 	case VM_REGION_BASIC_INFO_64:
15240 	{
15241 		vm_region_basic_info_64_t       basic;
15242 
15243 		if (*count < VM_REGION_BASIC_INFO_COUNT_64) {
15244 			return KERN_INVALID_ARGUMENT;
15245 		}
15246 
15247 		basic = (vm_region_basic_info_64_t) info;
15248 		*count = VM_REGION_BASIC_INFO_COUNT_64;
15249 
15250 		vm_map_lock_read(map);
15251 
15252 		start = *address;
15253 		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
15254 			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
15255 				vm_map_unlock_read(map);
15256 				return KERN_INVALID_ADDRESS;
15257 			}
15258 		} else {
15259 			entry = tmp_entry;
15260 		}
15261 
15262 		start = entry->vme_start;
15263 
15264 		basic->offset = VME_OFFSET(entry);
15265 		basic->protection = entry->protection;
15266 		basic->inheritance = entry->inheritance;
15267 		basic->max_protection = entry->max_protection;
15268 		basic->behavior = entry->behavior;
15269 		basic->user_wired_count = entry->user_wired_count;
15270 		basic->reserved = entry->is_sub_map;
15271 		*address = start;
15272 		*size = (entry->vme_end - start);
15273 
15274 		if (object_name) {
15275 			*object_name = IP_NULL;
15276 		}
15277 		if (entry->is_sub_map) {
15278 			basic->shared = FALSE;
15279 		} else {
15280 			basic->shared = entry->is_shared;
15281 		}
15282 
15283 		vm_map_unlock_read(map);
15284 		return KERN_SUCCESS;
15285 	}
15286 	case VM_REGION_EXTENDED_INFO:
15287 		if (*count < VM_REGION_EXTENDED_INFO_COUNT) {
15288 			return KERN_INVALID_ARGUMENT;
15289 		}
15290 		OS_FALLTHROUGH;
15291 	case VM_REGION_EXTENDED_INFO__legacy:
15292 		if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy) {
15293 			return KERN_INVALID_ARGUMENT;
15294 		}
15295 
15296 		{
15297 			vm_region_extended_info_t       extended;
15298 			mach_msg_type_number_t original_count;
15299 			int effective_page_size, effective_page_shift;
15300 
15301 			extended = (vm_region_extended_info_t) info;
15302 
15303 			effective_page_shift = vm_self_region_page_shift(map);
15304 			effective_page_size = (1 << effective_page_shift);
15305 
15306 			vm_map_lock_read(map);
15307 
15308 			start = *address;
15309 			if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
15310 				if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
15311 					vm_map_unlock_read(map);
15312 					return KERN_INVALID_ADDRESS;
15313 				}
15314 			} else {
15315 				entry = tmp_entry;
15316 			}
15317 			start = entry->vme_start;
15318 
15319 			extended->protection = entry->protection;
15320 			extended->user_tag = VME_ALIAS(entry);
15321 			extended->pages_resident = 0;
15322 			extended->pages_swapped_out = 0;
15323 			extended->pages_shared_now_private = 0;
15324 			extended->pages_dirtied = 0;
15325 			extended->external_pager = 0;
15326 			extended->shadow_depth = 0;
15327 
15328 			original_count = *count;
15329 			if (flavor == VM_REGION_EXTENDED_INFO__legacy) {
15330 				*count = VM_REGION_EXTENDED_INFO_COUNT__legacy;
15331 			} else {
15332 				extended->pages_reusable = 0;
15333 				*count = VM_REGION_EXTENDED_INFO_COUNT;
15334 			}
15335 
15336 			vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, extended, TRUE, *count);
15337 
15338 			if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED) {
15339 				extended->share_mode = SM_PRIVATE;
15340 			}
15341 
15342 			if (object_name) {
15343 				*object_name = IP_NULL;
15344 			}
15345 			*address = start;
15346 			*size = (entry->vme_end - start);
15347 
15348 			vm_map_unlock_read(map);
15349 			return KERN_SUCCESS;
15350 		}
15351 	case VM_REGION_TOP_INFO:
15352 	{
15353 		vm_region_top_info_t    top;
15354 
15355 		if (*count < VM_REGION_TOP_INFO_COUNT) {
15356 			return KERN_INVALID_ARGUMENT;
15357 		}
15358 
15359 		top = (vm_region_top_info_t) info;
15360 		*count = VM_REGION_TOP_INFO_COUNT;
15361 
15362 		vm_map_lock_read(map);
15363 
15364 		start = *address;
15365 		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
15366 			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
15367 				vm_map_unlock_read(map);
15368 				return KERN_INVALID_ADDRESS;
15369 			}
15370 		} else {
15371 			entry = tmp_entry;
15372 		}
15373 		start = entry->vme_start;
15374 
15375 		top->private_pages_resident = 0;
15376 		top->shared_pages_resident = 0;
15377 
15378 		vm_map_region_top_walk(entry, top);
15379 
15380 		if (object_name) {
15381 			*object_name = IP_NULL;
15382 		}
15383 		*address = start;
15384 		*size = (entry->vme_end - start);
15385 
15386 		vm_map_unlock_read(map);
15387 		return KERN_SUCCESS;
15388 	}
15389 	default:
15390 		return KERN_INVALID_ARGUMENT;
15391 	}
15392 }
15393 
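/*
 * Number of pages of "obj" to report as resident for a mapping of
 * "entry_size" pages: if the object is marked all-reusable, only its
 * wired pages are counted; otherwise reusable pages are excluded.
 * The result is clamped to the size of the mapping.
 */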
15394 #define OBJ_RESIDENT_COUNT(obj, entry_size)                             \
15395 	MIN((entry_size),                                               \
15396 	    ((obj)->all_reusable ?                                      \
15397 	     (obj)->wired_page_count :                                  \
15398 	     (obj)->resident_page_count - (obj)->reusable_page_count))
15399 
15400 void
15401 vm_map_region_top_walk(
15402 	vm_map_entry_t             entry,
15403 	vm_region_top_info_t       top)
15404 {
15405 	if (entry->is_sub_map || VME_OBJECT(entry) == 0) {
15406 		top->share_mode = SM_EMPTY;
15407 		top->ref_count = 0;
15408 		top->obj_id = 0;
15409 		return;
15410 	}
15411 
15412 	{
15413 		struct  vm_object *obj, *tmp_obj;
15414 		int             ref_count;
15415 		uint32_t        entry_size;
15416 
15417 		entry_size = (uint32_t) ((entry->vme_end - entry->vme_start) / PAGE_SIZE_64);
15418 
15419 		obj = VME_OBJECT(entry);
15420 
15421 		vm_object_lock(obj);
15422 
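		/*
		 * Don't count the transient reference that in-flight
		 * paging activity holds on the object.
		 */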
15423 		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) {
15424 			ref_count--;
15425 		}
15426 
15427 		assert(obj->reusable_page_count <= obj->resident_page_count);
15428 		if (obj->shadow) {
15429 			if (ref_count == 1) {
15430 				top->private_pages_resident =
15431 				    OBJ_RESIDENT_COUNT(obj, entry_size);
15432 			} else {
15433 				top->shared_pages_resident =
15434 				    OBJ_RESIDENT_COUNT(obj, entry_size);
15435 			}
15436 			top->ref_count  = ref_count;
15437 			top->share_mode = SM_COW;
15438 
15439 			while ((tmp_obj = obj->shadow)) {
15440 				vm_object_lock(tmp_obj);
15441 				vm_object_unlock(obj);
15442 				obj = tmp_obj;
15443 
15444 				if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) {
15445 					ref_count--;
15446 				}
15447 
15448 				assert(obj->reusable_page_count <= obj->resident_page_count);
15449 				top->shared_pages_resident +=
15450 				    OBJ_RESIDENT_COUNT(obj, entry_size);
15451 				top->ref_count += ref_count - 1;
15452 			}
15453 		} else {
15454 			if (entry->superpage_size) {
15455 				top->share_mode = SM_LARGE_PAGE;
15456 				top->shared_pages_resident = 0;
15457 				top->private_pages_resident = entry_size;
15458 			} else if (entry->needs_copy) {
15459 				top->share_mode = SM_COW;
15460 				top->shared_pages_resident =
15461 				    OBJ_RESIDENT_COUNT(obj, entry_size);
15462 			} else {
15463 				if (ref_count == 1 ||
15464 				    (ref_count == 2 && obj->named)) {
15465 					top->share_mode = SM_PRIVATE;
15466 					top->private_pages_resident =
15467 					    OBJ_RESIDENT_COUNT(obj,
15468 					    entry_size);
15469 				} else {
15470 					top->share_mode = SM_SHARED;
15471 					top->shared_pages_resident =
15472 					    OBJ_RESIDENT_COUNT(obj,
15473 					    entry_size);
15474 				}
15475 			}
15476 			top->ref_count = ref_count;
15477 		}
15478 		/* XXX K64: obj_id will be truncated */
15479 		top->obj_id = (unsigned int) (uintptr_t)VM_KERNEL_ADDRPERM(obj);
15480 
15481 		vm_object_unlock(obj);
15482 	}
15483 }
15484 
15485 void
15486 vm_map_region_walk(
15487 	vm_map_t                        map,
15488 	vm_map_offset_t                 va,
15489 	vm_map_entry_t                  entry,
15490 	vm_object_offset_t              offset,
15491 	vm_object_size_t                range,
15492 	vm_region_extended_info_t       extended,
15493 	boolean_t                       look_for_pages,
15494 	mach_msg_type_number_t count)
15495 {
15496 	struct vm_object *obj, *tmp_obj;
15497 	vm_map_offset_t       last_offset;
15498 	int               i;
15499 	int               ref_count;
15500 	struct vm_object        *shadow_object;
15501 	unsigned short          shadow_depth;
15502 	boolean_t         do_region_footprint;
15503 	int                     effective_page_size, effective_page_shift;
15504 	vm_map_offset_t         effective_page_mask;
15505 
15506 	do_region_footprint = task_self_region_footprint();
15507 
15508 	if ((entry->is_sub_map) ||
15509 	    (VME_OBJECT(entry) == 0) ||
15510 	    (VME_OBJECT(entry)->phys_contiguous &&
15511 	    !entry->superpage_size)) {
15512 		extended->share_mode = SM_EMPTY;
15513 		extended->ref_count = 0;
15514 		return;
15515 	}
15516 
15517 	if (entry->superpage_size) {
15518 		extended->shadow_depth = 0;
15519 		extended->share_mode = SM_LARGE_PAGE;
15520 		extended->ref_count = 1;
15521 		extended->external_pager = 0;
15522 
15523 		/* TODO4K: Superpage in 4k mode? */
15524 		extended->pages_resident = (unsigned int)(range >> PAGE_SHIFT);
15525 		extended->shadow_depth = 0;
15526 		return;
15527 	}
15528 
15529 	effective_page_shift = vm_self_region_page_shift(map);
15530 	effective_page_size = (1 << effective_page_shift);
15531 	effective_page_mask = effective_page_size - 1;
15532 
15533 	offset = vm_map_trunc_page(offset, effective_page_mask);
15534 
15535 	obj = VME_OBJECT(entry);
15536 
15537 	vm_object_lock(obj);
15538 
15539 	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) {
15540 		ref_count--;
15541 	}
15542 
15543 	if (look_for_pages) {
15544 		for (last_offset = offset + range;
15545 		    offset < last_offset;
15546 		    offset += effective_page_size, va += effective_page_size) {
15547 			if (do_region_footprint) {
15548 				int disp;
15549 
15550 				disp = 0;
15551 				if (map->has_corpse_footprint) {
15552 					/*
15553 					 * Query the page info data we saved
15554 					 * while forking the corpse.
15555 					 */
15556 					vm_map_corpse_footprint_query_page_info(
15557 						map,
15558 						va,
15559 						&disp);
15560 				} else {
15561 					/*
15562 					 * Query the pmap.
15563 					 */
15564 					vm_map_footprint_query_page_info(
15565 						map,
15566 						entry,
15567 						va,
15568 						&disp);
15569 				}
15570 				if (disp & VM_PAGE_QUERY_PAGE_PRESENT) {
15571 					extended->pages_resident++;
15572 				}
15573 				if (disp & VM_PAGE_QUERY_PAGE_REUSABLE) {
15574 					extended->pages_reusable++;
15575 				}
15576 				if (disp & VM_PAGE_QUERY_PAGE_DIRTY) {
15577 					extended->pages_dirtied++;
15578 				}
15579 				if (disp & PMAP_QUERY_PAGE_COMPRESSED) {
15580 					extended->pages_swapped_out++;
15581 				}
15582 				continue;
15583 			}
15584 
15585 			vm_map_region_look_for_page(map, va, obj,
15586 			    vm_object_trunc_page(offset), ref_count,
15587 			    0, extended, count);
15588 		}
15589 
15590 		if (do_region_footprint) {
15591 			goto collect_object_info;
15592 		}
15593 	} else {
15594 collect_object_info:
15595 		shadow_object = obj->shadow;
15596 		shadow_depth = 0;
15597 
15598 		if (!(obj->internal)) {
15599 			extended->external_pager = 1;
15600 		}
15601 
15602 		if (shadow_object != VM_OBJECT_NULL) {
15603 			vm_object_lock(shadow_object);
15604 			for (;
15605 			    shadow_object != VM_OBJECT_NULL;
15606 			    shadow_depth++) {
15607 				vm_object_t     next_shadow;
15608 
15609 				if (!(shadow_object->internal)) {
15610 					extended->external_pager = 1;
15611 				}
15612 
15613 				next_shadow = shadow_object->shadow;
15614 				if (next_shadow) {
15615 					vm_object_lock(next_shadow);
15616 				}
15617 				vm_object_unlock(shadow_object);
15618 				shadow_object = next_shadow;
15619 			}
15620 		}
15621 		extended->shadow_depth = shadow_depth;
15622 	}
15623 
15624 	if (extended->shadow_depth || entry->needs_copy) {
15625 		extended->share_mode = SM_COW;
15626 	} else {
15627 		if (ref_count == 1) {
15628 			extended->share_mode = SM_PRIVATE;
15629 		} else {
15630 			if (obj->true_share) {
15631 				extended->share_mode = SM_TRUESHARED;
15632 			} else {
15633 				extended->share_mode = SM_SHARED;
15634 			}
15635 		}
15636 	}
15637 	extended->ref_count = ref_count - extended->shadow_depth;
15638 
15639 	for (i = 0; i < extended->shadow_depth; i++) {
15640 		if ((tmp_obj = obj->shadow) == 0) {
15641 			break;
15642 		}
15643 		vm_object_lock(tmp_obj);
15644 		vm_object_unlock(obj);
15645 
15646 		if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress) {
15647 			ref_count--;
15648 		}
15649 
15650 		extended->ref_count += ref_count;
15651 		obj = tmp_obj;
15652 	}
15653 	vm_object_unlock(obj);
15654 
15655 	if (extended->share_mode == SM_SHARED) {
15656 		vm_map_entry_t       cur;
15657 		vm_map_entry_t       last;
15658 		int      my_refs;
15659 
15660 		obj = VME_OBJECT(entry);
15661 		last = vm_map_to_entry(map);
15662 		my_refs = 0;
15663 
15664 		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) {
15665 			ref_count--;
15666 		}
15667 		for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next) {
15668 			my_refs += vm_map_region_count_obj_refs(cur, obj);
15669 		}
15670 
15671 		if (my_refs == ref_count) {
15672 			extended->share_mode = SM_PRIVATE_ALIASED;
15673 		} else if (my_refs > 1) {
15674 			extended->share_mode = SM_SHARED_ALIASED;
15675 		}
15676 	}
15677 }
15678 
15679 
15680 /* object is locked on entry and locked on return */
15681 
15682 
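/*
 * Walk the shadow chain starting at "object", looking for the page at
 * "offset".  The extended info counters (resident, dirtied, reusable,
 * swapped out, shared-now-private) are updated for the page found at
 * the shallowest depth, and the deepest shadow level visited is
 * recorded in "shadow_depth".
 */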
15683 static void
15684 vm_map_region_look_for_page(
15685 	__unused vm_map_t               map,
15686 	__unused vm_map_offset_t        va,
15687 	vm_object_t                     object,
15688 	vm_object_offset_t              offset,
15689 	int                             max_refcnt,
15690 	unsigned short                  depth,
15691 	vm_region_extended_info_t       extended,
15692 	mach_msg_type_number_t count)
15693 {
15694 	vm_page_t       p;
15695 	vm_object_t     shadow;
15696 	int             ref_count;
15697 	vm_object_t     caller_object;
15698 
15699 	shadow = object->shadow;
15700 	caller_object = object;
15701 
15702 
15703 	while (TRUE) {
15704 		if (!(object->internal)) {
15705 			extended->external_pager = 1;
15706 		}
15707 
15708 		if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
15709 			if (shadow && (max_refcnt == 1)) {
15710 				extended->pages_shared_now_private++;
15711 			}
15712 
15713 			if (!p->vmp_fictitious &&
15714 			    (p->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
15715 				extended->pages_dirtied++;
15716 			} else if (count >= VM_REGION_EXTENDED_INFO_COUNT) {
15717 				if (p->vmp_reusable || object->all_reusable) {
15718 					extended->pages_reusable++;
15719 				}
15720 			}
15721 
15722 			extended->pages_resident++;
15723 
15724 			if (object != caller_object) {
15725 				vm_object_unlock(object);
15726 			}
15727 
15728 			return;
15729 		}
15730 		if (object->internal &&
15731 		    object->alive &&
15732 		    !object->terminating &&
15733 		    object->pager_ready) {
15734 			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
15735 			    == VM_EXTERNAL_STATE_EXISTS) {
15736 				/* the pager has that page */
15737 				extended->pages_swapped_out++;
15738 				if (object != caller_object) {
15739 					vm_object_unlock(object);
15740 				}
15741 				return;
15742 			}
15743 		}
15744 
15745 		if (shadow) {
15746 			vm_object_lock(shadow);
15747 
15748 			if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress) {
15749 				ref_count--;
15750 			}
15751 
15752 			if (++depth > extended->shadow_depth) {
15753 				extended->shadow_depth = depth;
15754 			}
15755 
15756 			if (ref_count > max_refcnt) {
15757 				max_refcnt = ref_count;
15758 			}
15759 
15760 			if (object != caller_object) {
15761 				vm_object_unlock(object);
15762 			}
15763 
15764 			offset = offset + object->vo_shadow_offset;
15765 			object = shadow;
15766 			shadow = object->shadow;
15767 			continue;
15768 		}
15769 		if (object != caller_object) {
15770 			vm_object_unlock(object);
15771 		}
15772 		break;
15773 	}
15774 }
15775 
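/*
 * Count how many times "object" appears in the shadow chain hanging
 * off "entry"'s VM object (including that object itself).
 */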
15776 static int
15777 vm_map_region_count_obj_refs(
15778 	vm_map_entry_t    entry,
15779 	vm_object_t       object)
15780 {
15781 	int ref_count;
15782 	vm_object_t chk_obj;
15783 	vm_object_t tmp_obj;
15784 
15785 	if (entry->is_sub_map || VME_OBJECT(entry) == VM_OBJECT_NULL) {
15786 		return 0;
15787 	}
15788 
15789 	ref_count = 0;
15790 	chk_obj = VME_OBJECT(entry);
15791 	vm_object_lock(chk_obj);
15792 
15793 	while (chk_obj) {
15794 		if (chk_obj == object) {
15795 			ref_count++;
15796 		}
15797 		tmp_obj = chk_obj->shadow;
15798 		if (tmp_obj) {
15799 			vm_object_lock(tmp_obj);
15800 		}
15801 		vm_object_unlock(chk_obj);
15802 
15803 		chk_obj = tmp_obj;
15804 	}
15805 
15806 	return ref_count;
15807 }
15808 
15809 
15810 /*
15811  *	Routine:	vm_map_simplify
15812  *
15813  *	Description:
15814  *		Attempt to simplify the map representation in
15815  *		the vicinity of the given starting address.
15816  *	Note:
15817  *		This routine is intended primarily to keep the
15818  *		kernel maps more compact -- they generally don't
15819  *		benefit from the "expand a map entry" technology
15820  *		at allocation time because the adjacent entry
15821  *		is often wired down.
15822  */
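/*
 * vm_map_simplify_entry() coalesces "this_entry" with the entry
 * immediately preceding it when the two are virtually adjacent, map
 * the same object at contiguous offsets, and agree on every attribute
 * checked below (protections, inheritance, wiring, aliasing, etc.).
 * On success, the previous entry is unlinked and disposed of, and
 * "this_entry" is extended downward to cover both ranges.
 */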
15823 void
15824 vm_map_simplify_entry(
15825 	vm_map_t        map,
15826 	vm_map_entry_t  this_entry)
15827 {
15828 	vm_map_entry_t  prev_entry;
15829 
15830 	prev_entry = this_entry->vme_prev;
15831 
15832 	if ((this_entry != vm_map_to_entry(map)) &&
15833 	    (prev_entry != vm_map_to_entry(map)) &&
15834 
15835 	    (prev_entry->vme_end == this_entry->vme_start) &&
15836 
15837 	    (prev_entry->is_sub_map == this_entry->is_sub_map) &&
15838 	    (prev_entry->vme_object_value == this_entry->vme_object_value) &&
15839 	    (prev_entry->vme_kernel_object == this_entry->vme_kernel_object) &&
15840 	    ((VME_OFFSET(prev_entry) + (prev_entry->vme_end -
15841 	    prev_entry->vme_start))
15842 	    == VME_OFFSET(this_entry)) &&
15843 
15844 	    (prev_entry->behavior == this_entry->behavior) &&
15845 	    (prev_entry->needs_copy == this_entry->needs_copy) &&
15846 	    (prev_entry->protection == this_entry->protection) &&
15847 	    (prev_entry->max_protection == this_entry->max_protection) &&
15848 	    (prev_entry->inheritance == this_entry->inheritance) &&
15849 	    (prev_entry->use_pmap == this_entry->use_pmap) &&
15850 	    (VME_ALIAS(prev_entry) == VME_ALIAS(this_entry)) &&
15851 	    (prev_entry->no_cache == this_entry->no_cache) &&
15852 	    (prev_entry->vme_permanent == this_entry->vme_permanent) &&
15853 	    (prev_entry->map_aligned == this_entry->map_aligned) &&
15854 	    (prev_entry->zero_wired_pages == this_entry->zero_wired_pages) &&
15855 	    (prev_entry->used_for_jit == this_entry->used_for_jit) &&
15856 #if __arm64e__
15857 	    (prev_entry->used_for_tpro == this_entry->used_for_tpro) &&
15858 #endif
15859 	    (prev_entry->csm_associated == this_entry->csm_associated) &&
15860 	    (prev_entry->vme_xnu_user_debug == this_entry->vme_xnu_user_debug) &&
15861 	    (prev_entry->iokit_acct == this_entry->iokit_acct) &&
15862 	    (prev_entry->vme_resilient_codesign ==
15863 	    this_entry->vme_resilient_codesign) &&
15864 	    (prev_entry->vme_resilient_media ==
15865 	    this_entry->vme_resilient_media) &&
15866 	    (prev_entry->vme_no_copy_on_read == this_entry->vme_no_copy_on_read) &&
15867 	    (prev_entry->translated_allow_execute == this_entry->translated_allow_execute) &&
15868 
15869 	    (prev_entry->wired_count == this_entry->wired_count) &&
15870 	    (prev_entry->user_wired_count == this_entry->user_wired_count) &&
15871 
15872 	    ((prev_entry->vme_atomic == FALSE) && (this_entry->vme_atomic == FALSE)) &&
15873 	    (prev_entry->in_transition == FALSE) &&
15874 	    (this_entry->in_transition == FALSE) &&
15875 	    (prev_entry->needs_wakeup == FALSE) &&
15876 	    (this_entry->needs_wakeup == FALSE) &&
15877 	    (prev_entry->is_shared == this_entry->is_shared) &&
15878 	    (prev_entry->superpage_size == FALSE) &&
15879 	    (this_entry->superpage_size == FALSE)
15880 	    ) {
15881 		if (prev_entry->vme_permanent) {
15882 			assert(this_entry->vme_permanent);
15883 			prev_entry->vme_permanent = false;
15884 		}
15885 		vm_map_store_entry_unlink(map, prev_entry, true);
15886 		assert(prev_entry->vme_start < this_entry->vme_end);
15887 		if (prev_entry->map_aligned) {
15888 			assert(VM_MAP_PAGE_ALIGNED(prev_entry->vme_start,
15889 			    VM_MAP_PAGE_MASK(map)));
15890 		}
15891 		this_entry->vme_start = prev_entry->vme_start;
15892 		VME_OFFSET_SET(this_entry, VME_OFFSET(prev_entry));
15893 
15894 		if (map->holelistenabled) {
15895 			vm_map_store_update_first_free(map, this_entry, TRUE);
15896 		}
15897 
15898 		if (prev_entry->is_sub_map) {
15899 			vm_map_deallocate(VME_SUBMAP(prev_entry));
15900 		} else {
15901 			vm_object_deallocate(VME_OBJECT(prev_entry));
15902 		}
15903 		vm_map_entry_dispose(prev_entry);
15904 		SAVE_HINT_MAP_WRITE(map, this_entry);
15905 	}
15906 }
15907 
15908 void
15909 vm_map_simplify(
15910 	vm_map_t        map,
15911 	vm_map_offset_t start)
15912 {
15913 	vm_map_entry_t  this_entry;
15914 
15915 	vm_map_lock(map);
15916 	if (vm_map_lookup_entry(map, start, &this_entry)) {
15917 		vm_map_simplify_entry(map, this_entry);
15918 		vm_map_simplify_entry(map, this_entry->vme_next);
15919 	}
15920 	vm_map_unlock(map);
15921 }
15922 
15923 static void
15924 vm_map_simplify_range(
15925 	vm_map_t        map,
15926 	vm_map_offset_t start,
15927 	vm_map_offset_t end)
15928 {
15929 	vm_map_entry_t  entry;
15930 
15931 	/*
15932 	 * The map should be locked (for "write") by the caller.
15933 	 */
15934 
15935 	if (start >= end) {
15936 		/* invalid address range */
15937 		return;
15938 	}
15939 
15940 	start = vm_map_trunc_page(start,
15941 	    VM_MAP_PAGE_MASK(map));
15942 	end = vm_map_round_page(end,
15943 	    VM_MAP_PAGE_MASK(map));
15944 
15945 	if (!vm_map_lookup_entry(map, start, &entry)) {
15946 		/* "start" is not mapped and "entry" ends before "start" */
15947 		if (entry == vm_map_to_entry(map)) {
15948 			/* start with first entry in the map */
15949 			entry = vm_map_first_entry(map);
15950 		} else {
15951 			/* start with next entry */
15952 			entry = entry->vme_next;
15953 		}
15954 	}
15955 
15956 	while (entry != vm_map_to_entry(map) &&
15957 	    entry->vme_start <= end) {
15958 		/* try and coalesce "entry" with its previous entry */
15959 		vm_map_simplify_entry(map, entry);
15960 		entry = entry->vme_next;
15961 	}
15962 }
15963 
15964 
15965 /*
15966  *	Routine:	vm_map_machine_attribute
15967  *	Purpose:
15968  *		Provide machine-specific attributes to mappings,
15969  *		such as cachability etc. for machines that provide
15970  *		such as cacheability etc. for machines that provide
15971  *		caches will use this.
15972  *	Note:
15973  *		Responsibilities for locking and checking are handled here,
15974  *		everything else in the pmap module. If any non-volatile
15975  *		information must be kept, the pmap module should handle
15976  *		it itself. [This assumes that attributes do not
15977  *		need to be inherited, which seems ok to me]
15978  */
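/*
 * For illustration, user space reaches this routine through the
 * vm_machine_attribute() MIG call, e.g. to flush the caches over a
 * range (sketch; assumes the platform's pmap supports MATTR_CACHE):
 *
 *	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;
 *	kern_return_t kr = vm_machine_attribute(mach_task_self(),
 *	    addr, size, MATTR_CACHE, &value);
 */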
15979 kern_return_t
15980 vm_map_machine_attribute(
15981 	vm_map_t                        map,
15982 	vm_map_offset_t         start,
15983 	vm_map_offset_t         end,
15984 	vm_machine_attribute_t  attribute,
15985 	vm_machine_attribute_val_t* value)              /* IN/OUT */
15986 {
15987 	kern_return_t   ret;
15988 	vm_map_size_t sync_size;
15989 	vm_map_entry_t entry;
15990 
15991 	if (start < vm_map_min(map) || end > vm_map_max(map)) {
15992 		return KERN_INVALID_ADDRESS;
15993 	}
15994 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
15995 		return KERN_INVALID_ADDRESS;
15996 	}
15997 
15998 	/* Figure how much memory we need to flush (in page increments) */
15999 	sync_size = end - start;
16000 
16001 	vm_map_lock(map);
16002 
16003 	if (attribute != MATTR_CACHE) {
16004 		/* If we don't have to find physical addresses, we */
16005 		/* don't have to do an explicit traversal here.    */
16006 		ret = pmap_attribute(map->pmap, start, end - start,
16007 		    attribute, value);
16008 		vm_map_unlock(map);
16009 		return ret;
16010 	}
16011 
16012 	ret = KERN_SUCCESS;                                                                             /* Assume it all worked */
16013 
16014 	while (sync_size) {
16015 		if (vm_map_lookup_entry(map, start, &entry)) {
16016 			vm_map_size_t   sub_size;
16017 			if ((entry->vme_end - start) > sync_size) {
16018 				sub_size = sync_size;
16019 				sync_size = 0;
16020 			} else {
16021 				sub_size = entry->vme_end - start;
16022 				sync_size -= sub_size;
16023 			}
16024 			if (entry->is_sub_map) {
16025 				vm_map_offset_t sub_start;
16026 				vm_map_offset_t sub_end;
16027 
16028 				sub_start = (start - entry->vme_start)
16029 				    + VME_OFFSET(entry);
16030 				sub_end = sub_start + sub_size;
16031 				vm_map_machine_attribute(
16032 					VME_SUBMAP(entry),
16033 					sub_start,
16034 					sub_end,
16035 					attribute, value);
16036 			} else if (VME_OBJECT(entry)) {
16037 				vm_page_t               m;
16038 				vm_object_t             object;
16039 				vm_object_t             base_object;
16040 				vm_object_t             last_object;
16041 				vm_object_offset_t      offset;
16042 				vm_object_offset_t      base_offset;
16043 				vm_map_size_t           range;
16044 				range = sub_size;
16045 				offset = (start - entry->vme_start)
16046 				    + VME_OFFSET(entry);
16047 				offset = vm_object_trunc_page(offset);
16048 				base_offset = offset;
16049 				object = VME_OBJECT(entry);
16050 				base_object = object;
16051 				last_object = NULL;
16052 
16053 				vm_object_lock(object);
16054 
16055 				while (range) {
16056 					m = vm_page_lookup(
16057 						object, offset);
16058 
16059 					if (m && !m->vmp_fictitious) {
16060 						ret =
16061 						    pmap_attribute_cache_sync(
16062 							VM_PAGE_GET_PHYS_PAGE(m),
16063 							PAGE_SIZE,
16064 							attribute, value);
16065 					} else if (object->shadow) {
16066 						offset = offset + object->vo_shadow_offset;
16067 						last_object = object;
16068 						object = object->shadow;
16069 						vm_object_lock(last_object->shadow);
16070 						vm_object_unlock(last_object);
16071 						continue;
16072 					}
16073 					if (range < PAGE_SIZE) {
16074 						range = 0;
16075 					} else {
16076 						range -= PAGE_SIZE;
16077 					}
16078 
16079 					if (base_object != object) {
16080 						vm_object_unlock(object);
16081 						vm_object_lock(base_object);
16082 						object = base_object;
16083 					}
16084 					/* Bump to the next page */
16085 					base_offset += PAGE_SIZE;
16086 					offset = base_offset;
16087 				}
16088 				vm_object_unlock(object);
16089 			}
16090 			start += sub_size;
16091 		} else {
16092 			vm_map_unlock(map);
16093 			return KERN_FAILURE;
16094 		}
16095 	}
16096 
16097 	vm_map_unlock(map);
16098 
16099 	return ret;
16100 }
16101 
16102 /*
16103  *	vm_map_behavior_set:
16104  *
16105  *	Sets the paging reference behavior of the specified address
16106  *	range in the target map.  Paging reference behavior affects
16107  *	how pagein operations resulting from faults on the map will be
16108  *	clustered.
16109  */
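/*
 * For illustration, this is the backend for both the vm_behavior_set()
 * Mach call and the BSD madvise() system call; for example,
 * madvise(addr, len, MADV_SEQUENTIAL) would be translated into a
 * VM_BEHAVIOR_SEQUENTIAL request on the caller's map.
 */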
16110 kern_return_t
16111 vm_map_behavior_set(
16112 	vm_map_t        map,
16113 	vm_map_offset_t start,
16114 	vm_map_offset_t end,
16115 	vm_behavior_t   new_behavior)
16116 {
16117 	vm_map_entry_t  entry;
16118 	vm_map_entry_t  temp_entry;
16119 
16120 	if (start > end ||
16121 	    start < vm_map_min(map) ||
16122 	    end > vm_map_max(map)) {
16123 		return KERN_NO_SPACE;
16124 	}
16125 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
16126 		return KERN_INVALID_ADDRESS;
16127 	}
16128 
16129 	switch (new_behavior) {
16130 	/*
16131 	 * This first block of behaviors all set a persistent state on the specified
16132 	 * memory range.  All we have to do here is to record the desired behavior
16133 	 * in the vm_map_entry_t's.
16134 	 */
16135 
16136 	case VM_BEHAVIOR_DEFAULT:
16137 	case VM_BEHAVIOR_RANDOM:
16138 	case VM_BEHAVIOR_SEQUENTIAL:
16139 	case VM_BEHAVIOR_RSEQNTL:
16140 	case VM_BEHAVIOR_ZERO_WIRED_PAGES:
16141 		vm_map_lock(map);
16142 
16143 		/*
16144 		 *	The entire address range must be valid for the map.
16145 		 *      Note that vm_map_range_check() does a
16146 		 *	vm_map_lookup_entry() internally and returns the
16147 		 *	entry containing the start of the address range if
16148 		 *	the entire range is valid.
16149 		 */
16150 		if (vm_map_range_check(map, start, end, &temp_entry)) {
16151 			entry = temp_entry;
16152 			vm_map_clip_start(map, entry, start);
16153 		} else {
16154 			vm_map_unlock(map);
16155 			return KERN_INVALID_ADDRESS;
16156 		}
16157 
16158 		while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
16159 			vm_map_clip_end(map, entry, end);
16160 			if (entry->is_sub_map) {
16161 				assert(!entry->use_pmap);
16162 			}
16163 
16164 			if (new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES) {
16165 				entry->zero_wired_pages = TRUE;
16166 			} else {
16167 				entry->behavior = new_behavior;
16168 			}
16169 			entry = entry->vme_next;
16170 		}
16171 
16172 		vm_map_unlock(map);
16173 		break;
16174 
16175 	/*
16176 	 * The rest of these are different from the above in that they cause
16177 	 * an immediate action to take place as opposed to setting a behavior that
16178 	 * affects future actions.
16179 	 */
16180 
16181 	case VM_BEHAVIOR_WILLNEED:
16182 		return vm_map_willneed(map, start, end);
16183 
16184 	case VM_BEHAVIOR_DONTNEED:
16185 		return vm_map_msync(map, start, end - start, VM_SYNC_DEACTIVATE | VM_SYNC_CONTIGUOUS);
16186 
16187 	case VM_BEHAVIOR_FREE:
16188 		return vm_map_msync(map, start, end - start, VM_SYNC_KILLPAGES | VM_SYNC_CONTIGUOUS);
16189 
16190 	case VM_BEHAVIOR_REUSABLE:
16191 		return vm_map_reusable_pages(map, start, end);
16192 
16193 	case VM_BEHAVIOR_REUSE:
16194 		return vm_map_reuse_pages(map, start, end);
16195 
16196 	case VM_BEHAVIOR_CAN_REUSE:
16197 		return vm_map_can_reuse(map, start, end);
16198 
16199 #if MACH_ASSERT
16200 	case VM_BEHAVIOR_PAGEOUT:
16201 		return vm_map_pageout(map, start, end);
16202 #endif /* MACH_ASSERT */
16203 
16204 	default:
16205 		return KERN_INVALID_ARGUMENT;
16206 	}
16207 
16208 	return KERN_SUCCESS;
16209 }
16210 
16211 
16212 /*
16213  * Internals for madvise(MADV_WILLNEED) system call.
16214  *
16215  * The implementation is to:
16216  * a) read ahead if the mapping corresponds to a mapped regular file, or
16217  * b) fault in the pages (zero-fill, decompress, etc.) if it's an anonymous mapping.
16218  */
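/*
 * For illustration, a user-space caller typically gets here via
 *
 *	madvise(addr, len, MADV_WILLNEED);
 *
 * which the BSD layer turns into a VM_BEHAVIOR_WILLNEED request
 * (see vm_map_behavior_set() above).
 */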
16219 
16220 
16221 static kern_return_t
16222 vm_map_willneed(
16223 	vm_map_t        map,
16224 	vm_map_offset_t start,
16225 	vm_map_offset_t end
16226 	)
16227 {
16228 	vm_map_entry_t                  entry;
16229 	vm_object_t                     object;
16230 	memory_object_t                 pager;
16231 	struct vm_object_fault_info     fault_info = {};
16232 	kern_return_t                   kr;
16233 	vm_object_size_t                len;
16234 	vm_object_offset_t              offset;
16235 
16236 	fault_info.interruptible = THREAD_UNINT;        /* ignored value */
16237 	fault_info.behavior      = VM_BEHAVIOR_SEQUENTIAL;
16238 	fault_info.stealth       = TRUE;
16239 
16240 	/*
16241 	 * The MADV_WILLNEED operation doesn't require any changes to the
16242 	 * vm_map_entry_t's, so the read lock is sufficient.
16243 	 */
16244 
16245 	vm_map_lock_read(map);
16246 
16247 	/*
16248 	 * The madvise semantics require that the address range be fully
16249 	 * allocated with no holes.  Otherwise, we're required to return
16250 	 * an error.
16251 	 */
16252 
16253 	if (!vm_map_range_check(map, start, end, &entry)) {
16254 		vm_map_unlock_read(map);
16255 		return KERN_INVALID_ADDRESS;
16256 	}
16257 
16258 	/*
16259 	 * Examine each vm_map_entry_t in the range.
16260 	 */
16261 	for (; entry != vm_map_to_entry(map) && start < end;) {
16262 		/*
16263 		 * The first time through, the start address could be anywhere
16264 		 * within the vm_map_entry we found.  So adjust the offset to
16265 		 * correspond.  After that, the offset will always be zero to
16266 		 * correspond to the beginning of the current vm_map_entry.
16267 		 */
16268 		offset = (start - entry->vme_start) + VME_OFFSET(entry);
16269 
16270 		/*
16271 		 * Set the length so we don't go beyond the end of the
16272 		 * map_entry or beyond the end of the range we were given.
16273 		 * This range could span also multiple map entries all of which
16274 		 * map different files, so make sure we only do the right amount
16275 		 * of I/O for each object.  Note that it's possible for there
16276 		 * to be multiple map entries all referring to the same object
16277 		 * but with different page permissions, but it's not worth
16278 		 * trying to optimize that case.
16279 		 */
16280 		len = MIN(entry->vme_end - start, end - start);
16281 
16282 		if ((vm_size_t) len != len) {
16283 			/* 32-bit overflow */
16284 			len = (vm_size_t) (0 - PAGE_SIZE);
16285 		}
16286 		fault_info.cluster_size = (vm_size_t) len;
16287 		fault_info.lo_offset    = offset;
16288 		fault_info.hi_offset    = offset + len;
16289 		fault_info.user_tag     = VME_ALIAS(entry);
16290 		fault_info.pmap_options = 0;
16291 		if (entry->iokit_acct ||
16292 		    (!entry->is_sub_map && !entry->use_pmap)) {
16293 			fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
16294 		}
16295 		fault_info.fi_xnu_user_debug = entry->vme_xnu_user_debug;
16296 
16297 		/*
16298 		 * If the entry is a submap OR there's no read permission
16299 		 * to this mapping, then just skip it.
16300 		 */
16301 		if ((entry->is_sub_map) || (entry->protection & VM_PROT_READ) == 0) {
16302 			entry = entry->vme_next;
16303 			start = entry->vme_start;
16304 			continue;
16305 		}
16306 
16307 		object = VME_OBJECT(entry);
16308 
16309 		if (object == NULL ||
16310 		    (object && object->internal)) {
16311 			/*
16312 			 * Memory range backed by anonymous memory.
16313 			 */
16314 			vm_size_t region_size = 0, effective_page_size = 0;
16315 			vm_map_offset_t addr = 0, effective_page_mask = 0;
16316 
16317 			region_size = len;
16318 			addr = start;
16319 
16320 			effective_page_mask = MIN(vm_map_page_mask(current_map()), PAGE_MASK);
16321 			effective_page_size = effective_page_mask + 1;
16322 
16323 			vm_map_unlock_read(map);
16324 
16325 			while (region_size) {
16326 				vm_pre_fault(
16327 					vm_map_trunc_page(addr, effective_page_mask),
16328 					VM_PROT_READ | VM_PROT_WRITE);
16329 
16330 				region_size -= effective_page_size;
16331 				addr += effective_page_size;
16332 			}
16333 		} else {
16334 			/*
16335 			 * Find the file object backing this map entry.  If there is
16336 			 * none, then we simply ignore the "will need" advice for this
16337 			 * entry and go on to the next one.
16338 			 */
16339 			if ((object = find_vnode_object(entry)) == VM_OBJECT_NULL) {
16340 				entry = entry->vme_next;
16341 				start = entry->vme_start;
16342 				continue;
16343 			}
16344 
16345 			vm_object_paging_begin(object);
16346 			pager = object->pager;
16347 			vm_object_unlock(object);
16348 
16349 			/*
16350 			 * The data_request() could take a long time, so let's
16351 			 * release the map lock to avoid blocking other threads.
16352 			 */
16353 			vm_map_unlock_read(map);
16354 
16355 			/*
16356 			 * Get the data from the object asynchronously.
16357 			 *
16358 			 * Note that memory_object_data_request() places limits on the
16359 			 * amount of I/O it will do.  Regardless of the len we
16360 			 * specified, it won't do more than MAX_UPL_TRANSFER_BYTES and it
16361 			 * silently truncates the len to that size.  This isn't
16362 			 * necessarily bad since madvise shouldn't really be used to
16363 			 * page in unlimited amounts of data.  Other Unix variants
16364 			 * limit the willneed case as well.  If this turns out to be an
16365 			 * issue for developers, then we can always adjust the policy
16366 			 * here and still be backwards compatible since this is all
16367 			 * just "advice".
16368 			 */
16369 			kr = memory_object_data_request(
16370 				pager,
16371 				vm_object_trunc_page(offset) + object->paging_offset,
16372 				0,      /* ignored */
16373 				VM_PROT_READ,
16374 				(memory_object_fault_info_t)&fault_info);
16375 
16376 			vm_object_lock(object);
16377 			vm_object_paging_end(object);
16378 			vm_object_unlock(object);
16379 
16380 			/*
16381 			 * If we couldn't do the I/O for some reason, just give up on
16382 			 * the madvise.  We still return success to the user since
16383 			 * madvise isn't supposed to fail when the advice can't be
16384 			 * taken.
16385 			 */
16386 
16387 			if (kr != KERN_SUCCESS) {
16388 				return KERN_SUCCESS;
16389 			}
16390 		}
16391 
16392 		start += len;
16393 		if (start >= end) {
16394 			/* done */
16395 			return KERN_SUCCESS;
16396 		}
16397 
16398 		/* look up next entry */
16399 		vm_map_lock_read(map);
16400 		if (!vm_map_lookup_entry(map, start, &entry)) {
16401 			/*
16402 			 * There's a new hole in the address range.
16403 			 */
16404 			vm_map_unlock_read(map);
16405 			return KERN_INVALID_ADDRESS;
16406 		}
16407 	}
16408 
16409 	vm_map_unlock_read(map);
16410 	return KERN_SUCCESS;
16411 }
16412 
16413 static boolean_t
16414 vm_map_entry_is_reusable(
16415 	vm_map_entry_t entry)
16416 {
16417 	/* Only user map entries */
16418 
16419 	vm_object_t object;
16420 
16421 	if (entry->is_sub_map) {
16422 		return FALSE;
16423 	}
16424 
16425 	switch (VME_ALIAS(entry)) {
16426 	case VM_MEMORY_MALLOC:
16427 	case VM_MEMORY_MALLOC_SMALL:
16428 	case VM_MEMORY_MALLOC_LARGE:
16429 	case VM_MEMORY_REALLOC:
16430 	case VM_MEMORY_MALLOC_TINY:
16431 	case VM_MEMORY_MALLOC_LARGE_REUSABLE:
16432 	case VM_MEMORY_MALLOC_LARGE_REUSED:
16433 		/*
16434 		 * This is a malloc() memory region: check if it's still
16435 		 * in its original state and can be re-used for more
16436 		 * malloc() allocations.
16437 		 */
16438 		break;
16439 	default:
16440 		/*
16441 		 * Not a malloc() memory region: let the caller decide if
16442 		 * it's re-usable.
16443 		 */
16444 		return TRUE;
16445 	}
16446 
16447 	if (/*entry->is_shared ||*/
16448 		entry->is_sub_map ||
16449 		entry->in_transition ||
16450 		entry->protection != VM_PROT_DEFAULT ||
16451 		entry->max_protection != VM_PROT_ALL ||
16452 		entry->inheritance != VM_INHERIT_DEFAULT ||
16453 		entry->no_cache ||
16454 		entry->vme_permanent ||
16455 		entry->superpage_size != FALSE ||
16456 		entry->zero_wired_pages ||
16457 		entry->wired_count != 0 ||
16458 		entry->user_wired_count != 0) {
16459 		return FALSE;
16460 	}
16461 
16462 	object = VME_OBJECT(entry);
16463 	if (object == VM_OBJECT_NULL) {
16464 		return TRUE;
16465 	}
16466 	if (
16467 #if 0
16468 		/*
16469 		 * Let's proceed even if the VM object is potentially
16470 		 * shared.
16471 		 * We check for this later when processing the actual
16472 		 * VM pages, so the contents will be safe if shared.
16473 		 *
16474 		 * But we can still mark this memory region as "reusable" to
16475 		 * acknowledge that the caller did let us know that the memory
16476 		 * could be re-used and should not be penalized for holding
16477 		 * on to it.  This allows its "resident size" to not include
16478 		 * the reusable range.
16479 		 */
16480 		object->ref_count == 1 &&
16481 #endif
16482 		object->vo_copy == VM_OBJECT_NULL &&
16483 		object->shadow == VM_OBJECT_NULL &&
16484 		object->internal &&
16485 		object->purgable == VM_PURGABLE_DENY &&
16486 		object->wimg_bits == VM_WIMG_USE_DEFAULT &&
16487 		!object->code_signed) {
16488 		return TRUE;
16489 	}
16490 	return FALSE;
16491 }
16492 
16493 static kern_return_t
16494 vm_map_reuse_pages(
16495 	vm_map_t        map,
16496 	vm_map_offset_t start,
16497 	vm_map_offset_t end)
16498 {
16499 	vm_map_entry_t                  entry;
16500 	vm_object_t                     object;
16501 	vm_object_offset_t              start_offset, end_offset;
16502 
16503 	/*
16504 	 * The MADV_REUSE operation doesn't require any changes to the
16505 	 * vm_map_entry_t's, so the read lock is sufficient.
16506 	 */
16507 
16508 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
16509 		/*
16510 		 * XXX TODO4K
16511 		 * need to figure out what reusable means for a
16512 		 * portion of a native page.
16513 		 */
16514 		return KERN_SUCCESS;
16515 	}
16516 
16517 	vm_map_lock_read(map);
16518 	assert(map->pmap != kernel_pmap);       /* protect alias access */
16519 
16520 	/*
16521 	 * The madvise semantics require that the address range be fully
16522 	 * allocated with no holes.  Otherwise, we're required to return
16523 	 * an error.
16524 	 */
16525 
16526 	if (!vm_map_range_check(map, start, end, &entry)) {
16527 		vm_map_unlock_read(map);
16528 		vm_page_stats_reusable.reuse_pages_failure++;
16529 		return KERN_INVALID_ADDRESS;
16530 	}
16531 
16532 	/*
16533 	 * Examine each vm_map_entry_t in the range.
16534 	 */
16535 	for (; entry != vm_map_to_entry(map) && entry->vme_start < end;
16536 	    entry = entry->vme_next) {
16537 		/*
16538 		 * Sanity check on the VM map entry.
16539 		 */
16540 		if (!vm_map_entry_is_reusable(entry)) {
16541 			vm_map_unlock_read(map);
16542 			vm_page_stats_reusable.reuse_pages_failure++;
16543 			return KERN_INVALID_ADDRESS;
16544 		}
16545 
16546 		/*
16547 		 * The first time through, the start address could be anywhere
16548 		 * within the vm_map_entry we found.  So adjust the offset to
16549 		 * correspond.
16550 		 */
16551 		if (entry->vme_start < start) {
16552 			start_offset = start - entry->vme_start;
16553 		} else {
16554 			start_offset = 0;
16555 		}
16556 		end_offset = MIN(end, entry->vme_end) - entry->vme_start;
16557 		start_offset += VME_OFFSET(entry);
16558 		end_offset += VME_OFFSET(entry);
16559 
16560 		object = VME_OBJECT(entry);
16561 		if (object != VM_OBJECT_NULL) {
16562 			vm_object_lock(object);
16563 			vm_object_reuse_pages(object, start_offset, end_offset,
16564 			    TRUE);
16565 			vm_object_unlock(object);
16566 		}
16567 
16568 		if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE_REUSABLE) {
16569 			/*
16570 			 * XXX
16571 			 * We do not hold the VM map exclusively here.
16572 			 * The "alias" field is not that critical, so it's
16573 			 * safe to update it here, as long as it is the only
16574 			 * one that can be modified while holding the VM map
16575 			 * "shared".
16576 			 */
16577 			VME_ALIAS_SET(entry, VM_MEMORY_MALLOC_LARGE_REUSED);
16578 		}
16579 	}
16580 
16581 	vm_map_unlock_read(map);
16582 	vm_page_stats_reusable.reuse_pages_success++;
16583 	return KERN_SUCCESS;
16584 }
16585 
16586 
16587 static kern_return_t
16588 vm_map_reusable_pages(
16589 	vm_map_t        map,
16590 	vm_map_offset_t start,
16591 	vm_map_offset_t end)
16592 {
16593 	vm_map_entry_t                  entry;
16594 	vm_object_t                     object;
16595 	vm_object_offset_t              start_offset, end_offset;
16596 	vm_map_offset_t                 pmap_offset;
16597 
16598 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
16599 		/*
16600 		 * XXX TODO4K
16601 		 * need to figure out what reusable means for a portion
16602 		 * of a native page.
16603 		 */
16604 		return KERN_SUCCESS;
16605 	}
16606 
16607 	/*
16608 	 * The MADV_REUSABLE operation doesn't require any changes to the
16609 	 * vm_map_entry_t's, so the read lock is sufficient.
16610 	 */
16611 
16612 	vm_map_lock_read(map);
16613 	assert(map->pmap != kernel_pmap);       /* protect alias access */
16614 
16615 	/*
16616 	 * The madvise semantics require that the address range be fully
16617 	 * allocated with no holes.  Otherwise, we're required to return
16618 	 * an error.
16619 	 */
16620 
16621 	if (!vm_map_range_check(map, start, end, &entry)) {
16622 		vm_map_unlock_read(map);
16623 		vm_page_stats_reusable.reusable_pages_failure++;
16624 		return KERN_INVALID_ADDRESS;
16625 	}
16626 
16627 	/*
16628 	 * Examine each vm_map_entry_t in the range.
16629 	 */
16630 	for (; entry != vm_map_to_entry(map) && entry->vme_start < end;
16631 	    entry = entry->vme_next) {
16632 		int kill_pages = 0;
16633 		boolean_t reusable_no_write = FALSE;
16634 
16635 		/*
16636 		 * Sanity check on the VM map entry.
16637 		 */
16638 		if (!vm_map_entry_is_reusable(entry)) {
16639 			vm_map_unlock_read(map);
16640 			vm_page_stats_reusable.reusable_pages_failure++;
16641 			return KERN_INVALID_ADDRESS;
16642 		}
16643 
16644 		if (!(entry->protection & VM_PROT_WRITE) && !entry->used_for_jit
16645 #if __arm64e__
16646 		    && !entry->used_for_tpro
16647 #endif
16648 		    ) {
16649 			/* not writable: can't discard contents */
16650 			vm_map_unlock_read(map);
16651 			vm_page_stats_reusable.reusable_nonwritable++;
16652 			vm_page_stats_reusable.reusable_pages_failure++;
16653 			return KERN_PROTECTION_FAILURE;
16654 		}
16655 
16656 		/*
16657 		 * The first time through, the start address could be anywhere
16658 		 * within the vm_map_entry we found.  So adjust the offset to
16659 		 * correspond.
16660 		 */
16661 		if (entry->vme_start < start) {
16662 			start_offset = start - entry->vme_start;
16663 			pmap_offset = start;
16664 		} else {
16665 			start_offset = 0;
16666 			pmap_offset = entry->vme_start;
16667 		}
16668 		end_offset = MIN(end, entry->vme_end) - entry->vme_start;
16669 		start_offset += VME_OFFSET(entry);
16670 		end_offset += VME_OFFSET(entry);
16671 
16672 		object = VME_OBJECT(entry);
16673 		if (object == VM_OBJECT_NULL) {
16674 			continue;
16675 		}
16676 
16677 		if (entry->protection & VM_PROT_EXECUTE) {
16678 			/*
16679 			 * Executable mappings might be write-protected by
16680 			 * hardware, so do not attempt to write to these pages.
16681 			 */
16682 			reusable_no_write = TRUE;
16683 		}
16684 
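		/*
		 * Decide whether the pages' contents can be discarded
		 * outright (kill_pages == 1) or whether the object may
		 * be shared, in which case we only account the range
		 * as reusable (kill_pages == -1).
		 */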
16685 		vm_object_lock(object);
16686 		if (((object->ref_count == 1) ||
16687 		    (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC &&
16688 		    object->vo_copy == VM_OBJECT_NULL)) &&
16689 		    object->shadow == VM_OBJECT_NULL &&
16690 		    /*
16691 		     * "iokit_acct" entries are billed for their virtual size
16692 		     * (rather than for their resident pages only), so they
16693 		     * wouldn't benefit from making pages reusable, and it
16694 		     * would be hard to keep track of pages that are both
16695 		     * "iokit_acct" and "reusable" in the pmap stats and
16696 		     * ledgers.
16697 		     */
16698 		    !(entry->iokit_acct ||
16699 		    (!entry->is_sub_map && !entry->use_pmap))) {
16700 			if (object->ref_count != 1) {
16701 				vm_page_stats_reusable.reusable_shared++;
16702 			}
16703 			kill_pages = 1;
16704 		} else {
16705 			kill_pages = -1;
16706 		}
16707 		if (kill_pages != -1) {
16708 			vm_object_deactivate_pages(object,
16709 			    start_offset,
16710 			    end_offset - start_offset,
16711 			    kill_pages,
16712 			    TRUE /*reusable_pages*/,
16713 			    reusable_no_write,
16714 			    map->pmap,
16715 			    pmap_offset);
16716 		} else {
16717 			vm_page_stats_reusable.reusable_pages_shared++;
16718 			DTRACE_VM4(vm_map_reusable_pages_shared,
16719 			    unsigned int, VME_ALIAS(entry),
16720 			    vm_map_t, map,
16721 			    vm_map_entry_t, entry,
16722 			    vm_object_t, object);
16723 		}
16724 		vm_object_unlock(object);
16725 
16726 		if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE ||
16727 		    VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE_REUSED) {
16728 			/*
16729 			 * XXX
16730 			 * We do not hold the VM map exclusively here.
16731 			 * The "alias" field is not that critical, so it's
16732 			 * safe to update it here, as long as it is the only
16733 			 * one that can be modified while holding the VM map
16734 			 * "shared".
16735 			 */
16736 			VME_ALIAS_SET(entry, VM_MEMORY_MALLOC_LARGE_REUSABLE);
16737 		}
16738 	}
16739 
16740 	vm_map_unlock_read(map);
16741 	vm_page_stats_reusable.reusable_pages_success++;
16742 	return KERN_SUCCESS;
16743 }
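/*
 * Editor's sketch (illustrative, not part of this file): the
 * reusable-pages routine above is normally reached from user space via
 * madvise(2); assuming the usual Darwin mapping of MADV_FREE_REUSABLE
 * onto this path, a minimal caller looks like:
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *
 *	void
 *	mark_reusable(void *buf, size_t len)
 *	{
 *		// The pages stay mapped, but their contents may be
 *		// discarded and reclaimed lazily by the VM system.
 *		if (madvise(buf, len, MADV_FREE_REUSABLE) != 0) {
 *			abort();
 *		}
 *	}
 */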
16744 
16745 
16746 static kern_return_t
16747 vm_map_can_reuse(
16748 	vm_map_t        map,
16749 	vm_map_offset_t start,
16750 	vm_map_offset_t end)
16751 {
16752 	vm_map_entry_t                  entry;
16753 
16754 	/*
16755 	 * The MADV_CAN_REUSE operation doesn't require any changes to the
16756 	 * vm_map_entry_t's, so the read lock is sufficient.
16757 	 */
16758 
16759 	vm_map_lock_read(map);
16760 	assert(map->pmap != kernel_pmap);       /* protect alias access */
16761 
16762 	/*
16763 	 * The madvise semantics require that the address range be fully
16764 	 * allocated with no holes.  Otherwise, we're required to return
16765 	 * an error.
16766 	 */
16767 
16768 	if (!vm_map_range_check(map, start, end, &entry)) {
16769 		vm_map_unlock_read(map);
16770 		vm_page_stats_reusable.can_reuse_failure++;
16771 		return KERN_INVALID_ADDRESS;
16772 	}
16773 
16774 	/*
16775 	 * Examine each vm_map_entry_t in the range.
16776 	 */
16777 	for (; entry != vm_map_to_entry(map) && entry->vme_start < end;
16778 	    entry = entry->vme_next) {
16779 		/*
16780 		 * Sanity check on the VM map entry.
16781 		 */
16782 		if (!vm_map_entry_is_reusable(entry)) {
16783 			vm_map_unlock_read(map);
16784 			vm_page_stats_reusable.can_reuse_failure++;
16785 			return KERN_INVALID_ADDRESS;
16786 		}
16787 	}
16788 
16789 	vm_map_unlock_read(map);
16790 	vm_page_stats_reusable.can_reuse_success++;
16791 	return KERN_SUCCESS;
16792 }
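/*
 * Editor's sketch (hypothetical usage, not part of this file): the
 * range check above is what makes the MADV_CAN_REUSE advice a pure
 * probe; it succeeds only if the whole range is allocated and every
 * entry is "reusable":
 *
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *	#include <sys/mman.h>
 *
 *	static bool
 *	range_can_be_reused(void *addr, size_t len)
 *	{
 *		// A nonzero return corresponds to the
 *		// KERN_INVALID_ADDRESS failures above (holes or
 *		// non-reusable entries in the range).
 *		return madvise(addr, len, MADV_CAN_REUSE) == 0;
 *	}
 */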
16793 
16794 
16795 #if MACH_ASSERT
16796 static kern_return_t
16797 vm_map_pageout(
16798 	vm_map_t        map,
16799 	vm_map_offset_t start,
16800 	vm_map_offset_t end)
16801 {
16802 	vm_map_entry_t                  entry;
16803 
16804 	/*
16805 	 * The MADV_PAGEOUT operation doesn't require any changes to the
16806 	 * vm_map_entry_t's, so the read lock is sufficient.
16807 	 */
16808 
16809 	vm_map_lock_read(map);
16810 
16811 	/*
16812 	 * The madvise semantics require that the address range be fully
16813 	 * allocated with no holes.  Otherwise, we're required to return
16814 	 * an error.
16815 	 */
16816 
16817 	if (!vm_map_range_check(map, start, end, &entry)) {
16818 		vm_map_unlock_read(map);
16819 		return KERN_INVALID_ADDRESS;
16820 	}
16821 
16822 	/*
16823 	 * Examine each vm_map_entry_t in the range.
16824 	 */
16825 	for (; entry != vm_map_to_entry(map) && entry->vme_start < end;
16826 	    entry = entry->vme_next) {
16827 		vm_object_t     object;
16828 
16829 		/*
16830 		 * Sanity check on the VM map entry.
16831 		 */
16832 		if (entry->is_sub_map) {
16833 			vm_map_t submap;
16834 			vm_map_offset_t submap_start;
16835 			vm_map_offset_t submap_end;
16836 			vm_map_entry_t submap_entry;
16837 
16838 			submap = VME_SUBMAP(entry);
16839 			submap_start = VME_OFFSET(entry);
16840 			submap_end = submap_start + (entry->vme_end -
16841 			    entry->vme_start);
16842 
16843 			vm_map_lock_read(submap);
16844 
16845 			if (!vm_map_range_check(submap,
16846 			    submap_start,
16847 			    submap_end,
16848 			    &submap_entry)) {
16849 				vm_map_unlock_read(submap);
16850 				vm_map_unlock_read(map);
16851 				return KERN_INVALID_ADDRESS;
16852 			}
16853 
16854 			if (submap_entry->is_sub_map) {
16855 				vm_map_unlock_read(submap);
16856 				continue;
16857 			}
16858 
16859 			object = VME_OBJECT(submap_entry);
16860 			if (object == VM_OBJECT_NULL || !object->internal) {
16861 				vm_map_unlock_read(submap);
16862 				continue;
16863 			}
16864 
16865 			vm_object_pageout(object);
16866 
16867 			vm_map_unlock_read(submap);
16868 			submap = VM_MAP_NULL;
16869 			submap_entry = VM_MAP_ENTRY_NULL;
16870 			continue;
16871 		}
16872 
16873 		object = VME_OBJECT(entry);
16874 		if (object == VM_OBJECT_NULL || !object->internal) {
16875 			continue;
16876 		}
16877 
16878 		vm_object_pageout(object);
16879 	}
16880 
16881 	vm_map_unlock_read(map);
16882 	return KERN_SUCCESS;
16883 }
16884 #endif /* MACH_ASSERT */
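/*
 * Editor's note (assumption): vm_map_pageout() above is compiled only
 * on MACH_ASSERT kernels and is presumably reached through
 * vm_map_behavior_set() with VM_BEHAVIOR_PAGEOUT, roughly:
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_behavior_set(map, start, end, VM_BEHAVIOR_PAGEOUT);
 *	// On release kernels this behavior is not available.
 */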
16885 
16886 
16887 /*
16888  *	Routine:	vm_map_entry_insert
16889  *
16890  *	Description:	This routine inserts a new vm_entry in a locked map.
16891  */
16892 static vm_map_entry_t
16893 vm_map_entry_insert(
16894 	vm_map_t                map,
16895 	vm_map_entry_t          insp_entry,
16896 	vm_map_offset_t         start,
16897 	vm_map_offset_t         end,
16898 	vm_object_t             object,
16899 	vm_object_offset_t      offset,
16900 	vm_map_kernel_flags_t   vmk_flags,
16901 	boolean_t               needs_copy,
16902 	vm_prot_t               cur_protection,
16903 	vm_prot_t               max_protection,
16904 	vm_inherit_t            inheritance,
16905 	boolean_t               clear_map_aligned)
16906 {
16907 	vm_map_entry_t  new_entry;
16908 	boolean_t map_aligned = FALSE;
16909 
16910 	assert(insp_entry != (vm_map_entry_t)0);
16911 	vm_map_lock_assert_exclusive(map);
16912 
16913 #if DEVELOPMENT || DEBUG
16914 	vm_object_offset_t      end_offset = 0;
16915 	assertf(!os_add_overflow(end - start, offset, &end_offset), "size 0x%llx, offset 0x%llx caused overflow", (uint64_t)(end - start), offset);
16916 #endif /* DEVELOPMENT || DEBUG */
16917 
16918 	if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) {
16919 		map_aligned = TRUE;
16920 	}
16921 	if (clear_map_aligned &&
16922 	    (!VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)) ||
16923 	    !VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)))) {
16924 		map_aligned = FALSE;
16925 	}
16926 	if (map_aligned) {
16927 		assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)));
16928 		assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)));
16929 	} else {
16930 		assert(page_aligned(start));
16931 		assert(page_aligned(end));
16932 	}
16933 	assert(start < end);
16934 
16935 	new_entry = vm_map_entry_create(map);
16936 
16937 	new_entry->vme_start = start;
16938 	new_entry->vme_end = end;
16939 
16940 	if (vmk_flags.vmkf_submap) {
16941 		new_entry->vme_atomic = vmk_flags.vmkf_submap_atomic;
16942 		VME_SUBMAP_SET(new_entry, (vm_map_t)object);
16943 	} else {
16944 		VME_OBJECT_SET(new_entry, object, false, 0);
16945 	}
16946 	VME_OFFSET_SET(new_entry, offset);
16947 	VME_ALIAS_SET(new_entry, vmk_flags.vm_tag);
16948 
16949 	new_entry->map_aligned = map_aligned;
16950 	new_entry->needs_copy = needs_copy;
16951 	new_entry->inheritance = inheritance;
16952 	new_entry->protection = cur_protection;
16953 	new_entry->max_protection = max_protection;
16954 	/*
16955 	 * submap: "use_pmap" means "nested".
16956 	 * default: false.
16957 	 *
16958 	 * object: "use_pmap" means "use pmap accounting" for footprint.
16959 	 * default: true.
16960 	 */
16961 	new_entry->use_pmap = !vmk_flags.vmkf_submap;
16962 	new_entry->no_cache = vmk_flags.vmf_no_cache;
16963 	new_entry->vme_permanent = vmk_flags.vmf_permanent;
16964 	new_entry->translated_allow_execute = vmk_flags.vmkf_translated_allow_execute;
16965 	new_entry->vme_no_copy_on_read = vmk_flags.vmkf_no_copy_on_read;
16966 	new_entry->superpage_size = (vmk_flags.vmf_superpage_size != 0);
16967 
16968 	if (vmk_flags.vmkf_map_jit) {
16969 		if (!(map->jit_entry_exists) ||
16970 		    VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(map)) {
16971 			new_entry->used_for_jit = TRUE;
16972 			map->jit_entry_exists = TRUE;
16973 		}
16974 	}
16975 
16976 	/*
16977 	 *	Insert the new entry into the list.
16978 	 */
16979 
16980 	vm_map_store_entry_link(map, insp_entry, new_entry, vmk_flags);
16981 	map->size += end - start;
16982 
16983 	/*
16984 	 *	Update the free space hint and the lookup hint.
16985 	 */
16986 
16987 	SAVE_HINT_MAP_WRITE(map, new_entry);
16988 	return new_entry;
16989 }
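/*
 * Editor's sketch (hypothetical caller, for illustration only):
 * inserting a fresh anonymous mapping once vm_map_lookup_entry() has
 * produced the predecessor entry.  The map must be held exclusively,
 * per the lock assertion above; "vmk_flags" stands for whatever kernel
 * flags the real caller carries.
 *
 *	vm_map_entry_t new_entry;
 *
 *	vm_map_lock(map);
 *	// ... locate "insp_entry" with vm_map_lookup_entry() ...
 *	new_entry = vm_map_entry_insert(map, insp_entry, start, end,
 *	    VM_OBJECT_NULL, 0, vmk_flags, FALSE,
 *	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT, FALSE);
 *	vm_map_unlock(map);
 */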
16990 
16991 /*
16992  *	Routine:	vm_map_remap_extract
16993  *
16994  *	Description:	This routine returns a vm_entry list from a map.
16995  */
16996 static kern_return_t
16997 vm_map_remap_extract(
16998 	vm_map_t                map,
16999 	vm_map_offset_t         addr,
17000 	vm_map_size_t           size,
17001 	boolean_t               copy,
17002 	vm_map_copy_t           map_copy,
17003 	vm_prot_t               *cur_protection,   /* IN/OUT */
17004 	vm_prot_t               *max_protection,   /* IN/OUT */
17005 	/* What, no behavior? */
17006 	vm_inherit_t            inheritance,
17007 	vm_map_kernel_flags_t   vmk_flags)
17008 {
17009 	struct vm_map_header   *map_header = &map_copy->cpy_hdr;
17010 	kern_return_t           result;
17011 	vm_map_size_t           mapped_size;
17012 	vm_map_size_t           tmp_size;
17013 	vm_map_entry_t          src_entry;     /* result of last map lookup */
17014 	vm_map_entry_t          new_entry;
17015 	vm_object_offset_t      offset;
17016 	vm_map_offset_t         map_address;
17017 	vm_map_offset_t         src_start;     /* start of entry to map */
17018 	vm_map_offset_t         src_end;       /* end of region to be mapped */
17019 	vm_object_t             object;
17020 	vm_map_version_t        version;
17021 	boolean_t               src_needs_copy;
17022 	boolean_t               new_entry_needs_copy;
17023 	vm_map_entry_t          saved_src_entry;
17024 	boolean_t               src_entry_was_wired;
17025 	vm_prot_t               max_prot_for_prot_copy;
17026 	vm_map_offset_t         effective_page_mask;
17027 	bool                    pageable, same_map;
17028 	boolean_t               vm_remap_legacy;
17029 	vm_prot_t               required_cur_prot, required_max_prot;
17030 	vm_object_t             new_copy_object;     /* vm_object_copy_* result */
17031 	boolean_t               saved_used_for_jit;  /* Saved used_for_jit. */
17032 
17033 	pageable = vmk_flags.vmkf_copy_pageable;
17034 	same_map = vmk_flags.vmkf_copy_same_map;
17035 
17036 	effective_page_mask = MIN(PAGE_MASK, VM_MAP_PAGE_MASK(map));
17037 
17038 	assert(map != VM_MAP_NULL);
17039 	assert(size != 0);
17040 	assert(size == vm_map_round_page(size, effective_page_mask));
17041 	assert(inheritance == VM_INHERIT_NONE ||
17042 	    inheritance == VM_INHERIT_COPY ||
17043 	    inheritance == VM_INHERIT_SHARE);
17044 	assert(!(*cur_protection & ~(VM_PROT_ALL | VM_PROT_ALLEXEC)));
17045 	assert(!(*max_protection & ~(VM_PROT_ALL | VM_PROT_ALLEXEC)));
17046 	assert((*cur_protection & *max_protection) == *cur_protection);
17047 
17048 	/*
17049 	 *	Compute start and end of region.
17050 	 */
17051 	src_start = vm_map_trunc_page(addr, effective_page_mask);
17052 	src_end = vm_map_round_page(src_start + size, effective_page_mask);
17053 
17054 	/*
17055 	 *	Initialize map_header.
17056 	 */
17057 	map_header->nentries = 0;
17058 	map_header->entries_pageable = pageable;
17059 //	map_header->page_shift = MIN(VM_MAP_PAGE_SHIFT(map), PAGE_SHIFT);
17060 	map_header->page_shift = (uint16_t)VM_MAP_PAGE_SHIFT(map);
17061 	map_header->rb_head_store.rbh_root = (void *)(int)SKIP_RB_TREE;
17062 	vm_map_store_init(map_header);
17063 
17064 	if (copy && vmk_flags.vmkf_remap_prot_copy) {
17065 		/*
17066 		 * Special case for vm_map_protect(VM_PROT_COPY):
17067 		 * we want to set the new mappings' max protection to the
17068 		 * specified *max_protection...
17069 		 */
17070 		max_prot_for_prot_copy = *max_protection & (VM_PROT_ALL | VM_PROT_ALLEXEC);
17071 		/* ... but we want to use the vm_remap() legacy mode */
17072 		*max_protection = VM_PROT_NONE;
17073 		*cur_protection = VM_PROT_NONE;
17074 	} else {
17075 		max_prot_for_prot_copy = VM_PROT_NONE;
17076 	}
17077 
17078 	if (*cur_protection == VM_PROT_NONE &&
17079 	    *max_protection == VM_PROT_NONE) {
17080 		/*
17081 		 * vm_remap() legacy mode:
17082 		 * Extract all memory regions in the specified range and
17083 		 * collect the strictest set of protections allowed on the
17084 		 * entire range, so the caller knows what they can do with
17085 		 * the remapped range.
17086 		 * We start with VM_PROT_ALL and we'll remove the protections
17087 		 * missing from each memory region.
17088 		 */
17089 		vm_remap_legacy = TRUE;
17090 		*cur_protection = VM_PROT_ALL;
17091 		*max_protection = VM_PROT_ALL;
17092 		required_cur_prot = VM_PROT_NONE;
17093 		required_max_prot = VM_PROT_NONE;
17094 	} else {
17095 		/*
17096 		 * vm_remap_new() mode:
17097 		 * Extract all memory regions in the specified range and
17098 		 * ensure that they have at least the protections specified
17099 		 * by the caller via *cur_protection and *max_protection.
17100 		 * The resulting mapping should have these protections.
17101 		 */
17102 		vm_remap_legacy = FALSE;
17103 		if (copy) {
17104 			required_cur_prot = VM_PROT_NONE;
17105 			required_max_prot = VM_PROT_READ;
17106 		} else {
17107 			required_cur_prot = *cur_protection;
17108 			required_max_prot = *max_protection;
17109 		}
17110 	}
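	/*
	 * Editor's worked example (illustrative): in legacy mode, with two
	 * entries in the extracted range
	 *
	 *	entry1: protection = VM_PROT_READ | VM_PROT_WRITE,
	 *	        max_protection = VM_PROT_ALL
	 *	entry2: protection = VM_PROT_READ | VM_PROT_EXECUTE,
	 *	        max_protection = VM_PROT_READ | VM_PROT_EXECUTE
	 *
	 * the loop below intersects each entry's protections into the OUT
	 * parameters, so the caller ends up with
	 * *cur_protection == VM_PROT_READ and
	 * *max_protection == (VM_PROT_READ | VM_PROT_EXECUTE).
	 */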
17111 
17112 	map_address = 0;
17113 	mapped_size = 0;
17114 	result = KERN_SUCCESS;
17115 
17116 	/*
17117 	 *	The specified source virtual space might correspond to
17118 	 *	multiple map entries, so we need to loop over them.
17119 	 */
17120 	vm_map_lock(map);
17121 
17122 	if (map->pmap == kernel_pmap) {
17123 		map_copy->is_kernel_range = true;
17124 		map_copy->orig_range = kmem_addr_get_range(addr, size);
17125 #if CONFIG_MAP_RANGES
17126 	} else if (map->uses_user_ranges) {
17127 		map_copy->is_user_range = true;
17128 		map_copy->orig_range = vm_map_user_range_resolve(map, addr, size, NULL);
17129 #endif /* CONFIG_MAP_RANGES */
17130 	}
17131 
17132 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
17133 		/*
17134 		 * This address space uses sub-pages so the range might
17135 		 * not be re-mappable in an address space with larger
17136 		 * pages. Re-assemble any broken-up VM map entries to
17137 		 * improve our chances of making it work.
17138 		 */
17139 		vm_map_simplify_range(map, src_start, src_end);
17140 	}
17141 	while (mapped_size != size) {
17142 		vm_map_size_t   entry_size;
17143 
17144 		/*
17145 		 *	Find the beginning of the region.
17146 		 */
17147 		if (!vm_map_lookup_entry(map, src_start, &src_entry)) {
17148 			result = KERN_INVALID_ADDRESS;
17149 			break;
17150 		}
17151 
17152 		if (src_start < src_entry->vme_start ||
17153 		    (mapped_size && src_start != src_entry->vme_start)) {
17154 			result = KERN_INVALID_ADDRESS;
17155 			break;
17156 		}
17157 
17158 		tmp_size = size - mapped_size;
17159 		if (src_end > src_entry->vme_end) {
17160 			tmp_size -= (src_end - src_entry->vme_end);
17161 		}
17162 
17163 		entry_size = (vm_map_size_t)(src_entry->vme_end -
17164 		    src_entry->vme_start);
17165 
17166 		if (src_entry->is_sub_map &&
17167 		    vmk_flags.vmkf_copy_single_object) {
17168 			vm_map_t submap;
17169 			vm_map_offset_t submap_start;
17170 			vm_map_size_t submap_size;
17171 			boolean_t submap_needs_copy;
17172 
17173 			/*
17174 			 * No check for "required protection" on "src_entry"
17175 			 * because the protections that matter are the ones
17176 			 * on the submap's VM map entry, which will be checked
17177 			 * during the call to vm_map_remap_extract() below.
17178 			 */
17179 			submap_size = src_entry->vme_end - src_start;
17180 			if (submap_size > size) {
17181 				submap_size = size;
17182 			}
17183 			submap_start = VME_OFFSET(src_entry) + src_start - src_entry->vme_start;
17184 			submap = VME_SUBMAP(src_entry);
17185 			if (copy) {
17186 				/*
17187 				 * The caller wants a copy-on-write re-mapping,
17188 				 * so let's extract from the submap accordingly.
17189 				 */
17190 				submap_needs_copy = TRUE;
17191 			} else if (src_entry->needs_copy) {
17192 				/*
17193 				 * The caller wants a shared re-mapping but the
17194 				 * submap is mapped with "needs_copy", so its
17195 				 * contents can't be shared as is. Extract the
17196 				 * contents of the submap as "copy-on-write".
17197 				 * The re-mapping won't be shared with the
17198 				 * original mapping but this is equivalent to
17199 				 * what happened with the original "remap from
17200 				 * submap" code.
17201 				 * The shared region is mapped "needs_copy", for
17202 				 * example.
17203 				 */
17204 				submap_needs_copy = TRUE;
17205 			} else {
17206 				/*
17207 				 * The caller wants a shared re-mapping and
17208 				 * this mapping can be shared (no "needs_copy"),
17209 				 * so let's extract from the submap accordingly.
17210 				 * Kernel submaps are mapped without
17211 				 * "needs_copy", for example.
17212 				 */
17213 				submap_needs_copy = FALSE;
17214 			}
17215 			vm_map_reference(submap);
17216 			vm_map_unlock(map);
17217 			src_entry = NULL;
17218 			if (vm_remap_legacy) {
17219 				*cur_protection = VM_PROT_NONE;
17220 				*max_protection = VM_PROT_NONE;
17221 			}
17222 
17223 			DTRACE_VM7(remap_submap_recurse,
17224 			    vm_map_t, map,
17225 			    vm_map_offset_t, addr,
17226 			    vm_map_size_t, size,
17227 			    boolean_t, copy,
17228 			    vm_map_offset_t, submap_start,
17229 			    vm_map_size_t, submap_size,
17230 			    boolean_t, submap_needs_copy);
17231 
17232 			result = vm_map_remap_extract(submap,
17233 			    submap_start,
17234 			    submap_size,
17235 			    submap_needs_copy,
17236 			    map_copy,
17237 			    cur_protection,
17238 			    max_protection,
17239 			    inheritance,
17240 			    vmk_flags);
17241 			vm_map_deallocate(submap);
17242 			return result;
17243 		}
17244 
17245 		if (src_entry->is_sub_map) {
17246 			/* protections for submap mapping are irrelevant here */
17247 		} else if (((src_entry->protection & required_cur_prot) !=
17248 		    required_cur_prot) ||
17249 		    ((src_entry->max_protection & required_max_prot) !=
17250 		    required_max_prot)) {
17251 			if (vmk_flags.vmkf_copy_single_object &&
17252 			    mapped_size != 0) {
17253 				/*
17254 				 * Single object extraction.
17255 				 * We can't extract more with the required
17256 				 * protection but we've extracted some, so
17257 				 * stop there and declare success.
17258 				 * The caller should check the size of
17259 				 * the copy entry we've extracted.
17260 				 */
17261 				result = KERN_SUCCESS;
17262 			} else {
17263 				/*
17264 				 * VM range extraction.
17265 				 * Required protection is not available
17266 				 * for this part of the range: fail.
17267 				 */
17268 				result = KERN_PROTECTION_FAILURE;
17269 			}
17270 			break;
17271 		}
17272 
17273 		if (src_entry->is_sub_map) {
17274 			vm_map_t submap;
17275 			vm_map_offset_t submap_start;
17276 			vm_map_size_t submap_size;
17277 			vm_map_copy_t submap_copy;
17278 			vm_prot_t submap_curprot, submap_maxprot;
17279 			boolean_t submap_needs_copy;
17280 
17281 			/*
17282 			 * No check for "required protection" on "src_entry"
17283 			 * because the protections that matter are the ones
17284 			 * on the submap's VM map entry, which will be checked
17285 			 * during the call to vm_map_copy_extract() below.
17286 			 */
17287 			object = VM_OBJECT_NULL;
17288 			submap_copy = VM_MAP_COPY_NULL;
17289 
17290 			/* find equivalent range in the submap */
17291 			submap = VME_SUBMAP(src_entry);
17292 			submap_start = VME_OFFSET(src_entry) + src_start - src_entry->vme_start;
17293 			submap_size = tmp_size;
17294 			if (copy) {
17295 				/*
17296 				 * The caller wants a copy-on-write re-mapping,
17297 				 * so let's extract from the submap accordingly.
17298 				 */
17299 				submap_needs_copy = TRUE;
17300 			} else if (src_entry->needs_copy) {
17301 				/*
17302 				 * The caller wants a shared re-mapping but the
17303 				 * submap is mapped with "needs_copy", so its
17304 				 * contents can't be shared as is. Extract the
17305 				 * contents of the submap as "copy-on-write".
17306 				 * The re-mapping won't be shared with the
17307 				 * original mapping but this is equivalent to
17308 				 * what happened with the original "remap from
17309 				 * submap" code.
17310 				 * The shared region is mapped "needs_copy", for
17311 				 * example.
17312 				 */
17313 				submap_needs_copy = TRUE;
17314 			} else {
17315 				/*
17316 				 * The caller wants a shared re-mapping and
17317 				 * this mapping can be shared (no "needs_copy"),
17318 				 * so let's extract from the submap accordingly.
17319 				 * Kernel submaps are mapped without
17320 				 * "needs_copy", for example.
17321 				 */
17322 				submap_needs_copy = FALSE;
17323 			}
17324 			/* extra ref to keep submap alive */
17325 			vm_map_reference(submap);
17326 
17327 			DTRACE_VM7(remap_submap_recurse,
17328 			    vm_map_t, map,
17329 			    vm_map_offset_t, addr,
17330 			    vm_map_size_t, size,
17331 			    boolean_t, copy,
17332 			    vm_map_offset_t, submap_start,
17333 			    vm_map_size_t, submap_size,
17334 			    boolean_t, submap_needs_copy);
17335 
17336 			/*
17337 			 * The map can be safely unlocked since we
17338 			 * already hold a reference on the submap.
17339 			 *
17340 			 * No timestamp since we don't care if the map
17341 			 * gets modified while we're down in the submap.
17342 			 * We'll resume the extraction at src_start + tmp_size
17343 			 * anyway.
17344 			 */
17345 			vm_map_unlock(map);
17346 			src_entry = NULL; /* not valid once map is unlocked */
17347 
17348 			if (vm_remap_legacy) {
17349 				submap_curprot = VM_PROT_NONE;
17350 				submap_maxprot = VM_PROT_NONE;
17351 				if (max_prot_for_prot_copy) {
17352 					submap_maxprot = max_prot_for_prot_copy;
17353 				}
17354 			} else {
17355 				assert(!max_prot_for_prot_copy);
17356 				submap_curprot = *cur_protection;
17357 				submap_maxprot = *max_protection;
17358 			}
17359 			result = vm_map_copy_extract(submap,
17360 			    submap_start,
17361 			    submap_size,
17362 			    submap_needs_copy,
17363 			    &submap_copy,
17364 			    &submap_curprot,
17365 			    &submap_maxprot,
17366 			    inheritance,
17367 			    vmk_flags);
17368 
17369 			/* release extra ref on submap */
17370 			vm_map_deallocate(submap);
17371 			submap = VM_MAP_NULL;
17372 
17373 			if (result != KERN_SUCCESS) {
17374 				vm_map_lock(map);
17375 				break;
17376 			}
17377 
17378 			/* transfer submap_copy entries to map_header */
17379 			while (vm_map_copy_first_entry(submap_copy) !=
17380 			    vm_map_copy_to_entry(submap_copy)) {
17381 				vm_map_entry_t copy_entry;
17382 				vm_map_size_t copy_entry_size;
17383 
17384 				copy_entry = vm_map_copy_first_entry(submap_copy);
17385 
17386 				/*
17387 				 * Prevent kernel_object from being exposed to
17388 				 * user space.
17389 				 */
17390 				if (__improbable(copy_entry->vme_kernel_object)) {
17391 					printf("%d[%s]: rejecting attempt to extract from kernel_object\n",
17392 					    proc_selfpid(),
17393 					    (get_bsdtask_info(current_task())
17394 					    ? proc_name_address(get_bsdtask_info(current_task()))
17395 					    : "?"));
17396 					DTRACE_VM(extract_kernel_only);
17397 					result = KERN_INVALID_RIGHT;
17398 					vm_map_copy_discard(submap_copy);
17399 					submap_copy = VM_MAP_COPY_NULL;
17400 					vm_map_lock(map);
17401 					break;
17402 				}
17403 
17404 #ifdef __arm64e__
17405 				if (vmk_flags.vmkf_tpro_enforcement_override) {
17406 					copy_entry->used_for_tpro = FALSE;
17407 				}
17408 #endif /* __arm64e__ */
17409 
17410 				vm_map_copy_entry_unlink(submap_copy, copy_entry);
17411 				copy_entry_size = copy_entry->vme_end - copy_entry->vme_start;
17412 				copy_entry->vme_start = map_address;
17413 				copy_entry->vme_end = map_address + copy_entry_size;
17414 				map_address += copy_entry_size;
17415 				mapped_size += copy_entry_size;
17416 				src_start += copy_entry_size;
17417 				assert(src_start <= src_end);
17418 				_vm_map_store_entry_link(map_header,
17419 				    map_header->links.prev,
17420 				    copy_entry);
17421 			}
17422 			/* done with submap_copy */
17423 			vm_map_copy_discard(submap_copy);
17424 
17425 			if (vm_remap_legacy) {
17426 				*cur_protection &= submap_curprot;
17427 				*max_protection &= submap_maxprot;
17428 			}
17429 
17430 			/* re-acquire the map lock and continue to next entry */
17431 			vm_map_lock(map);
17432 			continue;
17433 		} else {
17434 			object = VME_OBJECT(src_entry);
17435 
17436 			/*
17437 			 * Prevent kernel_object from being exposed to
17438 			 * user space.
17439 			 */
17440 			if (__improbable(is_kernel_object(object))) {
17441 				printf("%d[%s]: rejecting attempt to extract from kernel_object\n",
17442 				    proc_selfpid(),
17443 				    (get_bsdtask_info(current_task())
17444 				    ? proc_name_address(get_bsdtask_info(current_task()))
17445 				    : "?"));
17446 				DTRACE_VM(extract_kernel_only);
17447 				result = KERN_INVALID_RIGHT;
17448 				break;
17449 			}
17450 
17451 			if (src_entry->iokit_acct) {
17452 				/*
17453 				 * This entry uses "IOKit accounting".
17454 				 */
17455 			} else if (object != VM_OBJECT_NULL &&
17456 			    (object->purgable != VM_PURGABLE_DENY ||
17457 			    object->vo_ledger_tag != VM_LEDGER_TAG_NONE)) {
17458 				/*
17459 				 * Purgeable objects have their own accounting:
17460 				 * no pmap accounting for them.
17461 				 */
17462 				assertf(!src_entry->use_pmap,
17463 				    "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d",
17464 				    map,
17465 				    src_entry,
17466 				    (uint64_t)src_entry->vme_start,
17467 				    (uint64_t)src_entry->vme_end,
17468 				    src_entry->protection,
17469 				    src_entry->max_protection,
17470 				    VME_ALIAS(src_entry));
17471 			} else {
17472 				/*
17473 				 * Not IOKit or purgeable:
17474 				 * must be accounted by pmap stats.
17475 				 */
17476 				assertf(src_entry->use_pmap,
17477 				    "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d",
17478 				    map,
17479 				    src_entry,
17480 				    (uint64_t)src_entry->vme_start,
17481 				    (uint64_t)src_entry->vme_end,
17482 				    src_entry->protection,
17483 				    src_entry->max_protection,
17484 				    VME_ALIAS(src_entry));
17485 			}
17486 
17487 			if (object == VM_OBJECT_NULL) {
17488 				assert(!src_entry->needs_copy);
17489 				if (src_entry->max_protection == VM_PROT_NONE) {
17490 					assert(src_entry->protection == VM_PROT_NONE);
17491 					/*
17492 					 * No VM object and no permissions:
17493 					 * this must be a reserved range with
17494 					 * nothing to share or copy.
17495 					 * There could also be all sorts of
17496 					 * pmap shenanigans within that reserved
17497 					 * range, so let's just copy the map
17498 					 * entry as is to remap a similar
17499 					 * reserved range.
17500 					 */
17501 					offset = 0; /* no object => no offset */
17502 					goto copy_src_entry;
17503 				}
17504 				object = vm_object_allocate(entry_size);
17505 				VME_OFFSET_SET(src_entry, 0);
17506 				VME_OBJECT_SET(src_entry, object, false, 0);
17507 				assert(src_entry->use_pmap);
17508 				assert(!map->mapped_in_other_pmaps);
17509 			} else if (src_entry->wired_count ||
17510 			    object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
17511 				/*
17512 				 * A wired memory region should not have
17513 				 * any pending copy-on-write and needs to
17514 				 * keep pointing at the VM object that
17515 				 * contains the wired pages.
17516 				 * If we're sharing this memory (copy=false),
17517 				 * we'll share this VM object.
17518 				 * If we're copying this memory (copy=true),
17519 				 * we'll call vm_object_copy_slowly() below
17520 				 * and use the new VM object for the remapping.
17521 				 *
17522 				 * Or, we are already using an asymmetric
17523 				 * copy, and therefore we already have
17524 				 * the right object.
17525 				 */
17526 				assert(!src_entry->needs_copy);
17527 			} else if (src_entry->needs_copy || object->shadowed ||
17528 			    (object->internal && !object->true_share &&
17529 			    !src_entry->is_shared &&
17530 			    object->vo_size > entry_size)) {
17531 				VME_OBJECT_SHADOW(src_entry, entry_size,
17532 				    vm_map_always_shadow(map));
17533 				assert(src_entry->use_pmap);
17534 
17535 				if (!src_entry->needs_copy &&
17536 				    (src_entry->protection & VM_PROT_WRITE)) {
17537 					vm_prot_t prot;
17538 
17539 					assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, src_entry->protection));
17540 
17541 					prot = src_entry->protection & ~VM_PROT_WRITE;
17542 
17543 					if (override_nx(map,
17544 					    VME_ALIAS(src_entry))
17545 					    && prot) {
17546 						prot |= VM_PROT_EXECUTE;
17547 					}
17548 
17549 					assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, prot));
17550 
17551 					if (map->mapped_in_other_pmaps) {
17552 						vm_object_pmap_protect(
17553 							VME_OBJECT(src_entry),
17554 							VME_OFFSET(src_entry),
17555 							entry_size,
17556 							PMAP_NULL,
17557 							PAGE_SIZE,
17558 							src_entry->vme_start,
17559 							prot);
17560 #if MACH_ASSERT
17561 					} else if (__improbable(map->pmap == PMAP_NULL)) {
17562 						extern boolean_t vm_tests_in_progress;
17563 						assert(vm_tests_in_progress);
17564 						/*
17565 						 * Some VM tests (in vm_tests.c)
17566 						 * sometimes want to use a VM
17567 						 * map without a pmap.
17568 						 * Otherwise, this should never
17569 						 * happen.
17570 						 */
17571 #endif /* MACH_ASSERT */
17572 					} else {
17573 						pmap_protect(vm_map_pmap(map),
17574 						    src_entry->vme_start,
17575 						    src_entry->vme_end,
17576 						    prot);
17577 					}
17578 				}
17579 
17580 				object = VME_OBJECT(src_entry);
17581 				src_entry->needs_copy = FALSE;
17582 			}
17583 
17584 
17585 			vm_object_lock(object);
17586 			vm_object_reference_locked(object); /* object ref. for new entry */
17587 			assert(!src_entry->needs_copy);
17588 			if (object->copy_strategy ==
17589 			    MEMORY_OBJECT_COPY_SYMMETRIC) {
17590 				/*
17591 				 * If we want to share this object (copy==0),
17592 				 * it needs to be COPY_DELAY.
17593 				 * If we want to copy this object (copy==1),
17594 				 * we can't just set "needs_copy" on our side
17595 				 * and expect the other side to do the same
17596 				 * (symmetrically), so we can't let the object
17597 				 * stay COPY_SYMMETRIC.
17598 				 * So we always switch from COPY_SYMMETRIC to
17599 				 * COPY_DELAY.
17600 				 */
17601 				object->copy_strategy =
17602 				    MEMORY_OBJECT_COPY_DELAY;
17603 				object->true_share = TRUE;
17604 			}
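			/*
			 * Editor's note (descriptive, added for clarity):
			 * once the object is COPY_DELAY and "true_share",
			 * any later copy of either mapping goes through the
			 * asymmetric delayed-copy path, which doesn't depend
			 * on both mappings cooperatively flipping
			 * "needs_copy" the way symmetric COW does.
			 */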
17605 			vm_object_unlock(object);
17606 		}
17607 
17608 		offset = (VME_OFFSET(src_entry) +
17609 		    (src_start - src_entry->vme_start));
17610 
17611 copy_src_entry:
17612 		new_entry = _vm_map_entry_create(map_header);
17613 		vm_map_entry_copy(map, new_entry, src_entry);
17614 		if (new_entry->is_sub_map) {
17615 			/* clr address space specifics */
17616 			new_entry->use_pmap = FALSE;
17617 		} else if (copy) {
17618 			/*
17619 			 * We're dealing with a copy-on-write operation,
17620 			 * so the resulting mapping should not inherit the
17621 			 * original mapping's accounting settings.
17622 			 * "use_pmap" should be reset to its default (TRUE)
17623 			 * so that the new mapping gets accounted for in
17624 			 * the task's memory footprint.
17625 			 */
17626 			new_entry->use_pmap = TRUE;
17627 		}
17628 		/* "iokit_acct" was cleared in vm_map_entry_copy() */
17629 		assert(!new_entry->iokit_acct);
17630 
17631 		new_entry->map_aligned = FALSE;
17632 
17633 		new_entry->vme_start = map_address;
17634 		new_entry->vme_end = map_address + tmp_size;
17635 		assert(new_entry->vme_start < new_entry->vme_end);
17636 		if (copy && vmk_flags.vmkf_remap_prot_copy) {
17637 			/* security: keep "permanent" and "csm_associated" */
17638 			new_entry->vme_permanent = src_entry->vme_permanent;
17639 			new_entry->csm_associated = src_entry->csm_associated;
17640 			/*
17641 			 * Remapping for vm_map_protect(VM_PROT_COPY)
17642 			 * to convert a read-only mapping into a
17643 			 * copy-on-write version of itself but
17644 			 * with write access:
17645 			 * keep the original inheritance but let's not
17646 			 * add VM_PROT_WRITE to the max protection yet
17647 			 * since we want to do more security checks against
17648 			 * the target map.
17649 			 */
17650 			new_entry->inheritance = src_entry->inheritance;
17651 			new_entry->protection &= max_prot_for_prot_copy;
17652 		} else {
17653 			new_entry->inheritance = inheritance;
17654 			if (!vm_remap_legacy) {
17655 				new_entry->protection = *cur_protection;
17656 				new_entry->max_protection = *max_protection;
17657 			}
17658 		}
17659 #ifdef __arm64e__
17660 		if (copy && vmk_flags.vmkf_tpro_enforcement_override) {
17661 			new_entry->used_for_tpro = FALSE;
17662 		}
17663 #endif /* __arm64e__ */
17664 		VME_OFFSET_SET(new_entry, offset);
17665 
17666 		/*
17667 		 * The new region has to be copied now if required.
17668 		 */
17669 RestartCopy:
17670 		if (!copy) {
17671 			if (src_entry->used_for_jit == TRUE) {
17672 				if (same_map) {
17673 				} else if (!VM_MAP_POLICY_ALLOW_JIT_SHARING(map)) {
17674 					/*
17675 					 * Cannot allow an entry describing a JIT
17676 					 * region to be shared across address spaces.
17677 					 */
17678 					result = KERN_INVALID_ARGUMENT;
17679 					vm_object_deallocate(object);
17680 					vm_map_entry_dispose(new_entry);
17681 					new_entry = VM_MAP_ENTRY_NULL;
17682 					break;
17683 				}
17684 			}
17685 
17686 			src_entry->is_shared = TRUE;
17687 			new_entry->is_shared = TRUE;
17688 			if (!(new_entry->is_sub_map)) {
17689 				new_entry->needs_copy = FALSE;
17690 			}
17691 		} else if (src_entry->is_sub_map) {
17692 			/* make this a COW sub_map if not already */
17693 			assert(new_entry->wired_count == 0);
17694 			new_entry->needs_copy = TRUE;
17695 			object = VM_OBJECT_NULL;
17696 		} else if (src_entry->wired_count == 0 &&
17697 		    !(debug4k_no_cow_copyin && VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) &&
17698 		    vm_object_copy_quickly(VME_OBJECT(new_entry),
17699 		    VME_OFFSET(new_entry),
17700 		    (new_entry->vme_end -
17701 		    new_entry->vme_start),
17702 		    &src_needs_copy,
17703 		    &new_entry_needs_copy)) {
17704 			new_entry->needs_copy = new_entry_needs_copy;
17705 			new_entry->is_shared = FALSE;
17706 			assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry);
17707 
17708 			/*
17709 			 * Handle copy_on_write semantics.
17710 			 */
17711 			if (src_needs_copy && !src_entry->needs_copy) {
17712 				vm_prot_t prot;
17713 
17714 				assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, src_entry->protection));
17715 
17716 				prot = src_entry->protection & ~VM_PROT_WRITE;
17717 
17718 				if (override_nx(map,
17719 				    VME_ALIAS(src_entry))
17720 				    && prot) {
17721 					prot |= VM_PROT_EXECUTE;
17722 				}
17723 
17724 				assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, prot));
17725 
17726 				vm_object_pmap_protect(object,
17727 				    offset,
17728 				    entry_size,
17729 				    ((src_entry->is_shared
17730 				    || map->mapped_in_other_pmaps) ?
17731 				    PMAP_NULL : map->pmap),
17732 				    VM_MAP_PAGE_SIZE(map),
17733 				    src_entry->vme_start,
17734 				    prot);
17735 
17736 				assert(src_entry->wired_count == 0);
17737 				src_entry->needs_copy = TRUE;
17738 			}
17739 			/*
17740 			 * Throw away the old object reference of the new entry.
17741 			 */
17742 			vm_object_deallocate(object);
17743 		} else {
17744 			new_entry->is_shared = FALSE;
17745 			assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry);
17746 
17747 			src_entry_was_wired = (src_entry->wired_count > 0);
17748 			saved_src_entry = src_entry;
17749 			src_entry = VM_MAP_ENTRY_NULL;
17750 
17751 			/*
17752 			 * The map can be safely unlocked since we
17753 			 * already hold a reference on the object.
17754 			 *
17755 			 * Record the timestamp of the map for later
17756 			 * verification, and unlock the map.
17757 			 */
17758 			version.main_timestamp = map->timestamp;
17759 			vm_map_unlock(map);     /* Increments timestamp once! */
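			/*
			 * Editor's note (descriptive): the timestamp saved
			 * above implements an optimistic concurrency check.
			 * After the copy we re-lock and require
			 *
			 *	map->timestamp == version.main_timestamp + 1
			 *
			 * i.e. the only bump came from our own unlock; any
			 * other value means the map changed underneath us
			 * and the lookup must be retried.
			 */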
17760 
17761 			/*
17762 			 * Perform the copy.
17763 			 */
17764 			if (src_entry_was_wired > 0 ||
17765 			    (debug4k_no_cow_copyin &&
17766 			    VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT)) {
17767 				vm_object_lock(object);
17768 				result = vm_object_copy_slowly(
17769 					object,
17770 					offset,
17771 					(new_entry->vme_end -
17772 					new_entry->vme_start),
17773 					THREAD_UNINT,
17774 					&new_copy_object);
17775 				/* VME_OBJECT_SET will reset used_for_jit, so preserve it. */
17776 				saved_used_for_jit = new_entry->used_for_jit;
17777 				VME_OBJECT_SET(new_entry, new_copy_object, false, 0);
17778 				new_entry->used_for_jit = saved_used_for_jit;
17779 				VME_OFFSET_SET(new_entry, offset - vm_object_trunc_page(offset));
17780 				new_entry->needs_copy = FALSE;
17781 			} else {
17782 				vm_object_offset_t new_offset;
17783 
17784 				new_offset = VME_OFFSET(new_entry);
17785 				result = vm_object_copy_strategically(
17786 					object,
17787 					offset,
17788 					(new_entry->vme_end -
17789 					new_entry->vme_start),
17790 					false, /* forking */
17791 					&new_copy_object,
17792 					&new_offset,
17793 					&new_entry_needs_copy);
17794 				/* VME_OBJECT_SET will reset used_for_jit, so preserve it. */
17795 				saved_used_for_jit = new_entry->used_for_jit;
17796 				VME_OBJECT_SET(new_entry, new_copy_object, false, 0);
17797 				new_entry->used_for_jit = saved_used_for_jit;
17798 				if (new_offset != VME_OFFSET(new_entry)) {
17799 					VME_OFFSET_SET(new_entry, new_offset);
17800 				}
17801 
17802 				new_entry->needs_copy = new_entry_needs_copy;
17803 			}
17804 
17805 			/*
17806 			 * Throw away the old object reference of the new entry.
17807 			 */
17808 			vm_object_deallocate(object);
17809 
17810 			if (result != KERN_SUCCESS &&
17811 			    result != KERN_MEMORY_RESTART_COPY) {
17812 				vm_map_entry_dispose(new_entry);
17813 				vm_map_lock(map);
17814 				break;
17815 			}
17816 
17817 			/*
17818 			 * Verify that the map has not substantially
17819 			 * changed while the copy was being made.
17820 			 */
17821 
17822 			vm_map_lock(map);
17823 			if (version.main_timestamp + 1 != map->timestamp) {
17824 				/*
17825 				 * Simple version comparison failed.
17826 				 *
17827 				 * Retry the lookup and verify that the
17828 				 * same object/offset are still present.
17829 				 */
17830 				saved_src_entry = VM_MAP_ENTRY_NULL;
17831 				vm_object_deallocate(VME_OBJECT(new_entry));
17832 				vm_map_entry_dispose(new_entry);
17833 				if (result == KERN_MEMORY_RESTART_COPY) {
17834 					result = KERN_SUCCESS;
17835 				}
17836 				continue;
17837 			}
17838 			/* map hasn't changed: src_entry is still valid */
17839 			src_entry = saved_src_entry;
17840 			saved_src_entry = VM_MAP_ENTRY_NULL;
17841 
17842 			if (result == KERN_MEMORY_RESTART_COPY) {
17843 				vm_object_reference(object);
17844 				goto RestartCopy;
17845 			}
17846 		}
17847 
17848 		_vm_map_store_entry_link(map_header,
17849 		    map_header->links.prev, new_entry);
17850 
17851 		/* protections for submap mapping are irrelevant here */
17852 		if (vm_remap_legacy && !src_entry->is_sub_map) {
17853 			*cur_protection &= src_entry->protection;
17854 			*max_protection &= src_entry->max_protection;
17855 		}
17856 
17857 		map_address += tmp_size;
17858 		mapped_size += tmp_size;
17859 		src_start += tmp_size;
17860 
17861 		if (vmk_flags.vmkf_copy_single_object) {
17862 			if (mapped_size != size) {
17863 				DEBUG4K_SHARE("map %p addr 0x%llx size 0x%llx clipped copy at mapped_size 0x%llx\n",
17864 				    map, (uint64_t)addr, (uint64_t)size, (uint64_t)mapped_size);
17865 				if (src_entry->vme_next != vm_map_to_entry(map) &&
17866 				    src_entry->vme_next->vme_object_value ==
17867 				    src_entry->vme_object_value) {
17868 					/* XXX TODO4K */
17869 					DEBUG4K_ERROR("could have extended copy to next entry...\n");
17870 				}
17871 			}
17872 			break;
17873 		}
17874 	} /* end while */
17875 
17876 	vm_map_unlock(map);
17877 	if (result != KERN_SUCCESS) {
17878 		/*
17879 		 * Free all allocated elements.
17880 		 */
17881 		for (src_entry = map_header->links.next;
17882 		    src_entry != CAST_TO_VM_MAP_ENTRY(&map_header->links);
17883 		    src_entry = new_entry) {
17884 			new_entry = src_entry->vme_next;
17885 			_vm_map_store_entry_unlink(map_header, src_entry, false);
17886 			if (src_entry->is_sub_map) {
17887 				vm_map_deallocate(VME_SUBMAP(src_entry));
17888 			} else {
17889 				vm_object_deallocate(VME_OBJECT(src_entry));
17890 			}
17891 			vm_map_entry_dispose(src_entry);
17892 		}
17893 	}
17894 	return result;
17895 }
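/*
 * Editor's sketch (hypothetical user-level usage): the legacy mode of
 * vm_map_remap_extract() is what mach_vm_remap() exposes when the
 * protections come in as VM_PROT_NONE; on return they report the
 * strictest protections found across the source range.  "size" and
 * "src_addr" are placeholders:
 *
 *	#include <mach/mach.h>
 *
 *	mach_vm_address_t target = 0;
 *	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_remap(mach_task_self(), &target, size, 0,
 *	    VM_FLAGS_ANYWHERE, mach_task_self(), src_addr,
 *	    FALSE,              // copy == FALSE: share, don't COW
 *	    &cur, &max, VM_INHERIT_DEFAULT);
 *	// On success, "cur" and "max" hold the intersected protections.
 */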
17896 
17897 bool
17898 vm_map_is_exotic(
17899 	vm_map_t map)
17900 {
17901 	return VM_MAP_IS_EXOTIC(map);
17902 }
17903 
17904 bool
17905 vm_map_is_alien(
17906 	vm_map_t map)
17907 {
17908 	return VM_MAP_IS_ALIEN(map);
17909 }
17910 
17911 #if XNU_TARGET_OS_OSX
17912 void
17913 vm_map_mark_alien(
17914 	vm_map_t map)
17915 {
17916 	vm_map_lock(map);
17917 	map->is_alien = true;
17918 	vm_map_unlock(map);
17919 }
17920 
17921 void
17922 vm_map_single_jit(
17923 	vm_map_t map)
17924 {
17925 	vm_map_lock(map);
17926 	map->single_jit = true;
17927 	vm_map_unlock(map);
17928 }
17929 #endif /* XNU_TARGET_OS_OSX */
17930 
17931 /*
17932  * Callers of this function must call vm_map_copy_require on
17933  * previously created vm_map_copy_t or pass a newly created
17934  * one to ensure that it hasn't been forged.
17935  */
17936 static kern_return_t
17937 vm_map_copy_to_physcopy(
17938 	vm_map_copy_t   copy_map,
17939 	vm_map_t        target_map)
17940 {
17941 	vm_map_size_t           size;
17942 	vm_map_entry_t          entry;
17943 	vm_map_entry_t          new_entry;
17944 	vm_object_t             new_object;
17945 	unsigned int            pmap_flags;
17946 	pmap_t                  new_pmap;
17947 	vm_map_t                new_map;
17948 	vm_map_address_t        src_start, src_end, src_cur;
17949 	vm_map_address_t        dst_start, dst_end, dst_cur;
17950 	kern_return_t           kr;
17951 	void                    *kbuf;
17952 
17953 	/*
17954 	 * Perform the equivalent of vm_allocate() and memcpy().
17955 	 * Replace the mappings in "copy_map" with the newly allocated mapping.
17956 	 */
17957 	DEBUG4K_COPY("copy_map %p (%d %d 0x%llx 0x%llx) BEFORE\n", copy_map, copy_map->cpy_hdr.page_shift, copy_map->cpy_hdr.nentries, copy_map->offset, (uint64_t)copy_map->size);
17958 
17959 	assert(copy_map->cpy_hdr.page_shift != VM_MAP_PAGE_MASK(target_map));
17960 
17961 	/* create a new pmap to map "copy_map" */
17962 	pmap_flags = 0;
17963 	assert(copy_map->cpy_hdr.page_shift == FOURK_PAGE_SHIFT);
17964 #if PMAP_CREATE_FORCE_4K_PAGES
17965 	pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES;
17966 #endif /* PMAP_CREATE_FORCE_4K_PAGES */
17967 	pmap_flags |= PMAP_CREATE_64BIT;
17968 	new_pmap = pmap_create_options(NULL, (vm_map_size_t)0, pmap_flags);
17969 	if (new_pmap == NULL) {
17970 		return KERN_RESOURCE_SHORTAGE;
17971 	}
17972 
17973 	/* allocate new VM object */
17974 	size = VM_MAP_ROUND_PAGE(copy_map->size, PAGE_MASK);
17975 	new_object = vm_object_allocate(size);
17976 	assert(new_object);
17977 
17978 	/* allocate new VM map entry */
17979 	new_entry = vm_map_copy_entry_create(copy_map);
17980 	assert(new_entry);
17981 
17982 	/* finish initializing new VM map entry */
17983 	new_entry->protection = VM_PROT_DEFAULT;
17984 	new_entry->max_protection = VM_PROT_DEFAULT;
17985 	new_entry->use_pmap = TRUE;
17986 
17987 	/* make new VM map entry point to new VM object */
17988 	new_entry->vme_start = 0;
17989 	new_entry->vme_end = size;
17990 	VME_OBJECT_SET(new_entry, new_object, false, 0);
17991 	VME_OFFSET_SET(new_entry, 0);
17992 
17993 	/* create a new pageable VM map to map "copy_map" */
17994 	new_map = vm_map_create_options(new_pmap, 0, MACH_VM_MAX_ADDRESS,
17995 	    VM_MAP_CREATE_PAGEABLE);
17996 	assert(new_map);
17997 	vm_map_set_page_shift(new_map, copy_map->cpy_hdr.page_shift);
17998 
17999 	/* map "copy_map" in the new VM map */
18000 	src_start = 0;
18001 	kr = vm_map_copyout_internal(
18002 		new_map,
18003 		&src_start,
18004 		copy_map,
18005 		copy_map->size,
18006 		FALSE, /* consume_on_success */
18007 		VM_PROT_DEFAULT,
18008 		VM_PROT_DEFAULT,
18009 		VM_INHERIT_DEFAULT);
18010 	assert(kr == KERN_SUCCESS);
18011 	src_end = src_start + copy_map->size;
18012 
18013 	/* map "new_object" in the new VM map */
18014 	vm_object_reference(new_object);
18015 	dst_start = 0;
18016 	kr = vm_map_enter(new_map,
18017 	    &dst_start,
18018 	    size,
18019 	    0,               /* mask */
18020 	    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_OSFMK),
18021 	    new_object,
18022 	    0,               /* offset */
18023 	    FALSE,               /* needs copy */
18024 	    VM_PROT_DEFAULT,
18025 	    VM_PROT_DEFAULT,
18026 	    VM_INHERIT_DEFAULT);
18027 	assert(kr == KERN_SUCCESS);
18028 	dst_end = dst_start + size;
18029 
18030 	/* get a kernel buffer */
18031 	kbuf = kalloc_data(PAGE_SIZE, Z_WAITOK | Z_NOFAIL);
18032 
18033 	/* physically copy "copy_map" mappings to new VM object */
18034 	for (src_cur = src_start, dst_cur = dst_start;
18035 	    src_cur < src_end;
18036 	    src_cur += PAGE_SIZE, dst_cur += PAGE_SIZE) {
18037 		vm_size_t bytes;
18038 
18039 		bytes = PAGE_SIZE;
18040 		if (src_cur + PAGE_SIZE > src_end) {
18041 			/* partial copy for last page */
18042 			bytes = src_end - src_cur;
18043 			assert(bytes > 0 && bytes < PAGE_SIZE);
18044 			/* rest of dst page should be zero-filled */
18045 		}
18046 		/* get bytes from src mapping */
18047 		kr = copyinmap(new_map, src_cur, kbuf, bytes);
18048 		if (kr != KERN_SUCCESS) {
18049 			DEBUG4K_COPY("copyinmap(%p, 0x%llx, %p, 0x%llx) kr 0x%x\n", new_map, (uint64_t)src_cur, kbuf, (uint64_t)bytes, kr);
18050 		}
18051 		/* put bytes in dst mapping */
18052 		assert(dst_cur < dst_end);
18053 		assert(dst_cur + bytes <= dst_end);
18054 		kr = copyoutmap(new_map, kbuf, dst_cur, bytes);
18055 		if (kr != KERN_SUCCESS) {
18056 			DEBUG4K_COPY("copyoutmap(%p, %p, 0x%llx, 0x%llx) kr 0x%x\n", new_map, kbuf, (uint64_t)dst_cur, (uint64_t)bytes, kr);
18057 		}
18058 	}
18059 
18060 	/* free kernel buffer */
18061 	kfree_data(kbuf, PAGE_SIZE);
18062 
18063 	/* destroy new map */
18064 	vm_map_destroy(new_map);
18065 	new_map = VM_MAP_NULL;
18066 
18067 	/* dispose of the old map entries in "copy_map" */
18068 	while (vm_map_copy_first_entry(copy_map) !=
18069 	    vm_map_copy_to_entry(copy_map)) {
18070 		entry = vm_map_copy_first_entry(copy_map);
18071 		vm_map_copy_entry_unlink(copy_map, entry);
18072 		if (entry->is_sub_map) {
18073 			vm_map_deallocate(VME_SUBMAP(entry));
18074 		} else {
18075 			vm_object_deallocate(VME_OBJECT(entry));
18076 		}
18077 		vm_map_copy_entry_dispose(entry);
18078 	}
18079 
18080 	/* change "copy_map"'s page_size to match "target_map" */
18081 	copy_map->cpy_hdr.page_shift = (uint16_t)VM_MAP_PAGE_SHIFT(target_map);
18082 	copy_map->offset = 0;
18083 	copy_map->size = size;
18084 
18085 	/* insert new map entry in "copy_map" */
18086 	assert(vm_map_copy_last_entry(copy_map) == vm_map_copy_to_entry(copy_map));
18087 	vm_map_copy_entry_link(copy_map, vm_map_copy_last_entry(copy_map), new_entry);
18088 
18089 	DEBUG4K_COPY("copy_map %p (%d %d 0x%llx 0x%llx) AFTER\n", copy_map, copy_map->cpy_hdr.page_shift, copy_map->cpy_hdr.nentries, copy_map->offset, (uint64_t)copy_map->size);
18090 	return KERN_SUCCESS;
18091 }
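/*
 * Editor's worked example (illustrative): with a 4K-page copy_map of
 * size 0x5000 and a 16K kernel PAGE_SIZE, "size" rounds up to 0x8000;
 * the copy loop above moves 0x4000 bytes on its first pass and 0x1000
 * on its second (the partial last page), and the remainder of the
 * destination page stays zero-filled courtesy of the fresh VM object.
 */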
18092 
18093 void
18094 vm_map_copy_adjust_get_target_copy_map(
18095 	vm_map_copy_t   copy_map,
18096 	vm_map_copy_t   *target_copy_map_p);
18097 void
18098 vm_map_copy_adjust_get_target_copy_map(
18099 	vm_map_copy_t   copy_map,
18100 	vm_map_copy_t   *target_copy_map_p)
18101 {
18102 	vm_map_copy_t   target_copy_map;
18103 	vm_map_entry_t  entry, target_entry;
18104 
18105 	if (*target_copy_map_p != VM_MAP_COPY_NULL) {
18106 		/* the caller already has a "target_copy_map": use it */
18107 		return;
18108 	}
18109 
18110 	/* the caller wants us to create a new copy of "copy_map" */
18111 	assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST);
18112 	target_copy_map = vm_map_copy_allocate(copy_map->type);
18113 	target_copy_map->offset = copy_map->offset;
18114 	target_copy_map->size = copy_map->size;
18115 	target_copy_map->cpy_hdr.page_shift = copy_map->cpy_hdr.page_shift;
18116 	for (entry = vm_map_copy_first_entry(copy_map);
18117 	    entry != vm_map_copy_to_entry(copy_map);
18118 	    entry = entry->vme_next) {
18119 		target_entry = vm_map_copy_entry_create(target_copy_map);
18120 		vm_map_entry_copy_full(target_entry, entry);
18121 		if (target_entry->is_sub_map) {
18122 			vm_map_reference(VME_SUBMAP(target_entry));
18123 		} else {
18124 			vm_object_reference(VME_OBJECT(target_entry));
18125 		}
18126 		vm_map_copy_entry_link(
18127 			target_copy_map,
18128 			vm_map_copy_last_entry(target_copy_map),
18129 			target_entry);
18130 	}
18131 	entry = VM_MAP_ENTRY_NULL;
18132 	*target_copy_map_p = target_copy_map;
18133 }
18134 
18135 /*
18136  * Callers of this function must call vm_map_copy_require on
18137  * previously created vm_map_copy_t or pass a newly created
18138  * one to ensure that it hasn't been forged.
18139  */
18140 static void
18141 vm_map_copy_trim(
18142 	vm_map_copy_t   copy_map,
18143 	uint16_t        new_page_shift,
18144 	vm_map_offset_t trim_start,
18145 	vm_map_offset_t trim_end)
18146 {
18147 	uint16_t        copy_page_shift;
18148 	vm_map_entry_t  entry, next_entry;
18149 
18150 	assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST);
18151 	assert(copy_map->cpy_hdr.nentries > 0);
18152 
18153 	trim_start += vm_map_copy_first_entry(copy_map)->vme_start;
18154 	trim_end += vm_map_copy_first_entry(copy_map)->vme_start;
18155 
18156 	/* use the new page_shift to do the clipping */
18157 	copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy_map);
18158 	copy_map->cpy_hdr.page_shift = new_page_shift;
18159 
18160 	for (entry = vm_map_copy_first_entry(copy_map);
18161 	    entry != vm_map_copy_to_entry(copy_map);
18162 	    entry = next_entry) {
18163 		next_entry = entry->vme_next;
18164 		if (entry->vme_end <= trim_start) {
18165 			/* entry fully before trim range: skip */
18166 			continue;
18167 		}
18168 		if (entry->vme_start >= trim_end) {
18169 			/* entry fully after trim range: done */
18170 			break;
18171 		}
18172 		/* clip entry if needed */
18173 		vm_map_copy_clip_start(copy_map, entry, trim_start);
18174 		vm_map_copy_clip_end(copy_map, entry, trim_end);
18175 		/* dispose of entry */
18176 		copy_map->size -= entry->vme_end - entry->vme_start;
18177 		vm_map_copy_entry_unlink(copy_map, entry);
18178 		if (entry->is_sub_map) {
18179 			vm_map_deallocate(VME_SUBMAP(entry));
18180 		} else {
18181 			vm_object_deallocate(VME_OBJECT(entry));
18182 		}
18183 		vm_map_copy_entry_dispose(entry);
18184 		entry = VM_MAP_ENTRY_NULL;
18185 	}
18186 
18187 	/* restore copy_map's original page_shift */
18188 	copy_map->cpy_hdr.page_shift = copy_page_shift;
18189 }
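/*
 * Editor's worked example (illustrative): to trim a 4K-page copy_map
 * with a 16K "new_page_shift", the routine temporarily adopts the 16K
 * shift so the clip calls above cut on 16K boundaries; e.g. trim_start
 * 0 and trim_end 0x4000 clip and discard exactly the first 16K worth
 * of entries (splitting any entry that straddles 0x4000) and shrink
 * copy_map->size to match, before the original page shift is restored.
 */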
18190 
18191 /*
18192  * Make any necessary adjustments to "copy_map" to allow it to be
18193  * mapped into "target_map".
18194  * If no changes were necessary, "target_copy_map" points to the
18195  * untouched "copy_map".
18196  * If changes are necessary, changes will be made to "target_copy_map".
18197  * If "target_copy_map" was NULL, we create a new "vm_map_copy_t" and
18198  * copy the original "copy_map" to it before applying the changes.
18199  * The caller should discard "target_copy_map" if it's not the same as
18200  * the original "copy_map".
18201  */
18202 /* TODO4K: also adjust to sub-range in the copy_map -> add start&end? */
18203 kern_return_t
18204 vm_map_copy_adjust_to_target(
18205 	vm_map_copy_t           src_copy_map,
18206 	vm_map_offset_t         offset,
18207 	vm_map_size_t           size,
18208 	vm_map_t                target_map,
18209 	boolean_t               copy,
18210 	vm_map_copy_t           *target_copy_map_p,
18211 	vm_map_offset_t         *overmap_start_p,
18212 	vm_map_offset_t         *overmap_end_p,
18213 	vm_map_offset_t         *trimmed_start_p)
18214 {
18215 	vm_map_copy_t           copy_map, target_copy_map;
18216 	vm_map_size_t           target_size;
18217 	vm_map_size_t           src_copy_map_size;
18218 	vm_map_size_t           overmap_start, overmap_end;
18219 	int                     misalignments;
18220 	vm_map_entry_t          entry, target_entry;
18221 	vm_map_offset_t         addr_adjustment;
18222 	vm_map_offset_t         new_start, new_end;
18223 	int                     copy_page_mask, target_page_mask;
18224 	uint16_t                copy_page_shift, target_page_shift;
18225 	vm_map_offset_t         trimmed_end;
18226 
18227 	/*
18228 	 * Assert that the vm_map_copy is coming from the right
18229 	 * zone and hasn't been forged
18230 	 */
18231 	vm_map_copy_require(src_copy_map);
18232 	assert(src_copy_map->type == VM_MAP_COPY_ENTRY_LIST);
18233 
18234 	/*
18235 	 * Start working with "src_copy_map" but we'll switch
18236 	 * to "target_copy_map" as soon as we start making adjustments.
18237 	 */
18238 	copy_map = src_copy_map;
18239 	src_copy_map_size = src_copy_map->size;
18240 
18241 	copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy_map);
18242 	copy_page_mask = VM_MAP_COPY_PAGE_MASK(copy_map);
18243 	target_page_shift = (uint16_t)VM_MAP_PAGE_SHIFT(target_map);
18244 	target_page_mask = VM_MAP_PAGE_MASK(target_map);
18245 
18246 	DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p...\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, *target_copy_map_p);
18247 
18248 	target_copy_map = *target_copy_map_p;
18249 	if (target_copy_map != VM_MAP_COPY_NULL) {
18250 		vm_map_copy_require(target_copy_map);
18251 	}
18252 
18253 	if (offset + size > copy_map->size) {
18254 		DEBUG4K_ERROR("copy_map %p (%d->%d) copy_map->size 0x%llx offset 0x%llx size 0x%llx KERN_INVALID_ARGUMENT\n", copy_map, copy_page_shift, target_page_shift, (uint64_t)copy_map->size, (uint64_t)offset, (uint64_t)size);
18255 		return KERN_INVALID_ARGUMENT;
18256 	}
18257 
18258 	/* trim the end */
18259 	trimmed_end = 0;
18260 	new_end = VM_MAP_ROUND_PAGE(offset + size, target_page_mask);
18261 	if (new_end < copy_map->size) {
18262 		trimmed_end = src_copy_map_size - new_end;
18263 		DEBUG4K_ADJUST("copy_map %p (%d->%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p... trim end from 0x%llx to 0x%llx\n", copy_map, copy_page_shift, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, target_copy_map, (uint64_t)new_end, (uint64_t)copy_map->size);
18264 		/* get "target_copy_map" if needed and adjust it */
18265 		vm_map_copy_adjust_get_target_copy_map(copy_map,
18266 		    &target_copy_map);
18267 		copy_map = target_copy_map;
18268 		vm_map_copy_trim(target_copy_map, target_page_shift,
18269 		    new_end, copy_map->size);
18270 	}
18271 
18272 	/* trim the start */
18273 	new_start = VM_MAP_TRUNC_PAGE(offset, target_page_mask);
18274 	if (new_start != 0) {
18275 		DEBUG4K_ADJUST("copy_map %p (%d->%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p... trim start from 0x%llx to 0x%llx\n", copy_map, copy_page_shift, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, target_copy_map, (uint64_t)0, (uint64_t)new_start);
18276 		/* get "target_copy_map" if needed and adjust it */
18277 		vm_map_copy_adjust_get_target_copy_map(copy_map,
18278 		    &target_copy_map);
18279 		copy_map = target_copy_map;
18280 		vm_map_copy_trim(target_copy_map, target_page_shift,
18281 		    0, new_start);
18282 	}
18283 	*trimmed_start_p = new_start;
18284 
18285 	/* target_size starts with what's left after trimming */
18286 	target_size = copy_map->size;
18287 	assertf(target_size == src_copy_map_size - *trimmed_start_p - trimmed_end,
18288 	    "target_size 0x%llx src_copy_map_size 0x%llx trimmed_start 0x%llx trimmed_end 0x%llx\n",
18289 	    (uint64_t)target_size, (uint64_t)src_copy_map_size,
18290 	    (uint64_t)*trimmed_start_p, (uint64_t)trimmed_end);
18291 
18292 	/* check for misalignments but don't adjust yet */
18293 	misalignments = 0;
18294 	overmap_start = 0;
18295 	overmap_end = 0;
18296 	if (copy_page_shift < target_page_shift) {
18297 		/*
18298 		 * Remapping from 4K to 16K: check the VM object alignments
18299 		 * throughout the range.
18300 		 * If the start and end of the range are mis-aligned, we can
18301 		 * over-map to re-align, and adjust the "overmap" start/end
18302 		 * and "target_size" of the range accordingly.
18303 		 * If there is any mis-alignment within the range:
18304 		 *     if "copy":
18305 		 *         we can do immediate-copy instead of copy-on-write,
18306 		 *     else:
18307 		 *         no way to remap and share; fail.
18308 		 */
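		/*
		 * Example: an entry whose VM object offset is 0x1000 is
		 * 4K-aligned but not 16K-aligned.  If it is the first entry
		 * of a shared (!copy) mapping, we can re-align by mapping
		 * from object offset 0x0 and exposing an extra 0x1000 bytes
		 * at the start ("overmap_start").  The same misalignment on
		 * an interior entry cannot be fixed by over-mapping, so it
		 * counts as a "misalignment" and forces a physical copy
		 * (or a failure if sharing was requested).
		 */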
18309 		for (entry = vm_map_copy_first_entry(copy_map);
18310 		    entry != vm_map_copy_to_entry(copy_map);
18311 		    entry = entry->vme_next) {
18312 			vm_object_offset_t object_offset_start, object_offset_end;
18313 
18314 			object_offset_start = VME_OFFSET(entry);
18315 			object_offset_end = object_offset_start;
18316 			object_offset_end += entry->vme_end - entry->vme_start;
18317 			if (object_offset_start & target_page_mask) {
18318 				if (entry == vm_map_copy_first_entry(copy_map) && !copy) {
18319 					overmap_start++;
18320 				} else {
18321 					misalignments++;
18322 				}
18323 			}
18324 			if (object_offset_end & target_page_mask) {
18325 				if (entry->vme_next == vm_map_copy_to_entry(copy_map) && !copy) {
18326 					overmap_end++;
18327 				} else {
18328 					misalignments++;
18329 				}
18330 			}
18331 		}
18332 	}
18333 	entry = VM_MAP_ENTRY_NULL;
18334 
18335 	/* decide how to deal with misalignments */
18336 	assert(overmap_start <= 1);
18337 	assert(overmap_end <= 1);
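	/*
	 * Only the first entry can contribute to "overmap_start" and only
	 * the last entry to "overmap_end", so each count is at most 1.
	 */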
18338 	if (!overmap_start && !overmap_end && !misalignments) {
18339 		/* copy_map is properly aligned for target_map ... */
18340 		if (*trimmed_start_p) {
18341 			/* ... but we trimmed it, so still need to adjust */
18342 		} else {
18343 			/* ... and we didn't trim anything: we're done */
18344 			if (target_copy_map == VM_MAP_COPY_NULL) {
18345 				target_copy_map = copy_map;
18346 			}
18347 			*target_copy_map_p = target_copy_map;
18348 			*overmap_start_p = 0;
18349 			*overmap_end_p = 0;
18350 			DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx) -> trimmed 0x%llx overmap start 0x%llx end 0x%llx KERN_SUCCESS\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p);
18351 			return KERN_SUCCESS;
18352 		}
18353 	} else if (misalignments && !copy) {
18354 		/* can't "share" if misaligned */
18355 		DEBUG4K_ADJUST("unsupported sharing\n");
18356 #if MACH_ASSERT
18357 		if (debug4k_panic_on_misaligned_sharing) {
18358 			panic("DEBUG4k %s:%d unsupported sharing", __FUNCTION__, __LINE__);
18359 		}
18360 #endif /* MACH_ASSERT */
18361 		DEBUG4K_ADJUST("copy_map %p (%d) target_map %p (%d) copy %d target_copy_map %p -> KERN_NOT_SUPPORTED\n", copy_map, copy_page_shift, target_map, target_page_shift, copy, *target_copy_map_p);
18362 		return KERN_NOT_SUPPORTED;
18363 	} else {
18364 		/* can't virtual-copy if misaligned (but can physical-copy) */
18365 		DEBUG4K_ADJUST("mis-aligned copying\n");
18366 	}
18367 
18368 	/* get a "target_copy_map" if needed and switch to it */
18369 	vm_map_copy_adjust_get_target_copy_map(copy_map, &target_copy_map);
18370 	copy_map = target_copy_map;
18371 
18372 	if (misalignments && copy) {
18373 		vm_map_size_t target_copy_map_size;
18374 
18375 		/*
18376 		 * Can't do copy-on-write with misaligned mappings.
18377 		 * Replace the mappings with a physical copy of the original
18378 		 * mappings' contents.
18379 		 */
18380 		target_copy_map_size = target_copy_map->size;
18381 		kern_return_t kr = vm_map_copy_to_physcopy(target_copy_map, target_map);
18382 		if (kr != KERN_SUCCESS) {
18383 			return kr;
18384 		}
18385 		*target_copy_map_p = target_copy_map;
18386 		*overmap_start_p = 0;
18387 		*overmap_end_p = target_copy_map->size - target_copy_map_size;
18388 		DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx)-> trimmed 0x%llx overmap start 0x%llx end 0x%llx PHYSCOPY\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p);
18389 		return KERN_SUCCESS;
18390 	}
18391 
18392 	/* apply the adjustments */
18393 	misalignments = 0;
18394 	overmap_start = 0;
18395 	overmap_end = 0;
18396 	/* remove copy_map->offset, so that everything starts at offset 0 */
18397 	addr_adjustment = copy_map->offset;
18398 	/* also remove whatever we trimmed from the start */
18399 	addr_adjustment += *trimmed_start_p;
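	/*
	 * Example: if copy_map->offset was 0x4000 and we trimmed 0x2000
	 * from the start, every entry shifts down by 0x6000 so that the
	 * adjusted copy starts at address 0.
	 */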
18400 	for (target_entry = vm_map_copy_first_entry(target_copy_map);
18401 	    target_entry != vm_map_copy_to_entry(target_copy_map);
18402 	    target_entry = target_entry->vme_next) {
18403 		vm_object_offset_t object_offset_start, object_offset_end;
18404 
18405 		DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx BEFORE\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
18406 		object_offset_start = VME_OFFSET(target_entry);
18407 		if (object_offset_start & target_page_mask) {
18408 			DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx misaligned at start\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
18409 			if (target_entry == vm_map_copy_first_entry(target_copy_map)) {
18410 				/*
18411 				 * start of 1st entry is mis-aligned:
18412 				 * re-adjust by over-mapping.
18413 				 */
18414 				overmap_start = object_offset_start - trunc_page_mask_64(object_offset_start, target_page_mask);
18415 				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> overmap_start 0x%llx\n", target_entry, VME_OFFSET(target_entry), copy, (uint64_t)overmap_start);
18416 				VME_OFFSET_SET(target_entry, VME_OFFSET(target_entry) - overmap_start);
18417 			} else {
18418 				misalignments++;
18419 				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> misalignments %d\n", target_entry, VME_OFFSET(target_entry), copy, misalignments);
18420 				assert(copy);
18421 			}
18422 		}
18423 
18424 		if (target_entry == vm_map_copy_first_entry(target_copy_map)) {
18425 			target_size += overmap_start;
18426 		} else {
18427 			target_entry->vme_start += overmap_start;
18428 		}
18429 		target_entry->vme_end += overmap_start;
18430 
18431 		object_offset_end = VME_OFFSET(target_entry) + target_entry->vme_end - target_entry->vme_start;
18432 		if (object_offset_end & target_page_mask) {
18433 			DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx misaligned at end\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
18434 			if (target_entry->vme_next == vm_map_copy_to_entry(target_copy_map)) {
18435 				/*
18436 				 * end of last entry is mis-aligned: re-adjust by over-mapping.
18437 				 */
18438 				overmap_end = round_page_mask_64(object_offset_end, target_page_mask) - object_offset_end;
18439 				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> overmap_end 0x%llx\n", target_entry, VME_OFFSET(target_entry), copy, (uint64_t)overmap_end);
18440 				target_entry->vme_end += overmap_end;
18441 				target_size += overmap_end;
18442 			} else {
18443 				misalignments++;
18444 				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> misalignments %d\n", target_entry, VME_OFFSET(target_entry), copy, misalignments);
18445 				assert(copy);
18446 			}
18447 		}
18448 		target_entry->vme_start -= addr_adjustment;
18449 		target_entry->vme_end -= addr_adjustment;
18450 		DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx AFTER\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
18451 	}
18452 
18453 	target_copy_map->size = target_size;
18454 	target_copy_map->offset += overmap_start;
18455 	target_copy_map->offset -= addr_adjustment;
18456 	target_copy_map->cpy_hdr.page_shift = target_page_shift;
18457 
18458 //	assert(VM_MAP_PAGE_ALIGNED(target_copy_map->size, target_page_mask));
18459 //	assert(VM_MAP_PAGE_ALIGNED(target_copy_map->offset, FOURK_PAGE_MASK));
18460 	assert(overmap_start < VM_MAP_PAGE_SIZE(target_map));
18461 	assert(overmap_end < VM_MAP_PAGE_SIZE(target_map));
18462 
18463 	*target_copy_map_p = target_copy_map;
18464 	*overmap_start_p = overmap_start;
18465 	*overmap_end_p = overmap_end;
18466 
18467 	DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx) -> trimmed 0x%llx overmap start 0x%llx end 0x%llx KERN_SUCCESS\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p);
18468 	return KERN_SUCCESS;
18469 }
18470 
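/*
 *	Routine:	vm_map_range_physical_size
 *
 *	Description:
 *		Compute the size, rounded to kernel pages, that the given
 *		range of "map" would occupy once adjusted to the kernel's
 *		native page size.  For maps whose page size already matches
 *		the kernel's, this is just the page-rounded size of the range.
 */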
18471 kern_return_t
18472 vm_map_range_physical_size(
18473 	vm_map_t         map,
18474 	vm_map_address_t start,
18475 	mach_vm_size_t   size,
18476 	mach_vm_size_t * phys_size)
18477 {
18478 	kern_return_t   kr;
18479 	vm_map_copy_t   copy_map, target_copy_map;
18480 	vm_map_offset_t adjusted_start, adjusted_end;
18481 	vm_map_size_t   adjusted_size;
18482 	vm_prot_t       cur_prot, max_prot;
18483 	vm_map_offset_t overmap_start, overmap_end, trimmed_start, end;
18484 	vm_map_kernel_flags_t vmk_flags;
18485 
18486 	if (size == 0) {
18487 		DEBUG4K_SHARE("map %p start 0x%llx size 0x%llx -> phys_size 0!\n", map, (uint64_t)start, (uint64_t)size);
18488 		*phys_size = 0;
18489 		return KERN_SUCCESS;
18490 	}
18491 
18492 	adjusted_start = vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map));
18493 	adjusted_end = vm_map_round_page(start + size, VM_MAP_PAGE_MASK(map));
18494 	if (__improbable(os_add_overflow(start, size, &end) ||
18495 	    adjusted_end <= adjusted_start)) {
18496 		/* wraparound */
18497 		printf("%s:%d(start=0x%llx, size=0x%llx) pgmask 0x%x: wraparound\n", __FUNCTION__, __LINE__, (uint64_t)start, (uint64_t)size, VM_MAP_PAGE_MASK(map));
18498 		*phys_size = 0;
18499 		return KERN_INVALID_ARGUMENT;
18500 	}
18501 	if (__improbable(vm_map_range_overflows(map, start, size))) {
18502 		*phys_size = 0;
18503 		return KERN_INVALID_ADDRESS;
18504 	}
18505 	assert(adjusted_end > adjusted_start);
18506 	adjusted_size = adjusted_end - adjusted_start;
18507 	*phys_size = adjusted_size;
18508 	if (VM_MAP_PAGE_SIZE(map) == PAGE_SIZE) {
18509 		return KERN_SUCCESS;
18510 	}
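	/*
	 * The map uses pages smaller than the kernel's (e.g. a 4K map on
	 * a 16K-page system), so the map-page-rounded size may understate
	 * the kernel-page footprint; re-round (and, for nonzero starts,
	 * extract and adjust the range below) to compute it.
	 */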
18511 	if (start == 0) {
18512 		adjusted_start = vm_map_trunc_page(start, PAGE_MASK);
18513 		adjusted_end = vm_map_round_page(start + size, PAGE_MASK);
18514 		if (__improbable(adjusted_end <= adjusted_start)) {
18515 			/* wraparound */
18516 			printf("%s:%d(start=0x%llx, size=0x%llx) pgmask 0x%x: wraparound\n", __FUNCTION__, __LINE__, (uint64_t)start, (uint64_t)size, PAGE_MASK);
18517 			*phys_size = 0;
18518 			return KERN_INVALID_ARGUMENT;
18519 		}
18520 		assert(adjusted_end > adjusted_start);
18521 		adjusted_size = adjusted_end - adjusted_start;
18522 		*phys_size = adjusted_size;
18523 		return KERN_SUCCESS;
18524 	}
18525 
18526 	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
18527 	vmk_flags.vmkf_copy_pageable = TRUE;
18528 	vmk_flags.vmkf_copy_same_map = TRUE;
18529 	assert(adjusted_size != 0);
18530 	cur_prot = VM_PROT_NONE; /* legacy mode */
18531 	max_prot = VM_PROT_NONE; /* legacy mode */
18532 	kr = vm_map_copy_extract(map, adjusted_start, adjusted_size,
18533 	    FALSE /* copy */,
18534 	    &copy_map,
18535 	    &cur_prot, &max_prot, VM_INHERIT_DEFAULT,
18536 	    vmk_flags);
18537 	if (kr != KERN_SUCCESS) {
18538 		DEBUG4K_ERROR("map %p start 0x%llx 0x%llx size 0x%llx 0x%llx kr 0x%x\n", map, (uint64_t)start, (uint64_t)adjusted_start, size, (uint64_t)adjusted_size, kr);
18539 		//assert(0);
18540 		*phys_size = 0;
18541 		return kr;
18542 	}
18543 	assert(copy_map != VM_MAP_COPY_NULL);
18544 	target_copy_map = copy_map;
18545 	DEBUG4K_ADJUST("adjusting...\n");
18546 	kr = vm_map_copy_adjust_to_target(
18547 		copy_map,
18548 		start - adjusted_start, /* offset */
18549 		size, /* size */
18550 		kernel_map,
18551 		FALSE,                          /* copy */
18552 		&target_copy_map,
18553 		&overmap_start,
18554 		&overmap_end,
18555 		&trimmed_start);
18556 	if (kr == KERN_SUCCESS) {
18557 		if (target_copy_map->size != *phys_size) {
18558 			DEBUG4K_ADJUST("map %p (%d) start 0x%llx size 0x%llx adjusted_start 0x%llx adjusted_end 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx phys_size 0x%llx -> 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)start, (uint64_t)size, (uint64_t)adjusted_start, (uint64_t)adjusted_end, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)*phys_size, (uint64_t)target_copy_map->size);
18559 		}
18560 		*phys_size = target_copy_map->size;
18561 	} else {
18562 		DEBUG4K_ERROR("map %p start 0x%llx 0x%llx size 0x%llx 0x%llx kr 0x%x\n", map, (uint64_t)start, (uint64_t)adjusted_start, size, (uint64_t)adjusted_size, kr);
18563 		//assert(0);
18564 		*phys_size = 0;
18565 	}
18566 	vm_map_copy_discard(copy_map);
18567 	copy_map = VM_MAP_COPY_NULL;
18568 
18569 	return kr;
18570 }
18571 
18572 
18573 kern_return_t
18574 memory_entry_check_for_adjustment(
18575 	vm_map_t                        src_map,
18576 	ipc_port_t                      port,
18577 	vm_map_offset_t         *overmap_start,
18578 	vm_map_offset_t         *overmap_end)
18579 {
18580 	kern_return_t kr = KERN_SUCCESS;
18581 	vm_map_copy_t copy_map = VM_MAP_COPY_NULL, target_copy_map = VM_MAP_COPY_NULL;
18582 
18583 	assert(port);
18584 	assertf(ip_kotype(port) == IKOT_NAMED_ENTRY, "Port Type expected: %d...received:%d\n", IKOT_NAMED_ENTRY, ip_kotype(port));
18585 
18586 	vm_named_entry_t        named_entry;
18587 
18588 	named_entry = mach_memory_entry_from_port(port);
18589 	named_entry_lock(named_entry);
18590 	copy_map = named_entry->backing.copy;
18591 	target_copy_map = copy_map;
18592 
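	/*
	 * Only adjust when the source map uses pages smaller than the
	 * kernel's (e.g. a 4K map on a 16K-page system); otherwise the
	 * named entry's copy map can be used as-is.
	 */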
18593 	if (src_map && VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT) {
18594 		vm_map_offset_t trimmed_start;
18595 
18596 		trimmed_start = 0;
18597 		DEBUG4K_ADJUST("adjusting...\n");
18598 		kr = vm_map_copy_adjust_to_target(
18599 			copy_map,
18600 			0, /* offset */
18601 			copy_map->size, /* size */
18602 			src_map,
18603 			FALSE, /* copy */
18604 			&target_copy_map,
18605 			overmap_start,
18606 			overmap_end,
18607 			&trimmed_start);
18608 		assert(trimmed_start == 0);
18609 	}
18610 	named_entry_unlock(named_entry);
18611 
18612 	return kr;
18613 }
18614 
18615 
18616 /*
18617  *	Routine:	vm_remap
18618  *
18619  *			Map a portion of a task's address space.
18620  *			The mapped region must not overlap more than
18621  *			one VM memory object. Protections and
18622  *			inheritance attributes remain the same
18623  *			as in the original task and are out parameters.
18624  *			Source and target tasks can be identical.
18625  *			Other attributes are identical to those for vm_map().
18626  */
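/*
 * Hypothetical usage sketch (not from this file): share "size" bytes of
 * "src_map" at "src_addr" into "target_map".  Here "vmk_flags" is assumed
 * to be already initialized for an anywhere-mapping, and "src_addr"/"size"
 * are assumed valid:
 *
 *	vm_map_offset_t addr = 0;
 *	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE; // legacy IN mode
 *	kern_return_t kr;
 *
 *	kr = vm_map_remap(target_map, &addr, size, 0, vmk_flags,
 *	    src_map, src_addr, FALSE,  // copy=FALSE: share, don't copy
 *	    &cur, &max, VM_INHERIT_DEFAULT);
 *
 * On success, "addr" holds the new mapping's address and "cur"/"max"
 * report the resulting protections.
 */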
18627 kern_return_t
18628 vm_map_remap(
18629 	vm_map_t                target_map,
18630 	vm_map_address_t        *address,
18631 	vm_map_size_t           size,
18632 	vm_map_offset_t         mask,
18633 	vm_map_kernel_flags_t   vmk_flags,
18634 	vm_map_t                src_map,
18635 	vm_map_offset_t         memory_address,
18636 	boolean_t               copy,
18637 	vm_prot_t               *cur_protection, /* IN/OUT */
18638 	vm_prot_t               *max_protection, /* IN/OUT */
18639 	vm_inherit_t            inheritance)
18640 {
18641 	kern_return_t           result;
18642 	vm_map_entry_t          entry;
18643 	vm_map_entry_t          insp_entry = VM_MAP_ENTRY_NULL;
18644 	vm_map_entry_t          new_entry;
18645 	vm_map_copy_t           copy_map;
18646 	vm_map_offset_t         offset_in_mapping;
18647 	vm_map_size_t           target_size = 0;
18648 	vm_map_size_t           src_page_mask, target_page_mask;
18649 	vm_map_offset_t         overmap_start, overmap_end, trimmed_start;
18650 	vm_map_offset_t         initial_memory_address;
18651 	vm_map_size_t           initial_size;
18652 	VM_MAP_ZAP_DECLARE(zap_list);
18653 
18654 	if (target_map == VM_MAP_NULL) {
18655 		return KERN_INVALID_ARGUMENT;
18656 	}
18657 
18658 	if (__improbable(vm_map_range_overflows(src_map, memory_address, size))) {
18659 		return KERN_INVALID_ARGUMENT;
18660 	}
18661 
18662 	initial_memory_address = memory_address;
18663 	initial_size = size;
18664 	src_page_mask = VM_MAP_PAGE_MASK(src_map);
18665 	target_page_mask = VM_MAP_PAGE_MASK(target_map);
18666 
18667 	switch (inheritance) {
18668 	case VM_INHERIT_NONE:
18669 	case VM_INHERIT_COPY:
18670 	case VM_INHERIT_SHARE:
18671 		if (size != 0 && src_map != VM_MAP_NULL) {
18672 			break;
18673 		}
18674 		OS_FALLTHROUGH;
18675 	default:
18676 		return KERN_INVALID_ARGUMENT;
18677 	}
18678 
18679 	if (src_page_mask != target_page_mask) {
18680 		if (copy) {
18681 			DEBUG4K_COPY("src_map %p pgsz 0x%x addr 0x%llx size 0x%llx copy %d -> target_map %p pgsz 0x%x\n", src_map, VM_MAP_PAGE_SIZE(src_map), (uint64_t)memory_address, (uint64_t)size, copy, target_map, VM_MAP_PAGE_SIZE(target_map));
18682 		} else {
18683 			DEBUG4K_SHARE("src_map %p pgsz 0x%x addr 0x%llx size 0x%llx copy %d -> target_map %p pgsz 0x%x\n", src_map, VM_MAP_PAGE_SIZE(src_map), (uint64_t)memory_address, (uint64_t)size, copy, target_map, VM_MAP_PAGE_SIZE(target_map));
18684 		}
18685 	}
18686 
18687 	/*
18688 	 * If the user is requesting that we return the address of the
18689 	 * first byte of the data (rather than the base of the page),
18690 	 * then we use different rounding semantics: specifically,
18691 	 * we assume that (memory_address, size) describes a region
18692 	 * all of whose pages we must cover, rather than a base to be truncated
18693 	 * down and a size to be added to that base.  So we figure out
18694 	 * the highest page that the requested region includes and make
18695 	 * sure that the size will cover it.
18696 	 *
18697 	 * The key example we're worried about is of the form:
18698 	 *
18699 	 *              memory_address = 0x1ff0, size = 0x20
18700 	 *
18701 	 * With the old semantics, we round down the memory_address to 0x1000
18702 	 * and round up the size to 0x1000, resulting in our covering *only*
18703 	 * page 0x1000.  With the new semantics, we'd realize that the region covers
18704 	 * 0x1ff0-0x2010, and compute a size of 0x2000.  Thus, we cover both page
18705 	 * 0x1000 and page 0x2000 in the region we remap.
18706 	 */
18707 	if (vmk_flags.vmf_return_data_addr) {
18708 		vm_map_offset_t range_start, range_end;
18709 
18710 		range_start = vm_map_trunc_page(memory_address, src_page_mask);
18711 		range_end = vm_map_round_page(memory_address + size, src_page_mask);
18712 		memory_address = range_start;
18713 		size = range_end - range_start;
18714 		offset_in_mapping = initial_memory_address - memory_address;
18715 	} else {
18716 		/*
18717 		 * IMPORTANT:
18718 		 * This legacy code path is broken: for the range mentioned
18719 		 * above [ memory_address = 0x1ff0,size = 0x20 ], which spans
18720 		 * two 4k pages, it yields [ memory_address = 0x1000,
18721 		 * size = 0x1000 ], which covers only the first 4k page.
18722 		 * BUT some code unfortunately depends on this bug, so we
18723 		 * can't fix it without breaking something.
18724 		 * New code should get automatically opted in the new
18725 		 * behavior with the new VM_FLAGS_RETURN_DATA_ADDR flags.
18726 		 */
18727 		offset_in_mapping = 0;
18728 		memory_address = vm_map_trunc_page(memory_address, src_page_mask);
18729 		size = vm_map_round_page(size, src_page_mask);
18730 		initial_memory_address = memory_address;
18731 		initial_size = size;
18732 	}
18733 
18734 
18735 	if (size == 0) {
18736 		return KERN_INVALID_ARGUMENT;
18737 	}
18738 
18739 	if (vmk_flags.vmf_resilient_media) {
18740 		/* must be copy-on-write to be "media resilient" */
18741 		if (!copy) {
18742 			return KERN_INVALID_ARGUMENT;
18743 		}
18744 	}
18745 
18746 	vmk_flags.vmkf_copy_pageable = target_map->hdr.entries_pageable;
18747 	vmk_flags.vmkf_copy_same_map = (src_map == target_map);
18748 
18749 	assert(size != 0);
18750 	result = vm_map_copy_extract(src_map,
18751 	    memory_address,
18752 	    size,
18753 	    copy, &copy_map,
18754 	    cur_protection, /* IN/OUT */
18755 	    max_protection, /* IN/OUT */
18756 	    inheritance,
18757 	    vmk_flags);
18758 	if (result != KERN_SUCCESS) {
18759 		return result;
18760 	}
18761 	assert(copy_map != VM_MAP_COPY_NULL);
18762 
18763 	/*
18764 	 * Handle the policy for vm map ranges
18765 	 *
18766 	 * If the maps differ, the target_map policy applies like for vm_map()
18767 	 * For same mapping remaps, we preserve the range.
18768 	 */
18769 	if (vmk_flags.vmkf_copy_same_map) {
18770 		vmk_flags.vmkf_range_id = copy_map->orig_range;
18771 	} else {
18772 		vm_map_kernel_flags_update_range_id(&vmk_flags, target_map);
18773 	}
18774 
18775 	overmap_start = 0;
18776 	overmap_end = 0;
18777 	trimmed_start = 0;
18778 	target_size = size;
18779 	if (src_page_mask != target_page_mask) {
18780 		vm_map_copy_t target_copy_map;
18781 
18782 		target_copy_map = copy_map; /* can modify "copy_map" itself */
18783 		DEBUG4K_ADJUST("adjusting...\n");
18784 		result = vm_map_copy_adjust_to_target(
18785 			copy_map,
18786 			offset_in_mapping, /* offset */
18787 			initial_size,
18788 			target_map,
18789 			copy,
18790 			&target_copy_map,
18791 			&overmap_start,
18792 			&overmap_end,
18793 			&trimmed_start);
18794 		if (result != KERN_SUCCESS) {
18795 			DEBUG4K_COPY("failed to adjust 0x%x\n", result);
18796 			vm_map_copy_discard(copy_map);
18797 			return result;
18798 		}
18799 		if (trimmed_start == 0) {
18800 			/* nothing trimmed: no adjustment needed */
18801 		} else if (trimmed_start >= offset_in_mapping) {
18802 			/* trimmed more than offset_in_mapping: nothing left */
18803 			assert(overmap_start == 0);
18804 			assert(overmap_end == 0);
18805 			offset_in_mapping = 0;
18806 		} else {
18807 			/* trimmed some of offset_in_mapping: adjust */
18808 			assert(overmap_start == 0);
18809 			assert(overmap_end == 0);
18810 			offset_in_mapping -= trimmed_start;
18811 		}
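		/*
		 * e.g. trimming 0x1000 off the start of a mapping that was
		 * requested at offset_in_mapping 0x3000 leaves 0x2000 of
		 * offset still to apply within the adjusted copy.
		 */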
18812 		offset_in_mapping += overmap_start;
18813 		target_size = target_copy_map->size;
18814 	}
18815 
18816 	/*
18817 	 * Allocate/check a range of free virtual address
18818 	 * space for the target
18819 	 */
18820 	*address = vm_map_trunc_page(*address, target_page_mask);
18821 	vm_map_lock(target_map);
18822 	target_size = vm_map_round_page(target_size, target_page_mask);
18823 	result = vm_map_remap_range_allocate(target_map, address,
18824 	    target_size, mask, vmk_flags,
18825 	    &insp_entry, &zap_list);
18826 
18827 	for (entry = vm_map_copy_first_entry(copy_map);
18828 	    entry != vm_map_copy_to_entry(copy_map);
18829 	    entry = new_entry) {
18830 		new_entry = entry->vme_next;
18831 		vm_map_copy_entry_unlink(copy_map, entry);
18832 		if (result == KERN_SUCCESS) {
18833 			if (vmk_flags.vmkf_remap_prot_copy) {
18834 				/*
18835 				 * This vm_map_remap() is for a
18836 				 * vm_protect(VM_PROT_COPY), so the caller
18837 				 * expects to be allowed to add write access
18838 				 * to this new mapping.  This is done by
18839 				 * adding VM_PROT_WRITE to each entry's
18840 				 * max_protection... unless some security
18841 				 * settings disallow it.
18842 				 */
18843 				bool allow_write = false;
18844 				if (entry->vme_permanent) {
18845 					/* immutable mapping... */
18846 					if ((entry->max_protection & VM_PROT_EXECUTE) &&
18847 					    developer_mode_state()) {
18848 						/*
18849 						 * ... but executable and
18850 						 * possibly being debugged,
18851 						 * so let's allow it to become
18852 						 * writable, for breakpoints
18853 						 * and dtrace probes, for
18854 						 * example.
18855 						 */
18856 						allow_write = true;
18857 					} else {
18858 						printf("%d[%s] vm_remap(0x%llx,0x%llx) VM_PROT_COPY denied on permanent mapping prot 0x%x/0x%x developer %d\n",
18859 						    proc_selfpid(),
18860 						    (get_bsdtask_info(current_task())
18861 						    ? proc_name_address(get_bsdtask_info(current_task()))
18862 						    : "?"),
18863 						    (uint64_t)memory_address,
18864 						    (uint64_t)size,
18865 						    entry->protection,
18866 						    entry->max_protection,
18867 						    developer_mode_state());
18868 						DTRACE_VM6(vm_map_delete_permanent_deny_protcopy,
18869 						    vm_map_entry_t, entry,
18870 						    vm_map_offset_t, entry->vme_start,
18871 						    vm_map_offset_t, entry->vme_end,
18872 						    vm_prot_t, entry->protection,
18873 						    vm_prot_t, entry->max_protection,
18874 						    int, VME_ALIAS(entry));
18875 					}
18876 				} else {
18877 					allow_write = true;
18878 				}
18879 
18880 				/*
18881 				 * VM_PROT_COPY: allow this mapping to become
18882 				 * writable, unless it was "permanent".
18883 				 */
18884 				if (allow_write) {
18885 					entry->max_protection |= VM_PROT_WRITE;
18886 				}
18887 			}
18888 			if (vmk_flags.vmf_resilient_codesign) {
18889 				/* no codesigning -> read-only access */
18890 				entry->max_protection = VM_PROT_READ;
18891 				entry->protection = VM_PROT_READ;
18892 				entry->vme_resilient_codesign = TRUE;
18893 			}
18894 			entry->vme_start += *address;
18895 			entry->vme_end += *address;
18896 			assert(!entry->map_aligned);
18897 			if (vmk_flags.vmf_resilient_media &&
18898 			    !entry->is_sub_map &&
18899 			    (VME_OBJECT(entry) == VM_OBJECT_NULL ||
18900 			    VME_OBJECT(entry)->internal)) {
18901 				entry->vme_resilient_media = TRUE;
18902 			}
18903 			assert(VM_MAP_PAGE_ALIGNED(entry->vme_start, MIN(target_page_mask, PAGE_MASK)));
18904 			assert(VM_MAP_PAGE_ALIGNED(entry->vme_end, MIN(target_page_mask, PAGE_MASK)));
18905 			assert(VM_MAP_PAGE_ALIGNED(VME_OFFSET(entry), MIN(target_page_mask, PAGE_MASK)));
18906 			vm_map_store_entry_link(target_map, insp_entry, entry,
18907 			    vmk_flags);
18908 			insp_entry = entry;
18909 		} else {
18910 			if (!entry->is_sub_map) {
18911 				vm_object_deallocate(VME_OBJECT(entry));
18912 			} else {
18913 				vm_map_deallocate(VME_SUBMAP(entry));
18914 			}
18915 			vm_map_copy_entry_dispose(entry);
18916 		}
18917 	}
18918 
18919 	if (vmk_flags.vmf_resilient_codesign) {
18920 		*cur_protection = VM_PROT_READ;
18921 		*max_protection = VM_PROT_READ;
18922 	}
18923 
18924 	if (result == KERN_SUCCESS) {
18925 		target_map->size += target_size;
18926 		SAVE_HINT_MAP_WRITE(target_map, insp_entry);
18927 	}
18928 	vm_map_unlock(target_map);
18929 
18930 	vm_map_zap_dispose(&zap_list);
18931 
18932 	if (result == KERN_SUCCESS && target_map->wiring_required) {
18933 		result = vm_map_wire_kernel(target_map, *address,
18934 		    *address + size, *cur_protection, VM_KERN_MEMORY_MLOCK,
18935 		    TRUE);
18936 	}
18937 
18938 	/*
18939 	 * If requested, return the address of the data pointed to by the
18940 	 * request, rather than the base of the resulting page.
18941 	 */
18942 	if (vmk_flags.vmf_return_data_addr) {
18943 		*address += offset_in_mapping;
18944 	}
18945 
18946 	if (src_page_mask != target_page_mask) {
18947 		DEBUG4K_SHARE("vm_remap(%p 0x%llx 0x%llx copy=%d-> %p 0x%llx 0x%llx  result=0x%x\n", src_map, (uint64_t)memory_address, (uint64_t)size, copy, target_map, (uint64_t)*address, (uint64_t)offset_in_mapping, result);
18948 	}
18949 	vm_map_copy_discard(copy_map);
18950 	copy_map = VM_MAP_COPY_NULL;
18951 
18952 	return result;
18953 }
18954 
18955 /*
18956  *	Routine:	vm_map_remap_range_allocate
18957  *
18958  *	Description:
18959  *		Allocate a range in the specified virtual address map.
18960  *		returns the address and the map entry just before the allocated
18961  *		range
18962  *
18963  *	Map must be locked.
18964  */
18965 
18966 static kern_return_t
18967 vm_map_remap_range_allocate(
18968 	vm_map_t                map,
18969 	vm_map_address_t        *address,       /* IN/OUT */
18970 	vm_map_size_t           size,
18971 	vm_map_offset_t         mask,
18972 	vm_map_kernel_flags_t   vmk_flags,
18973 	vm_map_entry_t          *map_entry,     /* OUT */
18974 	vm_map_zap_t            zap_list)
18975 {
18976 	vm_map_entry_t  entry;
18977 	vm_map_offset_t start;
18978 	kern_return_t   kr;
18979 
18980 	start = *address;
18981 
18982 	if (!vmk_flags.vmf_fixed) {
18983 		kr = vm_map_locate_space(map, size, mask, vmk_flags,
18984 		    &start, &entry);
18985 		if (kr != KERN_SUCCESS) {
18986 			return kr;
18987 		}
18988 		*address = start;
18989 	} else {
18990 		vm_map_offset_t effective_min_offset, effective_max_offset;
18991 		vm_map_entry_t  temp_entry;
18992 		vm_map_offset_t end;
18993 
18994 		effective_min_offset = map->min_offset;
18995 		effective_max_offset = map->max_offset;
18996 
18997 		/*
18998 		 *	Verify that:
18999 		 *		the address doesn't itself violate
19000 		 *		the mask requirement.
19001 		 */
19002 
19003 		if ((start & mask) != 0) {
19004 			return KERN_NO_SPACE;
19005 		}
19006 
19007 #if CONFIG_MAP_RANGES
19008 		if (map->uses_user_ranges) {
19009 			struct mach_vm_range r;
19010 
19011 			vm_map_user_range_resolve(map, start, 1, &r);
19012 			if (r.max_address == 0) {
19013 				return KERN_INVALID_ADDRESS;
19014 			}
19015 
19016 			effective_min_offset = r.min_address;
19017 			effective_max_offset = r.max_address;
19018 		}
19019 #endif /* CONFIG_MAP_RANGES */
19020 		if (map == kernel_map) {
19021 			mach_vm_range_t r = kmem_validate_range_for_overwrite(start, size);
19022 			effective_min_offset = r->min_address;
19023 			effective_max_offset = r->max_address;
19024 		}
19025 
19026 		/*
19027 		 *	...	the address is within bounds
19028 		 */
19029 
19030 		end = start + size;
19031 
19032 		if ((start < effective_min_offset) ||
19033 		    (end > effective_max_offset) ||
19034 		    (start >= end)) {
19035 			return KERN_INVALID_ADDRESS;
19036 		}
19037 
19038 		/*
19039 		 * If we're asked to overwrite whatever was mapped in that
19040 		 * range, first deallocate that range.
19041 		 */
19042 		if (vmk_flags.vmf_overwrite) {
19043 			vmr_flags_t remove_flags = VM_MAP_REMOVE_NO_MAP_ALIGN;
19044 
19045 			/*
19046 			 * We use a "zap_list" to avoid having to unlock
19047 			 * the "map" in vm_map_delete(), which would compromise
19048 			 * the atomicity of the "deallocate" and then "remap"
19049 			 * combination.
19050 			 */
19051 			remove_flags |= VM_MAP_REMOVE_NO_YIELD;
19052 
19053 			if (vmk_flags.vmkf_overwrite_immutable) {
19054 				remove_flags |= VM_MAP_REMOVE_IMMUTABLE;
19055 			}
19056 			if (vmk_flags.vmkf_remap_prot_copy) {
19057 				remove_flags |= VM_MAP_REMOVE_IMMUTABLE_CODE;
19058 			}
19059 			kr = vm_map_delete(map, start, end, remove_flags,
19060 			    KMEM_GUARD_NONE, zap_list).kmr_return;
19061 			if (kr != KERN_SUCCESS) {
19062 				/* XXX FBDP restore zap_list? */
19063 				return kr;
19064 			}
19065 		}
19066 
19067 		/*
19068 		 *	...	the starting address isn't allocated
19069 		 */
19070 
19071 		if (vm_map_lookup_entry(map, start, &temp_entry)) {
19072 			return KERN_NO_SPACE;
19073 		}
19074 
19075 		entry = temp_entry;
19076 
19077 		/*
19078 		 *	...	the next region doesn't overlap the
19079 		 *		end point.
19080 		 */
19081 
19082 		if ((entry->vme_next != vm_map_to_entry(map)) &&
19083 		    (entry->vme_next->vme_start < end)) {
19084 			return KERN_NO_SPACE;
19085 		}
19086 	}
19087 	*map_entry = entry;
19088 	return KERN_SUCCESS;
19089 }
19090 
19091 /*
19092  *	vm_map_switch:
19093  *
19094  *	Set the address map for the current thread to the specified map
19095  */
19096 
19097 vm_map_t
19098 vm_map_switch(
19099 	vm_map_t        map)
19100 {
19101 	thread_t        thread = current_thread();
19102 	vm_map_t        oldmap = thread->map;
19103 
19104 
19105 	/*
19106 	 *	Deactivate the current map and activate the requested map
19107 	 */
19108 	mp_disable_preemption();
19109 	PMAP_SWITCH_USER(thread, map, cpu_number());
19110 	mp_enable_preemption();
19111 	return oldmap;
19112 }
19113 
19114 
19115 /*
19116  *	Routine:	vm_map_write_user
19117  *
19118  *	Description:
19119  *		Copy out data from a kernel space into space in the
19120  *		destination map. The space must already exist in the
19121  *		destination map.
19122  *		NOTE:  This routine should only be called by threads
19123  *		which can block on a page fault. i.e. kernel mode user
19124  *		threads.
19125  *
19126  */
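/*
 * Hypothetical usage sketch (not from this file): push a kernel structure
 * out to a user task's address space; "task_map" and "user_addr" are
 * assumed to be a valid map and a writable user address within it:
 *
 *	struct some_stats stats;
 *	kern_return_t kr;
 *
 *	kr = vm_map_write_user(task_map, &stats, user_addr, sizeof(stats));
 *	if (kr != KERN_SUCCESS) {
 *		// "user_addr" wasn't mapped writable in "task_map"
 *	}
 */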
19127 kern_return_t
19128 vm_map_write_user(
19129 	vm_map_t                map,
19130 	void                    *src_p,
19131 	vm_map_address_t        dst_addr,
19132 	vm_size_t               size)
19133 {
19134 	kern_return_t   kr = KERN_SUCCESS;
19135 
19136 	if (__improbable(vm_map_range_overflows(map, dst_addr, size))) {
19137 		return KERN_INVALID_ADDRESS;
19138 	}
19139 
19140 	if (current_map() == map) {
19141 		if (copyout(src_p, dst_addr, size)) {
19142 			kr = KERN_INVALID_ADDRESS;
19143 		}
19144 	} else {
19145 		vm_map_t        oldmap;
19146 
19147 		/* take on the identity of the target map while doing */
19148 		/* the transfer */
19149 
19150 		vm_map_reference(map);
19151 		oldmap = vm_map_switch(map);
19152 		if (copyout(src_p, dst_addr, size)) {
19153 			kr = KERN_INVALID_ADDRESS;
19154 		}
19155 		vm_map_switch(oldmap);
19156 		vm_map_deallocate(map);
19157 	}
19158 	return kr;
19159 }
19160 
19161 /*
19162  *	Routine:	vm_map_read_user
19163  *
19164  *	Description:
19165  *		Copy in data from a user space source map into the
19166  *		kernel map. The space must already exist in the
19167  *		kernel map.
19168  *		NOTE:  This routine should only be called by threads
19169  *		which can block on a page fault. i.e. kernel mode user
19170  *		threads.
19171  *
19172  */
19173 kern_return_t
19174 vm_map_read_user(
19175 	vm_map_t                map,
19176 	vm_map_address_t        src_addr,
19177 	void                    *dst_p,
19178 	vm_size_t               size)
19179 {
19180 	kern_return_t   kr = KERN_SUCCESS;
19181 
19182 	if (__improbable(vm_map_range_overflows(map, src_addr, size))) {
19183 		return KERN_INVALID_ADDRESS;
19184 	}
19185 
19186 	if (current_map() == map) {
19187 		if (copyin(src_addr, dst_p, size)) {
19188 			kr = KERN_INVALID_ADDRESS;
19189 		}
19190 	} else {
19191 		vm_map_t        oldmap;
19192 
19193 		/* take on the identity of the target map while doing */
19194 		/* the transfer */
19195 
19196 		vm_map_reference(map);
19197 		oldmap = vm_map_switch(map);
19198 		if (copyin(src_addr, dst_p, size)) {
19199 			kr = KERN_INVALID_ADDRESS;
19200 		}
19201 		vm_map_switch(oldmap);
19202 		vm_map_deallocate(map);
19203 	}
19204 	return kr;
19205 }
19206 
19207 
19208 /*
19209  *	vm_map_check_protection:
19210  *
19211  *	Assert that the target map allows the specified
19212  *	privilege on the entire address region given.
19213  *	The entire region must be allocated.
19214  */
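/*
 * e.g. vm_map_check_protection(map, addr, addr + len,
 * VM_PROT_READ | VM_PROT_WRITE) returns TRUE only if every page of
 * [addr, addr + len) is mapped with at least read/write access.
 */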
19215 boolean_t
19216 vm_map_check_protection(vm_map_t map, vm_map_offset_t start,
19217     vm_map_offset_t end, vm_prot_t protection)
19218 {
19219 	vm_map_entry_t entry;
19220 	vm_map_entry_t tmp_entry;
19221 
19222 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
19223 		return FALSE;
19224 	}
19225 
19226 	vm_map_lock(map);
19227 
19228 	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) {
19229 		vm_map_unlock(map);
19230 		return FALSE;
19231 	}
19232 
19233 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
19234 		vm_map_unlock(map);
19235 		return FALSE;
19236 	}
19237 
19238 	entry = tmp_entry;
19239 
19240 	while (start < end) {
19241 		if (entry == vm_map_to_entry(map)) {
19242 			vm_map_unlock(map);
19243 			return FALSE;
19244 		}
19245 
19246 		/*
19247 		 *	No holes allowed!
19248 		 */
19249 
19250 		if (start < entry->vme_start) {
19251 			vm_map_unlock(map);
19252 			return FALSE;
19253 		}
19254 
19255 		/*
19256 		 * Check protection associated with entry.
19257 		 */
19258 
19259 		if ((entry->protection & protection) != protection) {
19260 			vm_map_unlock(map);
19261 			return FALSE;
19262 		}
19263 
19264 		/* go to next entry */
19265 
19266 		start = entry->vme_end;
19267 		entry = entry->vme_next;
19268 	}
19269 	vm_map_unlock(map);
19270 	return TRUE;
19271 }
19272 
19273 kern_return_t
19274 vm_map_purgable_control(
19275 	vm_map_t                map,
19276 	vm_map_offset_t         address,
19277 	vm_purgable_t           control,
19278 	int                     *state)
19279 {
19280 	vm_map_entry_t          entry;
19281 	vm_object_t             object;
19282 	kern_return_t           kr;
19283 	boolean_t               was_nonvolatile;
19284 
19285 	/*
19286 	 * Vet all the input parameters and current type and state of the
19287 	 * underlying object.  Return with an error if anything is amiss.
19288 	 */
19289 	if (map == VM_MAP_NULL) {
19290 		return KERN_INVALID_ARGUMENT;
19291 	}
19292 
19293 	if (control != VM_PURGABLE_SET_STATE &&
19294 	    control != VM_PURGABLE_GET_STATE &&
19295 	    control != VM_PURGABLE_PURGE_ALL &&
19296 	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
19297 		return KERN_INVALID_ARGUMENT;
19298 	}
19299 
19300 	if (control == VM_PURGABLE_PURGE_ALL) {
19301 		vm_purgeable_object_purge_all();
19302 		return KERN_SUCCESS;
19303 	}
19304 
19305 	if ((control == VM_PURGABLE_SET_STATE ||
19306 	    control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
19307 	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
19308 	    ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
19309 		return KERN_INVALID_ARGUMENT;
19310 	}
19311 
19312 	vm_map_lock_read(map);
19313 
19314 	if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) {
19315 		/*
19316 		 * Must pass a valid non-submap address.
19317 		 */
19318 		vm_map_unlock_read(map);
19319 		return KERN_INVALID_ADDRESS;
19320 	}
19321 
19322 	if ((entry->protection & VM_PROT_WRITE) == 0 &&
19323 	    control != VM_PURGABLE_GET_STATE) {
19324 		/*
19325 		 * Can't apply purgable controls to something you can't write.
19326 		 */
19327 		vm_map_unlock_read(map);
19328 		return KERN_PROTECTION_FAILURE;
19329 	}
19330 
19331 	object = VME_OBJECT(entry);
19332 	if (object == VM_OBJECT_NULL ||
19333 	    object->purgable == VM_PURGABLE_DENY) {
19334 		/*
19335 		 * Object must already be present and be purgeable.
19336 		 */
19337 		vm_map_unlock_read(map);
19338 		return KERN_INVALID_ARGUMENT;
19339 	}
19340 
19341 	vm_object_lock(object);
19342 
19343 #if 00
19344 	if (VME_OFFSET(entry) != 0 ||
19345 	    entry->vme_end - entry->vme_start != object->vo_size) {
19346 		/*
19347 		 * Can only apply purgable controls to the whole (existing)
19348 		 * object at once.
19349 		 */
19350 		vm_map_unlock_read(map);
19351 		vm_object_unlock(object);
19352 		return KERN_INVALID_ARGUMENT;
19353 	}
19354 #endif
19355 
19356 	assert(!entry->is_sub_map);
19357 	assert(!entry->use_pmap); /* purgeable has its own accounting */
19358 
19359 	vm_map_unlock_read(map);
19360 
19361 	was_nonvolatile = (object->purgable == VM_PURGABLE_NONVOLATILE);
19362 
19363 	kr = vm_object_purgable_control(object, control, state);
19364 
19365 	if (was_nonvolatile &&
19366 	    object->purgable != VM_PURGABLE_NONVOLATILE &&
19367 	    map->pmap == kernel_pmap) {
19368 #if DEBUG
19369 		object->vo_purgeable_volatilizer = kernel_task;
19370 #endif /* DEBUG */
19371 	}
19372 
19373 	vm_object_unlock(object);
19374 
19375 	return kr;
19376 }
19377 
19378 void
19379 vm_map_footprint_query_page_info(
19380 	vm_map_t        map,
19381 	vm_map_entry_t  map_entry,
19382 	vm_map_offset_t curr_s_offset,
19383 	int             *disposition_p)
19384 {
19385 	int             pmap_disp;
19386 	vm_object_t     object = VM_OBJECT_NULL;
19387 	int             disposition;
19388 	int             effective_page_size;
19389 
19390 	vm_map_lock_assert_held(map);
19391 	assert(!map->has_corpse_footprint);
19392 	assert(curr_s_offset >= map_entry->vme_start);
19393 	assert(curr_s_offset < map_entry->vme_end);
19394 
19395 	if (map_entry->is_sub_map) {
19396 		if (!map_entry->use_pmap) {
19397 			/* nested pmap: no footprint */
19398 			*disposition_p = 0;
19399 			return;
19400 		}
19401 	} else {
19402 		object = VME_OBJECT(map_entry);
19403 		if (object == VM_OBJECT_NULL) {
19404 			/* nothing mapped here: no need to ask */
19405 			*disposition_p = 0;
19406 			return;
19407 		}
19408 	}
19409 
19410 	effective_page_size = MIN(PAGE_SIZE, VM_MAP_PAGE_SIZE(map));
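	/* e.g. a 4K map on a 16K-page kernel is queried at 4K granularity */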
19411 
19412 	pmap_disp = 0;
19413 
19414 	/*
19415 	 * Query the pmap.
19416 	 */
19417 	pmap_query_page_info(map->pmap, curr_s_offset, &pmap_disp);
19418 
19419 	/*
19420 	 * Compute this page's disposition.
19421 	 */
19422 	disposition = 0;
19423 
19424 	/* deal with "alternate accounting" first */
19425 	if (!map_entry->is_sub_map &&
19426 	    object->vo_no_footprint) {
19427 		/* does not count in footprint */
19428 		assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry);
19429 	} else if (!map_entry->is_sub_map &&
19430 	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
19431 	    (object->purgable == VM_PURGABLE_DENY &&
19432 	    object->vo_ledger_tag)) &&
19433 	    VM_OBJECT_OWNER(object) != NULL &&
19434 	    VM_OBJECT_OWNER(object)->map == map) {
19435 		assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry);
19436 		if ((((curr_s_offset
19437 		    - map_entry->vme_start
19438 		    + VME_OFFSET(map_entry))
19439 		    / effective_page_size) <
19440 		    (object->resident_page_count +
19441 		    vm_compressor_pager_get_count(object->pager)))) {
19442 			/*
19443 			 * Non-volatile purgeable object owned
19444 			 * by this task: report the first
19445 			 * "#resident + #compressed" pages as
19446 			 * "resident" (to show that they
19447 			 * contribute to the footprint) but not
19448 			 * "dirty" (to avoid double-counting
19449 			 * with the fake "non-volatile" region
19450 			 * we'll report at the end of the
19451 			 * address space to account for all
19452 			 * (mapped or not) non-volatile memory
19453 			 * owned by this task.
19454 			 */
19455 			disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
19456 		}
19457 	} else if (!map_entry->is_sub_map &&
19458 	    (object->purgable == VM_PURGABLE_VOLATILE ||
19459 	    object->purgable == VM_PURGABLE_EMPTY) &&
19460 	    VM_OBJECT_OWNER(object) != NULL &&
19461 	    VM_OBJECT_OWNER(object)->map == map) {
19462 		assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry);
19463 		if ((((curr_s_offset
19464 		    - map_entry->vme_start
19465 		    + VME_OFFSET(map_entry))
19466 		    / effective_page_size) <
19467 		    object->wired_page_count)) {
19468 			/*
19469 			 * Volatile|empty purgeable object owned
19470 			 * by this task: report the first
19471 			 * "#wired" pages as "resident" (to
19472 			 * show that they contribute to the
19473 			 * footprint) but not "dirty" (to avoid
19474 			 * double-counting with the fake
19475 			 * "non-volatile" region we'll report
19476 			 * at the end of the address space to
19477 			 * account for all (mapped or not)
19478 			 * non-volatile memory owned by this
19479 			 * task.
19480 			 */
19481 			disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
19482 		}
19483 	} else if (!map_entry->is_sub_map &&
19484 	    map_entry->iokit_acct &&
19485 	    object->internal &&
19486 	    object->purgable == VM_PURGABLE_DENY) {
19487 		/*
19488 		 * Non-purgeable IOKit memory: phys_footprint
19489 		 * includes the entire virtual mapping.
19490 		 */
19491 		assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry);
19492 		disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
19493 		disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
19494 	} else if (pmap_disp & (PMAP_QUERY_PAGE_ALTACCT |
19495 	    PMAP_QUERY_PAGE_COMPRESSED_ALTACCT)) {
19496 		/* alternate accounting */
19497 #if __arm64__ && (DEVELOPMENT || DEBUG)
19498 		if (map->pmap->footprint_was_suspended) {
19499 			/*
19500 			 * The assertion below can fail if dyld
19501 			 * suspended footprint accounting
19502 			 * while doing some adjustments to
19503 			 * this page;  the mapping would say
19504 			 * "use pmap accounting" but the page
19505 			 * would be marked "alternate
19506 			 * accounting".
19507 			 */
19508 		} else
19509 #endif /* __arm64__ && (DEVELOPMENT || DEBUG) */
19510 		{
19511 			assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry);
19512 		}
19513 		disposition = 0;
19514 	} else {
19515 		if (pmap_disp & PMAP_QUERY_PAGE_PRESENT) {
19516 			assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry);
19517 			disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
19518 			disposition |= VM_PAGE_QUERY_PAGE_REF;
19519 			if (pmap_disp & PMAP_QUERY_PAGE_INTERNAL) {
19520 				disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
19521 			} else {
19522 				disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL;
19523 			}
19524 			if (pmap_disp & PMAP_QUERY_PAGE_REUSABLE) {
19525 				disposition |= VM_PAGE_QUERY_PAGE_REUSABLE;
19526 			}
19527 		} else if (pmap_disp & PMAP_QUERY_PAGE_COMPRESSED) {
19528 			assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry);
19529 			disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT;
19530 		}
19531 	}
19532 
19533 	*disposition_p = disposition;
19534 }
19535 
19536 kern_return_t
19537 vm_map_page_query_internal(
19538 	vm_map_t        target_map,
19539 	vm_map_offset_t offset,
19540 	int             *disposition,
19541 	int             *ref_count)
19542 {
19543 	kern_return_t                   kr;
19544 	vm_page_info_basic_data_t       info;
19545 	mach_msg_type_number_t          count;
19546 
19547 	count = VM_PAGE_INFO_BASIC_COUNT;
19548 	kr = vm_map_page_info(target_map,
19549 	    offset,
19550 	    VM_PAGE_INFO_BASIC,
19551 	    (vm_page_info_t) &info,
19552 	    &count);
19553 	if (kr == KERN_SUCCESS) {
19554 		*disposition = info.disposition;
19555 		*ref_count = info.ref_count;
19556 	} else {
19557 		*disposition = 0;
19558 		*ref_count = 0;
19559 	}
19560 
19561 	return kr;
19562 }
19563 
19564 kern_return_t
19565 vm_map_page_info(
19566 	vm_map_t                map,
19567 	vm_map_offset_t         offset,
19568 	vm_page_info_flavor_t   flavor,
19569 	vm_page_info_t          info,
19570 	mach_msg_type_number_t  *count)
19571 {
19572 	return vm_map_page_range_info_internal(map,
19573 	           offset, /* start of range */
19574 	           (offset + 1), /* this will get rounded in the call to the page boundary */
19575 	           (int)-1, /* effective_page_shift: unspecified */
19576 	           flavor,
19577 	           info,
19578 	           count);
19579 }
19580 
19581 kern_return_t
19582 vm_map_page_range_info_internal(
19583 	vm_map_t                map,
19584 	vm_map_offset_t         start_offset,
19585 	vm_map_offset_t         end_offset,
19586 	int                     effective_page_shift,
19587 	vm_page_info_flavor_t   flavor,
19588 	vm_page_info_t          info,
19589 	mach_msg_type_number_t  *count)
19590 {
19591 	vm_map_entry_t          map_entry = VM_MAP_ENTRY_NULL;
19592 	vm_object_t             object = VM_OBJECT_NULL, curr_object = VM_OBJECT_NULL;
19593 	vm_page_t               m = VM_PAGE_NULL;
19594 	kern_return_t           retval = KERN_SUCCESS;
19595 	int                     disposition = 0;
19596 	int                     ref_count = 0;
19597 	int                     depth = 0, info_idx = 0;
19598 	vm_page_info_basic_t    basic_info = 0;
19599 	vm_map_offset_t         offset_in_page = 0, offset_in_object = 0, curr_offset_in_object = 0;
19600 	vm_map_offset_t         start = 0, end = 0, curr_s_offset = 0, curr_e_offset = 0;
19601 	boolean_t               do_region_footprint;
19602 	ledger_amount_t         ledger_resident, ledger_compressed;
19603 	int                     effective_page_size;
19604 	vm_map_offset_t         effective_page_mask;
19605 
19606 	switch (flavor) {
19607 	case VM_PAGE_INFO_BASIC:
19608 		if (*count != VM_PAGE_INFO_BASIC_COUNT) {
19609 			/*
19610 			 * The "vm_page_info_basic_data" structure was not
19611 			 * properly padded, so allow the size to be off by
19612 			 * one to maintain backwards binary compatibility...
19613 			 */
19614 			if (*count != VM_PAGE_INFO_BASIC_COUNT - 1) {
19615 				return KERN_INVALID_ARGUMENT;
19616 			}
19617 		}
19618 		break;
19619 	default:
19620 		return KERN_INVALID_ARGUMENT;
19621 	}
19622 
19623 	if (effective_page_shift == -1) {
19624 		effective_page_shift = vm_self_region_page_shift_safely(map);
19625 		if (effective_page_shift == -1) {
19626 			return KERN_INVALID_ARGUMENT;
19627 		}
19628 	}
19629 	effective_page_size = (1 << effective_page_shift);
19630 	effective_page_mask = effective_page_size - 1;
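	/* e.g. effective_page_shift 12 -> page size 0x1000, mask 0xfff */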
19631 
19632 	do_region_footprint = task_self_region_footprint();
19633 	disposition = 0;
19634 	ref_count = 0;
19635 	depth = 0;
19636 	info_idx = 0; /* Tracks the next index within the info structure to be filled.*/
19637 	retval = KERN_SUCCESS;
19638 
19639 	if (__improbable(vm_map_range_overflows(map, start_offset, end_offset - start_offset))) {
19640 		return KERN_INVALID_ADDRESS;
19641 	}
19642 
19643 	offset_in_page = start_offset & effective_page_mask;
19644 	start = vm_map_trunc_page(start_offset, effective_page_mask);
19645 	end = vm_map_round_page(end_offset, effective_page_mask);
19646 
19647 	if (end < start) {
19648 		return KERN_INVALID_ARGUMENT;
19649 	}
19650 
19651 	assert((end - start) <= MAX_PAGE_RANGE_QUERY);
19652 
19653 	vm_map_lock_read(map);
19654 
19655 	task_ledgers_footprint(map->pmap->ledger, &ledger_resident, &ledger_compressed);
19656 
19657 	for (curr_s_offset = start; curr_s_offset < end;) {
19658 		/*
19659 		 * New lookup needs reset of these variables.
19660 		 */
19661 		curr_object = object = VM_OBJECT_NULL;
19662 		offset_in_object = 0;
19663 		ref_count = 0;
19664 		depth = 0;
19665 
19666 		if (do_region_footprint &&
19667 		    curr_s_offset >= vm_map_last_entry(map)->vme_end) {
19668 			/*
19669 			 * Request for "footprint" info about a page beyond
19670 			 * the end of address space: this must be for
19671 			 * the fake region vm_map_region_recurse_64()
19672 			 * reported to account for non-volatile purgeable
19673 			 * memory owned by this task.
19674 			 */
19675 			disposition = 0;
19676 
19677 			if (curr_s_offset - vm_map_last_entry(map)->vme_end <=
19678 			    (unsigned) ledger_compressed) {
19679 				/*
19680 				 * We haven't reported all the "non-volatile
19681 				 * compressed" pages yet, so report this fake
19682 				 * page as "compressed".
19683 				 */
19684 				disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT;
19685 			} else {
19686 				/*
19687 				 * We've reported all the non-volatile
19688 				 * compressed pages but not all the non-volatile
19689 				 * pages, so report this fake page as
19690 				 * "resident dirty".
19691 				 */
19692 				disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
19693 				disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
19694 				disposition |= VM_PAGE_QUERY_PAGE_REF;
19695 			}
19696 			switch (flavor) {
19697 			case VM_PAGE_INFO_BASIC:
19698 				basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic)));
19699 				basic_info->disposition = disposition;
19700 				basic_info->ref_count = 1;
19701 				basic_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
19702 				basic_info->offset = 0;
19703 				basic_info->depth = 0;
19704 
19705 				info_idx++;
19706 				break;
19707 			}
19708 			curr_s_offset += effective_page_size;
19709 			continue;
19710 		}
19711 
19712 		/*
19713 		 * First, find the map entry covering "curr_s_offset", going down
19714 		 * submaps if necessary.
19715 		 */
19716 		if (!vm_map_lookup_entry(map, curr_s_offset, &map_entry)) {
19717 			/* no entry -> no object -> no page */
19718 
19719 			if (curr_s_offset < vm_map_min(map)) {
19720 				/*
19721 				 * Illegal address that falls below map min.
19722 				 */
19723 				curr_e_offset = MIN(end, vm_map_min(map));
19724 			} else if (curr_s_offset >= vm_map_max(map)) {
19725 				/*
19726 				 * Illegal address that falls on/after map max.
19727 				 */
19728 				curr_e_offset = end;
19729 			} else if (map_entry == vm_map_to_entry(map)) {
19730 				/*
19731 				 * Hit a hole.
19732 				 */
19733 				if (map_entry->vme_next == vm_map_to_entry(map)) {
19734 					/*
19735 					 * Empty map.
19736 					 */
19737 					curr_e_offset = MIN(map->max_offset, end);
19738 				} else {
19739 					/*
19740 					 * Hole at start of the map.
19741 					 */
19742 					curr_e_offset = MIN(map_entry->vme_next->vme_start, end);
19743 				}
19744 			} else {
19745 				if (map_entry->vme_next == vm_map_to_entry(map)) {
19746 					/*
19747 					 * Hole at the end of the map.
19748 					 */
19749 					curr_e_offset = MIN(map->max_offset, end);
19750 				} else {
19751 					curr_e_offset = MIN(map_entry->vme_next->vme_start, end);
19752 				}
19753 			}
19754 
19755 			assert(curr_e_offset >= curr_s_offset);
19756 
19757 			uint64_t num_pages = (curr_e_offset - curr_s_offset) >> effective_page_shift;
19758 
19759 			void *info_ptr = (void*) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic)));
19760 
19761 			bzero(info_ptr, num_pages * sizeof(struct vm_page_info_basic));
19762 
19763 			curr_s_offset = curr_e_offset;
19764 
19765 			info_idx += num_pages;
19766 
19767 			continue;
19768 		}
19769 
19770 		/* compute offset from this map entry's start */
19771 		offset_in_object = curr_s_offset - map_entry->vme_start;
19772 
19773 		/* compute offset into this map entry's object (or submap) */
19774 		offset_in_object += VME_OFFSET(map_entry);
19775 
19776 		if (map_entry->is_sub_map) {
19777 			vm_map_t sub_map = VM_MAP_NULL;
19778 			vm_page_info_t submap_info = 0;
19779 			vm_map_offset_t submap_s_offset = 0, submap_e_offset = 0, range_len = 0;
19780 
19781 			range_len = MIN(map_entry->vme_end, end) - curr_s_offset;
19782 
19783 			submap_s_offset = offset_in_object;
19784 			submap_e_offset = submap_s_offset + range_len;
19785 
19786 			sub_map = VME_SUBMAP(map_entry);
19787 
19788 			vm_map_reference(sub_map);
19789 			vm_map_unlock_read(map);
19790 
19791 			submap_info = (vm_page_info_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic)));
19792 
19793 			assertf(VM_MAP_PAGE_SHIFT(sub_map) >= VM_MAP_PAGE_SHIFT(map),
19794 			    "Submap page size (%d) differs from current map (%d)\n", VM_MAP_PAGE_SIZE(sub_map), VM_MAP_PAGE_SIZE(map));
19795 
19796 			retval = vm_map_page_range_info_internal(sub_map,
19797 			    submap_s_offset,
19798 			    submap_e_offset,
19799 			    effective_page_shift,
19800 			    VM_PAGE_INFO_BASIC,
19801 			    (vm_page_info_t) submap_info,
19802 			    count);
19803 
19804 			assert(retval == KERN_SUCCESS);
19805 
19806 			vm_map_lock_read(map);
19807 			vm_map_deallocate(sub_map);
19808 
19809 			/* Move the "info" index by the number of pages we inspected. */
19810 			info_idx += range_len >> effective_page_shift;
19811 
19812 			/* Move our current offset by the size of the range we inspected. */
19813 			curr_s_offset += range_len;
19814 
19815 			continue;
19816 		}
19817 
19818 		object = VME_OBJECT(map_entry);
19819 
19820 		if (object == VM_OBJECT_NULL) {
19821 			/*
19822 			 * We don't have an object here and, hence,
19823 			 * no pages to inspect. We'll fill up the
19824 			 * info structure appropriately.
19825 			 */
19826 
19827 			curr_e_offset = MIN(map_entry->vme_end, end);
19828 
19829 			uint64_t num_pages = (curr_e_offset - curr_s_offset) >> effective_page_shift;
19830 
19831 			void *info_ptr = (void*) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic)));
19832 
19833 			bzero(info_ptr, num_pages * sizeof(struct vm_page_info_basic));
19834 
19835 			curr_s_offset = curr_e_offset;
19836 
19837 			info_idx += num_pages;
19838 
19839 			continue;
19840 		}
19841 
19842 		if (do_region_footprint) {
19843 			disposition = 0;
19844 			if (map->has_corpse_footprint) {
19845 				/*
19846 				 * Query the page info data we saved
19847 				 * while forking the corpse.
19848 				 */
19849 				vm_map_corpse_footprint_query_page_info(
19850 					map,
19851 					curr_s_offset,
19852 					&disposition);
19853 			} else {
19854 				/*
19855 				 * Query the live pmap for footprint info
19856 				 * about this page.
19857 				 */
19858 				vm_map_footprint_query_page_info(
19859 					map,
19860 					map_entry,
19861 					curr_s_offset,
19862 					&disposition);
19863 			}
19864 			switch (flavor) {
19865 			case VM_PAGE_INFO_BASIC:
19866 				basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic)));
19867 				basic_info->disposition = disposition;
19868 				basic_info->ref_count = 1;
19869 				basic_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
19870 				basic_info->offset = 0;
19871 				basic_info->depth = 0;
19872 
19873 				info_idx++;
19874 				break;
19875 			}
19876 			curr_s_offset += effective_page_size;
19877 			continue;
19878 		}
19879 
19880 		vm_object_reference(object);
19881 		/*
19882 		 * Shared mode -- so we can allow other readers
19883 		 * to grab the lock too.
19884 		 */
19885 		vm_object_lock_shared(object);
19886 
19887 		curr_e_offset = MIN(map_entry->vme_end, end);
19888 
19889 		vm_map_unlock_read(map);
19890 
19891 		map_entry = NULL; /* map is unlocked, the entry is no longer valid. */
19892 
19893 		curr_object = object;
19894 
19895 		for (; curr_s_offset < curr_e_offset;) {
19896 			if (object == curr_object) {
19897 				ref_count = curr_object->ref_count - 1; /* account for our object reference above. */
19898 			} else {
19899 				ref_count = curr_object->ref_count;
19900 			}
19901 
19902 			curr_offset_in_object = offset_in_object;
19903 
19904 			for (;;) {
19905 				m = vm_page_lookup(curr_object, vm_object_trunc_page(curr_offset_in_object));
19906 
19907 				if (m != VM_PAGE_NULL) {
19908 					disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
19909 					break;
19910 				} else {
19911 					if (curr_object->internal &&
19912 					    curr_object->alive &&
19913 					    !curr_object->terminating &&
19914 					    curr_object->pager_ready) {
19915 						if (VM_COMPRESSOR_PAGER_STATE_GET(curr_object, vm_object_trunc_page(curr_offset_in_object))
19916 						    == VM_EXTERNAL_STATE_EXISTS) {
19917 							/* the pager has that page */
19918 							disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT;
19919 							break;
19920 						}
19921 					}
19922 
19923 					/*
19924 					 * Go down the VM object shadow chain until we find the page
19925 					 * we're looking for.
19926 					 */
19927 
19928 					if (curr_object->shadow != VM_OBJECT_NULL) {
19929 						vm_object_t shadow = VM_OBJECT_NULL;
19930 
19931 						curr_offset_in_object += curr_object->vo_shadow_offset;
19932 						shadow = curr_object->shadow;
19933 
19934 						vm_object_lock_shared(shadow);
19935 						vm_object_unlock(curr_object);
19936 
19937 						curr_object = shadow;
19938 						depth++;
19939 						continue;
19940 					} else {
19941 						break;
19942 					}
19943 				}
19944 			}
19945 
19946 			/* The ref_count is not strictly accurate: it measures the number  */
19947 			/* of entities holding a ref on the object; they may not be mapping*/
19948 			/* the object or may not be mapping the section holding the        */
19949 			/* target page, but it's still a ballpark number and, though an    */
19950 			/* overcount, it picks up the copy-on-write cases.                  */
19951 
19952 			/* We could also get a picture of page sharing from pmap_attributes */
19953 			/* but this would undercount, as only faulted-in mappings would     */
19954 			/* show up.							     */
19955 
19956 			if ((curr_object == object) && curr_object->shadow) {
19957 				disposition |= VM_PAGE_QUERY_PAGE_COPIED;
19958 			}
19959 
19960 			if (!curr_object->internal) {
19961 				disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL;
19962 			}
19963 
19964 			if (m != VM_PAGE_NULL) {
19965 				if (m->vmp_fictitious) {
19966 					disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
19967 				} else {
19968 					if (m->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m))) {
19969 						disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
19970 					}
19971 
19972 					if (m->vmp_reference || pmap_is_referenced(VM_PAGE_GET_PHYS_PAGE(m))) {
19973 						disposition |= VM_PAGE_QUERY_PAGE_REF;
19974 					}
19975 
19976 					if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
19977 						disposition |= VM_PAGE_QUERY_PAGE_SPECULATIVE;
19978 					}
19979 
19980 					/*
19981 					 * XXX TODO4K:
19982 					 * when this routine deals with 4k
19983 					 * pages, check the appropriate CS bit
19984 					 * here.
19985 					 */
19986 					if (m->vmp_cs_validated) {
19987 						disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED;
19988 					}
19989 					if (m->vmp_cs_tainted) {
19990 						disposition |= VM_PAGE_QUERY_PAGE_CS_TAINTED;
19991 					}
19992 					if (m->vmp_cs_nx) {
19993 						disposition |= VM_PAGE_QUERY_PAGE_CS_NX;
19994 					}
19995 					if (m->vmp_reusable || curr_object->all_reusable) {
19996 						disposition |= VM_PAGE_QUERY_PAGE_REUSABLE;
19997 					}
19998 				}
19999 			}
20000 
20001 			switch (flavor) {
20002 			case VM_PAGE_INFO_BASIC:
20003 				basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic)));
20004 				basic_info->disposition = disposition;
20005 				basic_info->ref_count = ref_count;
20006 				basic_info->object_id = (vm_object_id_t) (uintptr_t)
20007 				    VM_KERNEL_ADDRPERM(curr_object);
20008 				basic_info->offset =
20009 				    (memory_object_offset_t) curr_offset_in_object + offset_in_page;
20010 				basic_info->depth = depth;
20011 
20012 				info_idx++;
20013 				break;
20014 			}
20015 
20016 			disposition = 0;
20017 			offset_in_page = 0; // This doesn't really make sense for any offset other than the starting offset.
20018 
20019 			/*
20020 			 * Move to next offset in the range and in our object.
20021 			 */
20022 			curr_s_offset += effective_page_size;
20023 			offset_in_object += effective_page_size;
20024 			curr_offset_in_object = offset_in_object;
20025 
20026 			if (curr_object != object) {
20027 				vm_object_unlock(curr_object);
20028 
20029 				curr_object = object;
20030 
20031 				vm_object_lock_shared(curr_object);
20032 			} else {
20033 				vm_object_lock_yield_shared(curr_object);
20034 			}
20035 		}
20036 
20037 		vm_object_unlock(curr_object);
20038 		vm_object_deallocate(curr_object);
20039 
20040 		vm_map_lock_read(map);
20041 	}
20042 
20043 	vm_map_unlock_read(map);
20044 	return retval;
20045 }
20046 
20047 /*
20048  *	vm_map_msync
20049  *
20050  *	Synchronises the memory range specified with its backing store
20051  *	image by either flushing or cleaning the contents to the appropriate
20052  *	memory manager, engaging in a memory object synchronize dialog with
20053  *	the manager.  The client doesn't return until the manager issues
20054  *	an m_o_s_completed message.  MIG magically converts the user task
20055  *	parameter to the task's address map.
20056  *
20057  *	interpretation of sync_flags
20058  *	VM_SYNC_INVALIDATE	- discard pages, only return precious
20059  *				  pages to manager.
20060  *
20061  *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
20062  *				- discard pages, write dirty or precious
20063  *				  pages back to memory manager.
20064  *
20065  *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
20066  *				- write dirty or precious pages back to
20067  *				  the memory manager.
20068  *
20069  *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
20070  *				  is a hole in the region, and we would
20071  *				  have returned KERN_SUCCESS, return
20072  *				  KERN_INVALID_ADDRESS instead.
20073  *
20074  *	NOTE
20075  *	The memory object attributes have not yet been implemented; this
20076  *	function will have to deal with the invalidate attribute.
20077  *
20078  *	RETURNS
20079  *	KERN_INVALID_TASK		Bad task parameter
20080  *	KERN_INVALID_ARGUMENT		both sync and async were specified.
20081  *	KERN_SUCCESS			The usual.
20082  *	KERN_INVALID_ADDRESS		There was a hole in the region.
20083  */
20084 
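/*
 * Illustrative caller sketch (hypothetical, not from the original source):
 * a synchronous flush of a dirty range back to its pager that also insists
 * on a hole-free range might look like:
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_msync(map, addr, len,
 *	    VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
 *	if (kr == KERN_INVALID_ADDRESS) {
 *		// the range contained a hole
 *	}
 */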
20085 kern_return_t
20086 vm_map_msync(
20087 	vm_map_t                map,
20088 	vm_map_address_t        address,
20089 	vm_map_size_t           size,
20090 	vm_sync_t               sync_flags)
20091 {
20092 	vm_map_entry_t          entry;
20093 	vm_map_size_t           amount_left;
20094 	vm_object_offset_t      offset;
20095 	vm_object_offset_t      start_offset, end_offset;
20096 	boolean_t               do_sync_req;
20097 	boolean_t               had_hole = FALSE;
20098 	vm_map_offset_t         pmap_offset;
20099 
20100 	if ((sync_flags & VM_SYNC_ASYNCHRONOUS) &&
20101 	    (sync_flags & VM_SYNC_SYNCHRONOUS)) {
20102 		return KERN_INVALID_ARGUMENT;
20103 	}
20104 
20105 	if (__improbable(vm_map_range_overflows(map, address, size))) {
20106 		return KERN_INVALID_ADDRESS;
20107 	}
20108 
20109 	if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
20110 		DEBUG4K_SHARE("map %p address 0x%llx size 0x%llx flags 0x%x\n", map, (uint64_t)address, (uint64_t)size, sync_flags);
20111 	}
20112 
20113 	/*
20114 	 * align address and size on page boundaries
20115 	 */
20116 	size = (vm_map_round_page(address + size,
20117 	    VM_MAP_PAGE_MASK(map)) -
20118 	    vm_map_trunc_page(address,
20119 	    VM_MAP_PAGE_MASK(map)));
20120 	address = vm_map_trunc_page(address,
20121 	    VM_MAP_PAGE_MASK(map));
20122 
20123 	if (map == VM_MAP_NULL) {
20124 		return KERN_INVALID_TASK;
20125 	}
20126 
20127 	if (size == 0) {
20128 		return KERN_SUCCESS;
20129 	}
20130 
20131 	amount_left = size;
20132 
20133 	while (amount_left > 0) {
20134 		vm_object_size_t        flush_size;
20135 		vm_object_t             object;
20136 
20137 		vm_map_lock(map);
20138 		if (!vm_map_lookup_entry(map,
20139 		    address,
20140 		    &entry)) {
20141 			vm_map_size_t   skip;
20142 
20143 			/*
20144 			 * hole in the address map.
20145 			 */
20146 			had_hole = TRUE;
20147 
20148 			if (sync_flags & VM_SYNC_KILLPAGES) {
20149 				/*
20150 				 * For VM_SYNC_KILLPAGES, there should be
20151 				 * no holes in the range, since we couldn't
20152 				 * prevent someone else from allocating in
20153 				 * that hole and we wouldn't want to "kill"
20154 				 * their pages.
20155 				 */
20156 				vm_map_unlock(map);
20157 				break;
20158 			}
20159 
20160 			/*
20161 			 * Check for empty map.
20162 			 */
20163 			if (entry == vm_map_to_entry(map) &&
20164 			    entry->vme_next == entry) {
20165 				vm_map_unlock(map);
20166 				break;
20167 			}
20168 			/*
20169 			 * Check that we don't wrap and that
20170 			 * we have at least one real map entry.
20171 			 */
20172 			if ((map->hdr.nentries == 0) ||
20173 			    (entry->vme_next->vme_start < address)) {
20174 				vm_map_unlock(map);
20175 				break;
20176 			}
20177 			/*
20178 			 * Move up to the next entry if needed
20179 			 */
20180 			skip = (entry->vme_next->vme_start - address);
20181 			if (skip >= amount_left) {
20182 				amount_left = 0;
20183 			} else {
20184 				amount_left -= skip;
20185 			}
20186 			address = entry->vme_next->vme_start;
20187 			vm_map_unlock(map);
20188 			continue;
20189 		}
20190 
20191 		offset = address - entry->vme_start;
20192 		pmap_offset = address;
20193 
20194 		/*
20195 		 * do we have more to flush than is contained in this
20196 		 * entry ?
20197 		 * entry?
20198 		if (amount_left + entry->vme_start + offset > entry->vme_end) {
20199 			flush_size = entry->vme_end -
20200 			    (entry->vme_start + offset);
20201 		} else {
20202 			flush_size = amount_left;
20203 		}
20204 		amount_left -= flush_size;
20205 		address += flush_size;
20206 
20207 		if (entry->is_sub_map == TRUE) {
20208 			vm_map_t        local_map;
20209 			vm_map_offset_t local_offset;
20210 
20211 			local_map = VME_SUBMAP(entry);
20212 			local_offset = VME_OFFSET(entry);
20213 			vm_map_reference(local_map);
20214 			vm_map_unlock(map);
20215 			if (vm_map_msync(
20216 				    local_map,
20217 				    local_offset,
20218 				    flush_size,
20219 				    sync_flags) == KERN_INVALID_ADDRESS) {
20220 				had_hole = TRUE;
20221 			}
20222 			vm_map_deallocate(local_map);
20223 			continue;
20224 		}
20225 		object = VME_OBJECT(entry);
20226 
20227 		/*
20228 		 * We can't sync this object if the object has not been
20229 		 * created yet
20230 		 */
20231 		if (object == VM_OBJECT_NULL) {
20232 			vm_map_unlock(map);
20233 			continue;
20234 		}
20235 		offset += VME_OFFSET(entry);
20236 
20237 		vm_object_lock(object);
20238 
20239 		if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) {
20240 			int kill_pages = 0;
20241 
20242 			if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
20243 				/*
20244 				 * This is a destructive operation and so we
20245 				 * err on the side of limiting the range of
20246 				 * the operation.
20247 				 */
20248 				start_offset = vm_object_round_page(offset);
20249 				end_offset = vm_object_trunc_page(offset + flush_size);
20250 
20251 				if (end_offset <= start_offset) {
20252 					vm_object_unlock(object);
20253 					vm_map_unlock(map);
20254 					continue;
20255 				}
20256 
20257 				pmap_offset += start_offset - offset;
20258 			} else {
20259 				start_offset = offset;
20260 				end_offset = offset + flush_size;
20261 			}
20262 
20263 			if (sync_flags & VM_SYNC_KILLPAGES) {
20264 				if (((object->ref_count == 1) ||
20265 				    ((object->copy_strategy !=
20266 				    MEMORY_OBJECT_COPY_SYMMETRIC) &&
20267 				    (object->vo_copy == VM_OBJECT_NULL))) &&
20268 				    (object->shadow == VM_OBJECT_NULL)) {
20269 					if (object->ref_count != 1) {
20270 						vm_page_stats_reusable.free_shared++;
20271 					}
20272 					kill_pages = 1;
20273 				} else {
20274 					kill_pages = -1;
20275 				}
20276 			}
20277 			if (kill_pages != -1) {
20278 				vm_object_deactivate_pages(
20279 					object,
20280 					start_offset,
20281 					(vm_object_size_t) (end_offset - start_offset),
20282 					kill_pages,
20283 					FALSE, /* reusable_pages */
20284 					FALSE, /* reusable_no_write */
20285 					map->pmap,
20286 					pmap_offset);
20287 			}
20288 			vm_object_unlock(object);
20289 			vm_map_unlock(map);
20290 			continue;
20291 		}
20292 		/*
20293 		 * We can't sync this object if there isn't a pager.
20294 		 * Don't bother to sync internal objects, since there can't
20295 		 * be any "permanent" storage for these objects anyway.
20296 		 */
20297 		if ((object->pager == MEMORY_OBJECT_NULL) ||
20298 		    (object->internal) || (object->private)) {
20299 			vm_object_unlock(object);
20300 			vm_map_unlock(map);
20301 			continue;
20302 		}
20303 		/*
20304 		 * keep reference on the object until syncing is done
20305 		 */
20306 		vm_object_reference_locked(object);
20307 		vm_object_unlock(object);
20308 
20309 		vm_map_unlock(map);
20310 
20311 		if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) {
20312 			start_offset = vm_object_trunc_page(offset);
20313 			end_offset = vm_object_round_page(offset + flush_size);
20314 		} else {
20315 			start_offset = offset;
20316 			end_offset = offset + flush_size;
20317 		}
20318 
20319 		do_sync_req = vm_object_sync(object,
20320 		    start_offset,
20321 		    (end_offset - start_offset),
20322 		    sync_flags & VM_SYNC_INVALIDATE,
20323 		    ((sync_flags & VM_SYNC_SYNCHRONOUS) ||
20324 		    (sync_flags & VM_SYNC_ASYNCHRONOUS)),
20325 		    sync_flags & VM_SYNC_SYNCHRONOUS);
20326 
20327 		if ((sync_flags & VM_SYNC_INVALIDATE) && object->resident_page_count == 0) {
20328 			/*
20329 			 * clear out the clustering and read-ahead hints
20330 			 */
20331 			vm_object_lock(object);
20332 
20333 			object->pages_created = 0;
20334 			object->pages_used = 0;
20335 			object->sequential = 0;
20336 			object->last_alloc = 0;
20337 
20338 			vm_object_unlock(object);
20339 		}
20340 		vm_object_deallocate(object);
20341 	} /* while */
20342 
20343 	/* for proper msync() behaviour */
20344 	if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS)) {
20345 		return KERN_INVALID_ADDRESS;
20346 	}
20347 
20348 	return KERN_SUCCESS;
20349 }/* vm_msync */
20350 
20351 void
20352 vm_named_entry_associate_vm_object(
20353 	vm_named_entry_t        named_entry,
20354 	vm_object_t             object,
20355 	vm_object_offset_t      offset,
20356 	vm_object_size_t        size,
20357 	vm_prot_t               prot)
20358 {
20359 	vm_map_copy_t copy;
20360 	vm_map_entry_t copy_entry;
20361 
20362 	assert(!named_entry->is_sub_map);
20363 	assert(!named_entry->is_copy);
20364 	assert(!named_entry->is_object);
20365 	assert(!named_entry->internal);
20366 	assert(named_entry->backing.copy == VM_MAP_COPY_NULL);
20367 
20368 	copy = vm_map_copy_allocate(VM_MAP_COPY_ENTRY_LIST);
20369 	copy->offset = offset;
20370 	copy->size = size;
20371 	copy->cpy_hdr.page_shift = (uint16_t)PAGE_SHIFT;
20372 
20373 	copy_entry = vm_map_copy_entry_create(copy);
20374 	copy_entry->protection = prot;
20375 	copy_entry->max_protection = prot;
20376 	copy_entry->use_pmap = TRUE;
20377 	copy_entry->vme_start = VM_MAP_TRUNC_PAGE(offset, PAGE_MASK);
20378 	copy_entry->vme_end = VM_MAP_ROUND_PAGE(offset + size, PAGE_MASK);
20379 	VME_OBJECT_SET(copy_entry, object, false, 0);
20380 	VME_OFFSET_SET(copy_entry, vm_object_trunc_page(offset));
20381 	vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), copy_entry);
20382 
20383 	named_entry->backing.copy = copy;
20384 	named_entry->is_object = TRUE;
20385 	if (object->internal) {
20386 		named_entry->internal = TRUE;
20387 	}
20388 
20389 	DEBUG4K_MEMENTRY("named_entry %p copy %p object %p offset 0x%llx size 0x%llx prot 0x%x\n",
20390 	    named_entry, copy, object, offset, size, prot);
20391 }
20392 
20393 vm_object_t
20394 vm_named_entry_to_vm_object(
20395 	vm_named_entry_t named_entry)
20396 {
20397 	vm_map_copy_t   copy;
20398 	vm_map_entry_t  copy_entry;
20399 	vm_object_t     object;
20400 
20401 	assert(!named_entry->is_sub_map);
20402 	assert(!named_entry->is_copy);
20403 	assert(named_entry->is_object);
20404 	copy = named_entry->backing.copy;
20405 	assert(copy != VM_MAP_COPY_NULL);
20406 	/*
20407 	 * Assert that the vm_map_copy is coming from the right
20408 	 * zone and hasn't been forged
20409 	 */
20410 	vm_map_copy_require(copy);
20411 	assert(copy->cpy_hdr.nentries == 1);
20412 	copy_entry = vm_map_copy_first_entry(copy);
20413 	object = VME_OBJECT(copy_entry);
20414 
20415 	DEBUG4K_MEMENTRY("%p -> %p -> %p [0x%llx 0x%llx 0x%llx 0x%x/0x%x ] -> %p offset 0x%llx size 0x%llx prot 0x%x\n", named_entry, copy, copy_entry, (uint64_t)copy_entry->vme_start, (uint64_t)copy_entry->vme_end, copy_entry->vme_offset, copy_entry->protection, copy_entry->max_protection, object, named_entry->offset, named_entry->size, named_entry->protection);
20416 
20417 	return object;
20418 }
20419 
20420 /*
20421  *	Routine:	convert_port_entry_to_map
20422  *	Purpose:
20423  *		Convert from a port specifying an entry or a task
20424  *		to a map. Doesn't consume the port ref; produces a map ref,
20425  *		which may be null.  Unlike convert_port_to_map, the
20426  *		port may be task- or named-entry-backed.
20427  *	Conditions:
20428  *		Nothing locked.
20429  */
20430 
20431 vm_map_t
20432 convert_port_entry_to_map(
20433 	ipc_port_t      port)
20434 {
20435 	vm_map_t map = VM_MAP_NULL;
20436 	vm_named_entry_t named_entry;
20437 
20438 	if (!IP_VALID(port)) {
20439 		return VM_MAP_NULL;
20440 	}
20441 
20442 	if (ip_kotype(port) != IKOT_NAMED_ENTRY) {
20443 		return convert_port_to_map(port);
20444 	}
20445 
20446 	named_entry = mach_memory_entry_from_port(port);
20447 
20448 	if ((named_entry->is_sub_map) &&
20449 	    (named_entry->protection & VM_PROT_WRITE)) {
20450 		map = named_entry->backing.map;
20451 		if (map->pmap != PMAP_NULL) {
20452 			if (map->pmap == kernel_pmap) {
20453 				panic("userspace has access "
20454 				    "to a kernel map %p", map);
20455 			}
20456 			pmap_require(map->pmap);
20457 		}
20458 		vm_map_reference(map);
20459 	}
20460 
20461 	return map;
20462 }
20463 
20464 /*
20465  * Export routines to other components for the things we access locally through
20466  * macros.
20467  */
20468 #undef current_map
20469 vm_map_t
20470 current_map(void)
20471 {
20472 	return current_map_fast();
20473 }
20474 
20475 /*
20476  *	vm_map_reference:
20477  *
20478  *	Takes a reference on the specified map.
20479  */
20480 void
20481 vm_map_reference(
20482 	vm_map_t        map)
20483 {
20484 	if (__probable(map != VM_MAP_NULL)) {
20485 		vm_map_require(map);
20486 		os_ref_retain_raw(&map->map_refcnt, &map_refgrp);
20487 	}
20488 }
20489 
20490 /*
20491  *	vm_map_deallocate:
20492  *
20493  *	Removes a reference from the specified map,
20494  *	destroying it if no references remain.
20495  *	The map should not be locked.
20496  */
20497 void
20498 vm_map_deallocate(
20499 	vm_map_t        map)
20500 {
20501 	if (__probable(map != VM_MAP_NULL)) {
20502 		vm_map_require(map);
20503 		if (os_ref_release_raw(&map->map_refcnt, &map_refgrp) == 0) {
20504 			vm_map_destroy(map);
20505 		}
20506 	}
20507 }
20508 
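/*
 * Reference-counting sketch (illustrative): vm_map_reference() and
 * vm_map_deallocate() must be paired.  The submap case in vm_map_msync()
 * above follows this pattern:
 *
 *	vm_map_reference(local_map);
 *	vm_map_unlock(map);
 *	... operate on local_map without the parent map lock held ...
 *	vm_map_deallocate(local_map);
 */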
20509 void
20510 vm_map_inspect_deallocate(
20511 	vm_map_inspect_t      map)
20512 {
20513 	vm_map_deallocate((vm_map_t)map);
20514 }
20515 
20516 void
20517 vm_map_read_deallocate(
20518 	vm_map_read_t      map)
20519 {
20520 	vm_map_deallocate((vm_map_t)map);
20521 }
20522 
20523 
20524 void
20525 vm_map_disable_NX(vm_map_t map)
20526 {
20527 	if (map == NULL) {
20528 		return;
20529 	}
20530 	if (map->pmap == NULL) {
20531 		return;
20532 	}
20533 
20534 	pmap_disable_NX(map->pmap);
20535 }
20536 
20537 void
20538 vm_map_disallow_data_exec(vm_map_t map)
20539 {
20540 	if (map == NULL) {
20541 		return;
20542 	}
20543 
20544 	map->map_disallow_data_exec = TRUE;
20545 }
20546 
20547 /* XXX Consider making these constants (VM_MAX_ADDRESS and MACH_VM_MAX_ADDRESS)
20548  * more descriptive.
20549  */
20550 void
20551 vm_map_set_32bit(vm_map_t map)
20552 {
20553 #if defined(__arm64__)
20554 	map->max_offset = pmap_max_offset(FALSE, ARM_PMAP_MAX_OFFSET_DEVICE);
20555 #else
20556 	map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS;
20557 #endif
20558 }
20559 
20560 
20561 void
20562 vm_map_set_64bit(vm_map_t map)
20563 {
20564 #if defined(__arm64__)
20565 	map->max_offset = pmap_max_offset(TRUE, ARM_PMAP_MAX_OFFSET_DEVICE);
20566 #else
20567 	map->max_offset = (vm_map_offset_t)MACH_VM_MAX_ADDRESS;
20568 #endif
20569 }
20570 
20571 /*
20572  * Expand the maximum size of an existing map to the maximum supported.
20573  */
20574 void
20575 vm_map_set_jumbo(vm_map_t map)
20576 {
20577 #if defined (__arm64__) && !XNU_TARGET_OS_OSX
20578 	vm_map_set_max_addr(map, ~0);
20579 #else /* arm64 */
20580 	(void) map;
20581 #endif
20582 }
20583 
20584 /*
20585  * This map has a JIT entitlement
20586  */
20587 void
20588 vm_map_set_jit_entitled(vm_map_t map)
20589 {
20590 #if defined (__arm64__)
20591 	pmap_set_jit_entitled(map->pmap);
20592 #else /* arm64 */
20593 	(void) map;
20594 #endif
20595 }
20596 
20597 /*
20598  * Get the status of this map's TPRO flag
20599  */
20600 boolean_t
20601 vm_map_tpro(vm_map_t map)
20602 {
20603 #if defined (__arm64e__)
20604 	return pmap_get_tpro(map->pmap);
20605 #else /* arm64e */
20606 	(void) map;
20607 	return FALSE;
20608 #endif
20609 }
20610 
20611 /*
20612  * Enable TPRO on this map
20613  */
20614 void
20615 vm_map_set_tpro(vm_map_t map)
20616 {
20617 #if defined (__arm64e__)
20618 	pmap_set_tpro(map->pmap);
20619 #else /* arm64e */
20620 	(void) map;
20621 #endif
20622 }
20623 
20624 /*
20625  * Does this map have TPRO enforcement enabled?
20626  */
20627 boolean_t
20628 vm_map_tpro_enforcement(vm_map_t map)
20629 {
20630 	return map->tpro_enforcement;
20631 }
20632 
20633 /*
20634  * Set TPRO enforcement for this map
20635  */
20636 void
20637 vm_map_set_tpro_enforcement(vm_map_t map)
20638 {
20639 	if (vm_map_tpro(map)) {
20640 		vm_map_lock(map);
20641 		map->tpro_enforcement = TRUE;
20642 		vm_map_unlock(map);
20643 	}
20644 }
20645 
20646 /*
20647  * Enable TPRO on the requested region
20648  *
20649  * Note:
20650  *     This routine is primarily intended to be called during/soon after map
20651  *     creation before the associated task has been released to run. It is only
20652  *     currently safe when we have no resident pages.
20653  */
20654 boolean_t
20655 vm_map_set_tpro_range(
20656 	__unused vm_map_t map,
20657 	__unused vm_map_address_t start,
20658 	__unused vm_map_address_t end)
20659 {
20660 	return TRUE;
20661 }
20662 
20663 /*
20664  * Expand the maximum size of an existing map.
20665  */
20666 void
20667 vm_map_set_max_addr(vm_map_t map, vm_map_offset_t new_max_offset)
20668 {
20669 #if defined(__arm64__)
20670 	vm_map_offset_t max_supported_offset;
20671 	vm_map_offset_t old_max_offset;
20672 
20673 	vm_map_lock(map);
20674 
20675 	old_max_offset = map->max_offset;
20676 	max_supported_offset = pmap_max_offset(vm_map_is_64bit(map), ARM_PMAP_MAX_OFFSET_JUMBO);
20677 
20678 	new_max_offset = trunc_page(new_max_offset);
20679 
20680 	/* The address space cannot be shrunk using this routine. */
20681 	if (old_max_offset >= new_max_offset) {
20682 		vm_map_unlock(map);
20683 		return;
20684 	}
20685 
20686 	if (max_supported_offset < new_max_offset) {
20687 		new_max_offset = max_supported_offset;
20688 	}
20689 
20690 	map->max_offset = new_max_offset;
20691 
20692 	if (map->holelistenabled) {
20693 		if (map->holes_list->prev->vme_end == old_max_offset) {
20694 			/*
20695 			 * There is already a hole at the end of the map; simply make it bigger.
20696 			 */
20697 			map->holes_list->prev->vme_end = map->max_offset;
20698 		} else {
20699 			/*
20700 			 * There is no hole at the end, so we need to create a new hole
20701 			 * for the new empty space we're creating.
20702 			 */
20703 			struct vm_map_links *new_hole;
20704 
20705 			new_hole = zalloc_id(ZONE_ID_VM_MAP_HOLES, Z_WAITOK | Z_NOFAIL);
20706 			new_hole->start = old_max_offset;
20707 			new_hole->end = map->max_offset;
20708 			new_hole->prev = map->holes_list->prev;
20709 			new_hole->next = (struct vm_map_entry *)map->holes_list;
20710 			map->holes_list->prev->vme_next = (struct vm_map_entry *)new_hole;
20711 			map->holes_list->prev = (struct vm_map_entry *)new_hole;
20712 		}
20713 	}
20714 
20715 	vm_map_unlock(map);
20716 #else
20717 	(void)map;
20718 	(void)new_max_offset;
20719 #endif
20720 }
20721 
20722 vm_map_offset_t
20723 vm_compute_max_offset(boolean_t is64)
20724 {
20725 #if defined(__arm64__)
20726 	return pmap_max_offset(is64, ARM_PMAP_MAX_OFFSET_DEVICE);
20727 #else
20728 	return is64 ? (vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS;
20729 #endif
20730 }
20731 
20732 void
20733 vm_map_get_max_aslr_slide_section(
20734 	vm_map_t                map __unused,
20735 	int64_t                 *max_sections,
20736 	int64_t                 *section_size)
20737 {
20738 #if defined(__arm64__)
20739 	*max_sections = 3;
20740 	*section_size = ARM_TT_TWIG_SIZE;
20741 #else
20742 	*max_sections = 1;
20743 	*section_size = 0;
20744 #endif
20745 }
20746 
20747 uint64_t
20748 vm_map_get_max_aslr_slide_pages(vm_map_t map)
20749 {
20750 #if defined(__arm64__)
20751 	/* Limit arm64 slide to 16MB to conserve contiguous VA space in the more
20752 	 * limited embedded address space; this is also meant to minimize pmap
20753 	 * memory usage on 16KB page systems.
20754 	 */
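	/*
	 * e.g. with 16KB pages (VM_MAP_PAGE_SHIFT == 14):
	 * 1 << (24 - 14) = 1024 pages * 16KB = 16MB of slide.
	 */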
20755 	return 1 << (24 - VM_MAP_PAGE_SHIFT(map));
20756 #else
20757 	return 1 << (vm_map_is_64bit(map) ? 16 : 8);
20758 #endif
20759 }
20760 
20761 uint64_t
20762 vm_map_get_max_loader_aslr_slide_pages(vm_map_t map)
20763 {
20764 #if defined(__arm64__)
20765 	/* We limit the loader slide to 4MB, in order to ensure at least 8 bits
20766 	 * of independent entropy on 16KB page systems.
20767 	 */
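	/*
	 * e.g. with 16KB pages: 1 << (22 - 14) = 256 pages * 16KB = 4MB,
	 * i.e. 8 bits of slide entropy.
	 */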
20768 	return 1 << (22 - VM_MAP_PAGE_SHIFT(map));
20769 #else
20770 	return 1 << (vm_map_is_64bit(map) ? 16 : 8);
20771 #endif
20772 }
20773 
20774 boolean_t
20775 vm_map_is_64bit(
20776 	vm_map_t map)
20777 {
20778 	return map->max_offset > ((vm_map_offset_t)VM_MAX_ADDRESS);
20779 }
20780 
20781 boolean_t
20782 vm_map_has_hard_pagezero(
20783 	vm_map_t        map,
20784 	vm_map_offset_t pagezero_size)
20785 {
20786 	/*
20787 	 * XXX FBDP
20788 	 * We should lock the VM map (for read) here but we can get away
20789 	 * with it for now because there can't really be any race condition:
20790 	 * the VM map's min_offset is changed only when the VM map is created
20791 	 * and when the zero page is established (when the binary gets loaded),
20792 	 * and this routine gets called only when the task terminates and the
20793 	 * VM map is being torn down, and when a new map is created via
20794 	 * load_machfile()/execve().
20795 	 */
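	/*
	 * e.g. a standard 64-bit executable typically reserves a 4GB
	 * __PAGEZERO, so a caller checking for it would pass
	 * pagezero_size == 0x100000000.
	 */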
20796 	return map->min_offset >= pagezero_size;
20797 }
20798 
20799 /*
20800  * Raise a VM map's maximum offset.
20801  */
20802 kern_return_t
20803 vm_map_raise_max_offset(
20804 	vm_map_t        map,
20805 	vm_map_offset_t new_max_offset)
20806 {
20807 	kern_return_t   ret;
20808 
20809 	vm_map_lock(map);
20810 	ret = KERN_INVALID_ADDRESS;
20811 
20812 	if (new_max_offset >= map->max_offset) {
20813 		if (!vm_map_is_64bit(map)) {
20814 			if (new_max_offset <= (vm_map_offset_t)VM_MAX_ADDRESS) {
20815 				map->max_offset = new_max_offset;
20816 				ret = KERN_SUCCESS;
20817 			}
20818 		} else {
20819 			if (new_max_offset <= (vm_map_offset_t)MACH_VM_MAX_ADDRESS) {
20820 				map->max_offset = new_max_offset;
20821 				ret = KERN_SUCCESS;
20822 			}
20823 		}
20824 	}
20825 
20826 	vm_map_unlock(map);
20827 	return ret;
20828 }
20829 
20830 
20831 /*
20832  * Raise a VM map's minimum offset.
20833  * To strictly enforce "page zero" reservation.
20834  */
20835 kern_return_t
20836 vm_map_raise_min_offset(
20837 	vm_map_t        map,
20838 	vm_map_offset_t new_min_offset)
20839 {
20840 	vm_map_entry_t  first_entry;
20841 
20842 	new_min_offset = vm_map_round_page(new_min_offset,
20843 	    VM_MAP_PAGE_MASK(map));
20844 
20845 	vm_map_lock(map);
20846 
20847 	if (new_min_offset < map->min_offset) {
20848 		/*
20849 		 * Can't move min_offset backwards, as that would expose
20850 		 * a part of the address space that was previously, and for
20851 		 * possibly good reasons, inaccessible.
20852 		 */
20853 		vm_map_unlock(map);
20854 		return KERN_INVALID_ADDRESS;
20855 	}
20856 	if (new_min_offset >= map->max_offset) {
20857 		/* can't go beyond the end of the address space */
20858 		vm_map_unlock(map);
20859 		return KERN_INVALID_ADDRESS;
20860 	}
20861 
20862 	first_entry = vm_map_first_entry(map);
20863 	if (first_entry != vm_map_to_entry(map) &&
20864 	    first_entry->vme_start < new_min_offset) {
20865 		/*
20866 		 * Some memory was already allocated below the new
20867 		 * minimum offset.  It's too late to change it now...
20868 		 */
20869 		vm_map_unlock(map);
20870 		return KERN_NO_SPACE;
20871 	}
20872 
20873 	map->min_offset = new_min_offset;
20874 
20875 	if (map->holelistenabled) {
20876 		assert(map->holes_list);
20877 		map->holes_list->start = new_min_offset;
20878 		assert(new_min_offset < map->holes_list->end);
20879 	}
20880 
20881 	vm_map_unlock(map);
20882 
20883 	return KERN_SUCCESS;
20884 }
20885 
20886 /*
20887  * Set the limit on the maximum amount of address space and user wired memory allowed for this map.
20888  * This is basically a copy of the RLIMIT_AS and RLIMIT_MEMLOCK rlimit value maintained by the BSD
20889  * side of the kernel. The limits are checked in the mach VM side, so we keep a copy so we don't
20890  * have to reach over to the BSD data structures.
20891  */
20892 
20893 uint64_t vm_map_set_size_limit_count = 0;
20894 kern_return_t
20895 vm_map_set_size_limit(vm_map_t map, uint64_t new_size_limit)
20896 {
20897 	kern_return_t kr;
20898 
20899 	vm_map_lock(map);
20900 	if (new_size_limit < map->size) {
20901 		/* new limit should not be lower than its current size */
20902 		DTRACE_VM2(vm_map_set_size_limit_fail,
20903 		    vm_map_size_t, map->size,
20904 		    uint64_t, new_size_limit);
20905 		kr = KERN_FAILURE;
20906 	} else if (new_size_limit == map->size_limit) {
20907 		/* no change */
20908 		kr = KERN_SUCCESS;
20909 	} else {
20910 		/* set new limit */
20911 		DTRACE_VM2(vm_map_set_size_limit,
20912 		    vm_map_size_t, map->size,
20913 		    uint64_t, new_size_limit);
20914 		if (new_size_limit != RLIM_INFINITY) {
20915 			vm_map_set_size_limit_count++;
20916 		}
20917 		map->size_limit = new_size_limit;
20918 		kr = KERN_SUCCESS;
20919 	}
20920 	vm_map_unlock(map);
20921 	return kr;
20922 }
20923 
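/*
 * Illustrative call site (hypothetical): the BSD side would mirror a
 * setrlimit(RLIMIT_AS, ...) change into the Mach VM map with something like:
 *
 *	kr = vm_map_set_size_limit(current_map(), rlim.rlim_cur);
 *
 * vm_map_set_data_limit() below plays the analogous role for the data limit.
 */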
20924 uint64_t vm_map_set_data_limit_count = 0;
20925 kern_return_t
20926 vm_map_set_data_limit(vm_map_t map, uint64_t new_data_limit)
20927 {
20928 	kern_return_t kr;
20929 
20930 	vm_map_lock(map);
20931 	if (new_data_limit < map->size) {
20932 		/* new limit should not be lower than its current size */
20933 		DTRACE_VM2(vm_map_set_data_limit_fail,
20934 		    vm_map_size_t, map->size,
20935 		    uint64_t, new_data_limit);
20936 		kr = KERN_FAILURE;
20937 	} else if (new_data_limit == map->data_limit) {
20938 		/* no change */
20939 		kr = KERN_SUCCESS;
20940 	} else {
20941 		/* set new limit */
20942 		DTRACE_VM2(vm_map_set_data_limit,
20943 		    vm_map_size_t, map->size,
20944 		    uint64_t, new_data_limit);
20945 		if (new_data_limit != RLIM_INFINITY) {
20946 			vm_map_set_data_limit_count++;
20947 		}
20948 		map->data_limit = new_data_limit;
20949 		kr = KERN_SUCCESS;
20950 	}
20951 	vm_map_unlock(map);
20952 	return kr;
20953 }
20954 
20955 void
20956 vm_map_set_user_wire_limit(vm_map_t     map,
20957     vm_size_t    limit)
20958 {
20959 	vm_map_lock(map);
20960 	map->user_wire_limit = limit;
20961 	vm_map_unlock(map);
20962 }
20963 
20964 
20965 void
20966 vm_map_switch_protect(vm_map_t     map,
20967     boolean_t    val)
20968 {
20969 	vm_map_lock(map);
20970 	map->switch_protect = val;
20971 	vm_map_unlock(map);
20972 }
20973 
20974 extern int cs_process_enforcement_enable;
20975 boolean_t
20976 vm_map_cs_enforcement(
20977 	vm_map_t map)
20978 {
20979 	if (cs_process_enforcement_enable) {
20980 		return TRUE;
20981 	}
20982 	return map->cs_enforcement;
20983 }
20984 
20985 kern_return_t
20986 vm_map_cs_wx_enable(
20987 	__unused vm_map_t map)
20988 {
20989 #if CODE_SIGNING_MONITOR
20990 	kern_return_t ret = csm_allow_invalid_code(vm_map_pmap(map));
20991 	if ((ret == KERN_SUCCESS) || (ret == KERN_NOT_SUPPORTED)) {
20992 		return KERN_SUCCESS;
20993 	}
20994 	return ret;
20995 #else
20996 	/* The VM manages WX memory entirely on its own */
20997 	return KERN_SUCCESS;
20998 #endif
20999 }
21000 
21001 kern_return_t
21002 vm_map_csm_allow_jit(
21003 	__unused vm_map_t map)
21004 {
21005 #if CODE_SIGNING_MONITOR
21006 	return csm_allow_jit_region(vm_map_pmap(map));
21007 #else
21008 	/* No code signing monitor to enforce JIT policy */
21009 	return KERN_SUCCESS;
21010 #endif
21011 }
21012 
21013 void
21014 vm_map_cs_debugged_set(
21015 	vm_map_t map,
21016 	boolean_t val)
21017 {
21018 	vm_map_lock(map);
21019 	map->cs_debugged = val;
21020 	vm_map_unlock(map);
21021 }
21022 
21023 void
21024 vm_map_cs_enforcement_set(
21025 	vm_map_t map,
21026 	boolean_t val)
21027 {
21028 	vm_map_lock(map);
21029 	map->cs_enforcement = val;
21030 	pmap_set_vm_map_cs_enforced(map->pmap, val);
21031 	vm_map_unlock(map);
21032 }
21033 
21034 /*
21035  * IOKit has mapped a region into this map; adjust the pmap's ledgers appropriately.
21036  * phys_footprint is a composite limit consisting of iokit + physmem, so we need to
21037  * bump both counters.
21038  */
21039 void
21040 vm_map_iokit_mapped_region(vm_map_t map, vm_size_t bytes)
21041 {
21042 	pmap_t pmap = vm_map_pmap(map);
21043 
21044 	ledger_credit(pmap->ledger, task_ledgers.iokit_mapped, bytes);
21045 	ledger_credit(pmap->ledger, task_ledgers.phys_footprint, bytes);
21046 }
21047 
21048 void
21049 vm_map_iokit_unmapped_region(vm_map_t map, vm_size_t bytes)
21050 {
21051 	pmap_t pmap = vm_map_pmap(map);
21052 
21053 	ledger_debit(pmap->ledger, task_ledgers.iokit_mapped, bytes);
21054 	ledger_debit(pmap->ledger, task_ledgers.phys_footprint, bytes);
21055 }
21056 
21057 /* Add (generate) code signature for memory range */
21058 #if CONFIG_DYNAMIC_CODE_SIGNING
21059 kern_return_t
21060 vm_map_sign(vm_map_t map,
21061     vm_map_offset_t start,
21062     vm_map_offset_t end)
21063 {
21064 	vm_map_entry_t entry;
21065 	vm_page_t m;
21066 	vm_object_t object;
21067 
21068 	/*
21069 	 * Vet all the input parameters and current type and state of the
21070 	 * underlying object.  Return with an error if anything is amiss.
21071 	 */
21072 	if (map == VM_MAP_NULL) {
21073 		return KERN_INVALID_ARGUMENT;
21074 	}
21075 
21076 	if (__improbable(vm_map_range_overflows(map, start, end - start))) {
21077 		return KERN_INVALID_ADDRESS;
21078 	}
21079 
21080 	vm_map_lock_read(map);
21081 
21082 	if (!vm_map_lookup_entry(map, start, &entry) || entry->is_sub_map) {
21083 		/*
21084 		 * Must pass a valid non-submap address.
21085 		 */
21086 		vm_map_unlock_read(map);
21087 		return KERN_INVALID_ADDRESS;
21088 	}
21089 
21090 	if ((entry->vme_start > start) || (entry->vme_end < end)) {
21091 		/*
21092 		 * Map entry doesn't cover the requested range. Not handling
21093 		 * this situation currently.
21094 		 */
21095 		vm_map_unlock_read(map);
21096 		return KERN_INVALID_ARGUMENT;
21097 	}
21098 
21099 	object = VME_OBJECT(entry);
21100 	if (object == VM_OBJECT_NULL) {
21101 		/*
21102 		 * Object must already be present or we can't sign.
21103 		 */
21104 		vm_map_unlock_read(map);
21105 		return KERN_INVALID_ARGUMENT;
21106 	}
21107 
21108 	vm_object_lock(object);
21109 	vm_map_unlock_read(map);
21110 
21111 	while (start < end) {
21112 		uint32_t refmod;
21113 
21114 		m = vm_page_lookup(object,
21115 		    start - entry->vme_start + VME_OFFSET(entry));
21116 		if (m == VM_PAGE_NULL) {
21117 			/* should we try to fault a page here? we can probably
21118 			 * demand it exists and is locked for this request */
21119 			vm_object_unlock(object);
21120 			return KERN_FAILURE;
21121 		}
21122 		/* deal with special page status */
21123 		if (m->vmp_busy ||
21124 		    (m->vmp_unusual && (VMP_ERROR_GET(m) || m->vmp_restart || m->vmp_private || m->vmp_absent))) {
21125 			vm_object_unlock(object);
21126 			return KERN_FAILURE;
21127 		}
21128 
21129 		/* Page is OK... now "validate" it */
21130 		/* This is the place where we'll call out to create a code
21131 		 * directory, later */
21132 		/* XXX TODO4K: deal with 4k subpages individually? */
21133 		m->vmp_cs_validated = VMP_CS_ALL_TRUE;
21134 
21135 		/* The page is now "clean" for codesigning purposes. That means
21136 		 * we don't consider it as modified (wpmapped) anymore. But
21137 		 * we'll disconnect the page so we note any future modification
21138 		 * attempts. */
21139 		m->vmp_wpmapped = FALSE;
21140 		refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
21141 
21142 		/* Pull the dirty status from the pmap, since we cleared the
21143 		 * wpmapped bit */
21144 		if ((refmod & VM_MEM_MODIFIED) && !m->vmp_dirty) {
21145 			SET_PAGE_DIRTY(m, FALSE);
21146 		}
21147 
21148 		/* On to the next page */
21149 		start += PAGE_SIZE;
21150 	}
21151 	vm_object_unlock(object);
21152 
21153 	return KERN_SUCCESS;
21154 }
21155 #endif
21156 
21157 kern_return_t
21158 vm_map_partial_reap(vm_map_t map, unsigned int *reclaimed_resident, unsigned int *reclaimed_compressed)
21159 {
21160 	vm_map_entry_t  entry = VM_MAP_ENTRY_NULL;
21161 	vm_map_entry_t  next_entry;
21162 	kern_return_t   kr = KERN_SUCCESS;
21163 	VM_MAP_ZAP_DECLARE(zap_list);
21164 
21165 	vm_map_lock(map);
21166 
21167 	for (entry = vm_map_first_entry(map);
21168 	    entry != vm_map_to_entry(map);
21169 	    entry = next_entry) {
21170 		next_entry = entry->vme_next;
21171 
21172 		if (!entry->is_sub_map &&
21173 		    VME_OBJECT(entry) &&
21174 		    (VME_OBJECT(entry)->internal == TRUE) &&
21175 		    (VME_OBJECT(entry)->ref_count == 1)) {
21176 			*reclaimed_resident += VME_OBJECT(entry)->resident_page_count;
21177 			*reclaimed_compressed += vm_compressor_pager_get_count(VME_OBJECT(entry)->pager);
21178 
21179 			(void)vm_map_delete(map, entry->vme_start,
21180 			    entry->vme_end, VM_MAP_REMOVE_NO_YIELD,
21181 			    KMEM_GUARD_NONE, &zap_list);
21182 		}
21183 	}
21184 
21185 	vm_map_unlock(map);
21186 
21187 	vm_map_zap_dispose(&zap_list);
21188 
21189 	return kr;
21190 }
21191 
21192 
21193 #if DEVELOPMENT || DEBUG
21194 
21195 int
21196 vm_map_disconnect_page_mappings(
21197 	vm_map_t map,
21198 	boolean_t do_unnest)
21199 {
21200 	vm_map_entry_t entry;
21201 	ledger_amount_t byte_count = 0;
21202 
21203 	if (do_unnest == TRUE) {
21204 #ifndef NO_NESTED_PMAP
21205 		vm_map_lock(map);
21206 
21207 		for (entry = vm_map_first_entry(map);
21208 		    entry != vm_map_to_entry(map);
21209 		    entry = entry->vme_next) {
21210 			if (entry->is_sub_map && entry->use_pmap) {
21211 				/*
21212 				 * Make sure the range between the start of this entry and
21213 				 * the end of this entry is no longer nested, so that
21214 				 * we will only remove mappings from the pmap in use by
21215 				 * this task.
21216 				 */
21217 				vm_map_clip_unnest(map, entry, entry->vme_start, entry->vme_end);
21218 			}
21219 		}
21220 		vm_map_unlock(map);
21221 #endif
21222 	}
21223 	vm_map_lock_read(map);
21224 
21225 	ledger_get_balance(map->pmap->ledger, task_ledgers.phys_mem, &byte_count);
21226 
21227 	for (entry = vm_map_first_entry(map);
21228 	    entry != vm_map_to_entry(map);
21229 	    entry = entry->vme_next) {
21230 		if (!entry->is_sub_map && ((VME_OBJECT(entry) == 0) ||
21231 		    (VME_OBJECT(entry)->phys_contiguous))) {
21232 			continue;
21233 		}
21234 		if (entry->is_sub_map) {
21235 			assert(!entry->use_pmap);
21236 		}
21237 
21238 		pmap_remove_options(map->pmap, entry->vme_start, entry->vme_end, 0);
21239 	}
21240 	vm_map_unlock_read(map);
21241 
21242 	return (int) (byte_count / VM_MAP_PAGE_SIZE(map));
21243 }
21244 
21245 kern_return_t
21246 vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr)
21247 {
21248 	vm_object_t object = NULL;
21249 	vm_object_offset_t offset;
21250 	vm_prot_t prot;
21251 	boolean_t wired;
21252 	vm_map_version_t version;
21253 	vm_map_t real_map;
21254 	int result = KERN_FAILURE;
21255 
21256 	vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
21257 	vm_map_lock(map);
21258 
21259 	result = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
21260 	    OBJECT_LOCK_EXCLUSIVE, &version, &object, &offset, &prot, &wired,
21261 	    NULL, &real_map, NULL);
21262 	if (object == NULL) {
21263 		result = KERN_MEMORY_ERROR;
21264 	} else if (object->pager) {
21265 		result = vm_compressor_pager_inject_error(object->pager,
21266 		    offset);
21267 	} else {
21268 		result = KERN_MEMORY_PRESENT;
21269 	}
21270 
21271 	if (object != NULL) {
21272 		vm_object_unlock(object);
21273 	}
21274 
21275 	if (real_map != map) {
21276 		vm_map_unlock(real_map);
21277 	}
21278 	vm_map_unlock(map);
21279 
21280 	return result;
21281 }
21282 
21283 #endif
21284 
21285 
21286 #if CONFIG_FREEZE
21287 
21288 
21289 extern struct freezer_context freezer_context_global;
21290 AbsoluteTime c_freezer_last_yield_ts = 0;
21291 
21292 extern unsigned int memorystatus_freeze_private_shared_pages_ratio;
21293 extern unsigned int memorystatus_freeze_shared_mb_per_process_max;
21294 
21295 kern_return_t
21296 vm_map_freeze(
21297 	task_t       task,
21298 	unsigned int *purgeable_count,
21299 	unsigned int *wired_count,
21300 	unsigned int *clean_count,
21301 	unsigned int *dirty_count,
21302 	unsigned int dirty_budget,
21303 	unsigned int *shared_count,
21304 	int          *freezer_error_code,
21305 	boolean_t    eval_only)
21306 {
21307 	vm_map_entry_t  entry2 = VM_MAP_ENTRY_NULL;
21308 	kern_return_t   kr = KERN_SUCCESS;
21309 	boolean_t       evaluation_phase = TRUE;
21310 	vm_object_t     cur_shared_object = NULL;
21311 	int             cur_shared_obj_ref_cnt = 0;
21312 	unsigned int    dirty_private_count = 0, dirty_shared_count = 0, obj_pages_snapshot = 0;
21313 
21314 	*purgeable_count = *wired_count = *clean_count = *dirty_count = *shared_count = 0;
21315 
21316 	/*
21317 	 * We need the exclusive lock here so that we can
21318 	 * block any page faults or lookups while we are
21319 	 * in the middle of freezing this vm map.
21320 	 */
21321 	vm_map_t map = task->map;
21322 
21323 	vm_map_lock(map);
21324 
21325 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
21326 
21327 	if (vm_compressor_low_on_space() || vm_swap_low_on_space()) {
21328 		if (vm_compressor_low_on_space()) {
21329 			*freezer_error_code = FREEZER_ERROR_NO_COMPRESSOR_SPACE;
21330 		}
21331 
21332 		if (vm_swap_low_on_space()) {
21333 			*freezer_error_code = FREEZER_ERROR_NO_SWAP_SPACE;
21334 		}
21335 
21336 		kr = KERN_NO_SPACE;
21337 		goto done;
21338 	}
21339 
21340 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE == FALSE) {
21341 		/*
21342 		 * In-memory compressor backing the freezer. No disk.
21343 		 * So no need to do the evaluation phase.
21344 		 */
21345 		evaluation_phase = FALSE;
21346 
21347 		if (eval_only == TRUE) {
21348 			/*
21349 			 * We don't support 'eval_only' mode
21350 			 * in this non-swap config.
21351 			 */
21352 			*freezer_error_code = FREEZER_ERROR_GENERIC;
21353 			kr = KERN_INVALID_ARGUMENT;
21354 			goto done;
21355 		}
21356 
21357 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
21358 		clock_get_uptime(&c_freezer_last_yield_ts);
21359 	}
21360 again:
21361 
21362 	for (entry2 = vm_map_first_entry(map);
21363 	    entry2 != vm_map_to_entry(map);
21364 	    entry2 = entry2->vme_next) {
21365 		vm_object_t src_object;
21366 
21367 		if (entry2->is_sub_map) {
21368 			continue;
21369 		}
21370 
21371 		src_object = VME_OBJECT(entry2);
21372 		if (!src_object ||
21373 		    src_object->phys_contiguous ||
21374 		    !src_object->internal) {
21375 			continue;
21376 		}
21377 
21378 		/* If eligible, scan the entry, moving eligible pages over to our parent object */
21379 
21380 		if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
21381 			/*
21382 			 * We skip purgeable objects during evaluation phase only.
21383 			 * If we decide to freeze this process, we'll explicitly
21384 			 * purge these objects before we go around again with
21385 			 * 'evaluation_phase' set to FALSE.
21386 			 */
21387 
21388 			if ((src_object->purgable == VM_PURGABLE_EMPTY) || (src_object->purgable == VM_PURGABLE_VOLATILE)) {
21389 				/*
21390 				 * We want to purge objects that may not belong to this task but are mapped
21391 				 * in this task alone. Since we already purged this task's purgeable memory
21392 				 * at the end of a successful evaluation phase, we want to avoid doing no-op calls
21393 				 * on this task's purgeable objects. Hence the check for only volatile objects.
21394 				 */
21395 				if (evaluation_phase == FALSE &&
21396 				    (src_object->purgable == VM_PURGABLE_VOLATILE) &&
21397 				    (src_object->ref_count == 1)) {
21398 					vm_object_lock(src_object);
21399 					vm_object_purge(src_object, 0);
21400 					vm_object_unlock(src_object);
21401 				}
21402 				continue;
21403 			}
21404 
21405 			/*
21406 			 * Pages belonging to this object could be swapped to disk.
21407 			 * Make sure it's not a shared object because we could end
21408 			 * up just bringing it back in again.
21409 			 *
21410 			 * We try to optimize somewhat by checking for objects that are mapped
21411 			 * more than once within our own map. But we don't do full searches,
21412 			 * more than once within our own map. But we don't do full searches;
21413 			 */
21414 
21415 			if (src_object->ref_count > 1) {
21416 				if (src_object != cur_shared_object) {
21417 					obj_pages_snapshot = (src_object->resident_page_count - src_object->wired_page_count) + vm_compressor_pager_get_count(src_object->pager);
21418 					dirty_shared_count += obj_pages_snapshot;
21419 
21420 					cur_shared_object = src_object;
21421 					cur_shared_obj_ref_cnt = 1;
21422 					continue;
21423 				} else {
21424 					cur_shared_obj_ref_cnt++;
21425 					if (src_object->ref_count == cur_shared_obj_ref_cnt) {
21426 						/*
21427 						 * Fall through to below and treat this object as private.
21428 						 * So deduct its pages from our shared total and add it to the
21429 						 * private total.
21430 						 */
21431 
21432 						dirty_shared_count -= obj_pages_snapshot;
21433 						dirty_private_count += obj_pages_snapshot;
21434 					} else {
21435 						continue;
21436 					}
21437 				}
21438 			}
21439 
21440 
21441 			if (src_object->ref_count == 1) {
21442 				dirty_private_count += (src_object->resident_page_count - src_object->wired_page_count) + vm_compressor_pager_get_count(src_object->pager);
21443 			}
21444 
21445 			if (evaluation_phase == TRUE) {
21446 				continue;
21447 			}
21448 		}
21449 
21450 		uint32_t paged_out_count = vm_object_compressed_freezer_pageout(src_object, dirty_budget);
21451 		*wired_count += src_object->wired_page_count;
21452 
21453 		if (vm_compressor_low_on_space() || vm_swap_low_on_space()) {
21454 			if (vm_compressor_low_on_space()) {
21455 				*freezer_error_code = FREEZER_ERROR_NO_COMPRESSOR_SPACE;
21456 			}
21457 
21458 			if (vm_swap_low_on_space()) {
21459 				*freezer_error_code = FREEZER_ERROR_NO_SWAP_SPACE;
21460 			}
21461 
21462 			kr = KERN_NO_SPACE;
21463 			break;
21464 		}
21465 		if (paged_out_count >= dirty_budget) {
21466 			break;
21467 		}
21468 		dirty_budget -= paged_out_count;
21469 	}
21470 
21471 	*shared_count = (unsigned int) ((dirty_shared_count * PAGE_SIZE_64) / (1024 * 1024ULL));
21472 	if (evaluation_phase) {
21473 		unsigned int shared_pages_threshold = (memorystatus_freeze_shared_mb_per_process_max * 1024 * 1024ULL) / PAGE_SIZE_64;
21474 
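		/*
		 * Worked example (illustrative values only): with 16KB pages
		 * and memorystatus_freeze_shared_mb_per_process_max == 32,
		 * the threshold is (32 * 1024 * 1024) / 16384 = 2048 shared
		 * pages.
		 */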
21475 		if (dirty_shared_count > shared_pages_threshold) {
21476 			*freezer_error_code = FREEZER_ERROR_EXCESS_SHARED_MEMORY;
21477 			kr = KERN_FAILURE;
21478 			goto done;
21479 		}
21480 
21481 		if (dirty_shared_count &&
21482 		    ((dirty_private_count / dirty_shared_count) < memorystatus_freeze_private_shared_pages_ratio)) {
21483 			*freezer_error_code = FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO;
21484 			kr = KERN_FAILURE;
21485 			goto done;
21486 		}
21487 
21488 		evaluation_phase = FALSE;
21489 		dirty_shared_count = dirty_private_count = 0;
21490 
21491 		freezer_context_global.freezer_ctx_uncompressed_pages = 0;
21492 		clock_get_uptime(&c_freezer_last_yield_ts);
21493 
21494 		if (eval_only) {
21495 			kr = KERN_SUCCESS;
21496 			goto done;
21497 		}
21498 
21499 		vm_purgeable_purge_task_owned(task);
21500 
21501 		goto again;
21502 	} else {
21503 		kr = KERN_SUCCESS;
21504 	}
21505 
21506 done:
21507 	vm_map_unlock(map);
21508 
21509 	if ((eval_only == FALSE) && (kr == KERN_SUCCESS)) {
21510 		vm_object_compressed_freezer_done();
21511 	}
21512 	return kr;
21513 }
21514 
21515 #endif
21516 
21517 /*
21518  * vm_map_entry_should_cow_for_true_share:
21519  *
21520  * Determines if the map entry should be clipped and setup for copy-on-write
21521  * to avoid applying "true_share" to a large VM object when only a subset is
21522  * targeted.
21523  *
21524  * For now, we target only the map entries created for the Objective C
21525  * Garbage Collector, which initially have the following properties:
21526  *	- alias == VM_MEMORY_MALLOC
21527  *      - wired_count == 0
21528  *      - !needs_copy
21529  * and a VM object with:
21530  *      - internal
21531  *      - copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC
21532  *      - !true_share
21533  *      - vo_size == ANON_CHUNK_SIZE
21534  *
21535  * Only non-kernel map entries.
21536  */
21537 boolean_t
21538 vm_map_entry_should_cow_for_true_share(
21539 	vm_map_entry_t  entry)
21540 {
21541 	vm_object_t     object;
21542 
21543 	if (entry->is_sub_map) {
21544 		/* entry does not point at a VM object */
21545 		return FALSE;
21546 	}
21547 
21548 	if (entry->needs_copy) {
21549 		/* already set for copy_on_write: done! */
21550 		return FALSE;
21551 	}
21552 
21553 	if (VME_ALIAS(entry) != VM_MEMORY_MALLOC &&
21554 	    VME_ALIAS(entry) != VM_MEMORY_MALLOC_SMALL) {
21555 		/* not a malloc heap or Obj-C Garbage Collector heap */
21556 		return FALSE;
21557 	}
21558 
21559 	if (entry->wired_count) {
21560 		/* wired: can't change the map entry... */
21561 		vm_counters.should_cow_but_wired++;
21562 		return FALSE;
21563 	}
21564 
21565 	object = VME_OBJECT(entry);
21566 
21567 	if (object == VM_OBJECT_NULL) {
21568 		/* no object yet... */
21569 		return FALSE;
21570 	}
21571 
21572 	if (!object->internal) {
21573 		/* not an internal object */
21574 		return FALSE;
21575 	}
21576 
21577 	if (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
21578 		/* not the default copy strategy */
21579 		return FALSE;
21580 	}
21581 
21582 	if (object->true_share) {
21583 		/* already true_share: too late to avoid it */
21584 		return FALSE;
21585 	}
21586 
21587 	if (VME_ALIAS(entry) == VM_MEMORY_MALLOC &&
21588 	    object->vo_size != ANON_CHUNK_SIZE) {
21589 		/* ... not an object created for the ObjC Garbage Collector */
21590 		return FALSE;
21591 	}
21592 
21593 	if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_SMALL &&
21594 	    object->vo_size != 2048 * 4096) {
21595 		/* ... not a "MALLOC_SMALL" heap */
21596 		return FALSE;
21597 	}
21598 
21599 	/*
21600 	 * All the criteria match: we have a large object being targeted for "true_share".
21601 	 * To limit the adverse side-effects linked with "true_share", tell the caller
21602 	 * to try to avoid setting up the entire object for "true_share" by clipping the
21603 	 * targeted range and setting it up for copy-on-write.
21604 	 */
21605 	return TRUE;
21606 }
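/*
 * Illustrative caller sketch (not one of the actual call sites): on a TRUE
 * result, the caller clips the entry to the page(s) it actually needs and
 * forces copy-on-write there instead of marking the whole object
 * "true_share". "fault_addr" and the clipping bounds are assumptions made
 * for the example.
 */
#if 0 /* example only */
	if (vm_map_entry_should_cow_for_true_share(entry)) {
		vm_map_clip_start(map, entry,
		    vm_map_trunc_page(fault_addr, VM_MAP_PAGE_MASK(map)));
		vm_map_clip_end(map, entry,
		    vm_map_round_page(fault_addr + PAGE_SIZE, VM_MAP_PAGE_MASK(map)));
		entry->needs_copy = TRUE;       /* copy-on-write instead of true_share */
	}
#endif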
21607 
21608 uint64_t vm_map_range_overflows_count = 0;
21609 TUNABLE_WRITEABLE(boolean_t, vm_map_range_overflows_log, "vm_map_range_overflows_log", FALSE);
21610 bool
21611 vm_map_range_overflows(
21612 	vm_map_t map,
21613 	vm_map_offset_t addr,
21614 	vm_map_size_t size)
21615 {
21616 	vm_map_offset_t start, end, sum;
21617 	vm_map_offset_t pgmask;
21618 
21619 	if (size == 0) {
21620 		/* empty range -> no overflow */
21621 		return false;
21622 	}
21623 	pgmask = vm_map_page_mask(map);
21624 	start = vm_map_trunc_page_mask(addr, pgmask);
21625 	end = vm_map_round_page_mask(addr + size, pgmask);
21626 	if (__improbable(os_add_overflow(addr, size, &sum) || end <= start)) {
21627 		vm_map_range_overflows_count++;
21628 		if (vm_map_range_overflows_log) {
21629 			printf("%d[%s] vm_map_range_overflows addr 0x%llx size 0x%llx pgmask 0x%llx\n",
21630 			    proc_selfpid(),
21631 			    proc_best_name(current_proc()),
21632 			    (uint64_t)addr,
21633 			    (uint64_t)size,
21634 			    (uint64_t)pgmask);
21635 		}
21636 		DTRACE_VM4(vm_map_range_overflows,
21637 		    vm_map_t, map,
21638 		    uint32_t, pgmask,
21639 		    uint64_t, (uint64_t)addr,
21640 		    uint64_t, (uint64_t)size);
21641 		return true;
21642 	}
21643 	return false;
21644 }
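/*
 * Illustrative usage sketch (not in the original source): a typical caller
 * validates a user-supplied (addr, size) pair up front and bails out before
 * touching the map. "map", "addr" and "size" are assumed locals here.
 */
#if 0 /* example only */
	if (vm_map_range_overflows(map, addr, size)) {
		return KERN_INVALID_ARGUMENT;
	}
#endif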
21645 
21646 vm_map_offset_t
21647 vm_map_round_page_mask(
21648 	vm_map_offset_t offset,
21649 	vm_map_offset_t mask)
21650 {
21651 	return VM_MAP_ROUND_PAGE(offset, mask);
21652 }
21653 
21654 vm_map_offset_t
21655 vm_map_trunc_page_mask(
21656 	vm_map_offset_t offset,
21657 	vm_map_offset_t mask)
21658 {
21659 	return VM_MAP_TRUNC_PAGE(offset, mask);
21660 }
21661 
21662 boolean_t
21663 vm_map_page_aligned(
21664 	vm_map_offset_t offset,
21665 	vm_map_offset_t mask)
21666 {
21667 	return ((offset) & mask) == 0;
21668 }
21669 
21670 int
21671 vm_map_page_shift(
21672 	vm_map_t map)
21673 {
21674 	return VM_MAP_PAGE_SHIFT(map);
21675 }
21676 
21677 int
21678 vm_map_page_size(
21679 	vm_map_t map)
21680 {
21681 	return VM_MAP_PAGE_SIZE(map);
21682 }
21683 
21684 vm_map_offset_t
21685 vm_map_page_mask(
21686 	vm_map_t map)
21687 {
21688 	return VM_MAP_PAGE_MASK(map);
21689 }
21690 
21691 kern_return_t
21692 vm_map_set_page_shift(
21693 	vm_map_t        map,
21694 	int             pageshift)
21695 {
21696 	if (map->hdr.nentries != 0) {
21697 		/* too late to change page size */
21698 		return KERN_FAILURE;
21699 	}
21700 
21701 	map->hdr.page_shift = (uint16_t)pageshift;
21702 
21703 	return KERN_SUCCESS;
21704 }
21705 
21706 kern_return_t
21707 vm_map_query_volatile(
21708 	vm_map_t        map,
21709 	mach_vm_size_t  *volatile_virtual_size_p,
21710 	mach_vm_size_t  *volatile_resident_size_p,
21711 	mach_vm_size_t  *volatile_compressed_size_p,
21712 	mach_vm_size_t  *volatile_pmap_size_p,
21713 	mach_vm_size_t  *volatile_compressed_pmap_size_p)
21714 {
21715 	mach_vm_size_t  volatile_virtual_size;
21716 	mach_vm_size_t  volatile_resident_count;
21717 	mach_vm_size_t  volatile_compressed_count;
21718 	mach_vm_size_t  volatile_pmap_count;
21719 	mach_vm_size_t  volatile_compressed_pmap_count;
21720 	mach_vm_size_t  resident_count;
21721 	vm_map_entry_t  entry;
21722 	vm_object_t     object;
21723 
21724 	/* map should be locked by caller */
21725 
21726 	volatile_virtual_size = 0;
21727 	volatile_resident_count = 0;
21728 	volatile_compressed_count = 0;
21729 	volatile_pmap_count = 0;
21730 	volatile_compressed_pmap_count = 0;
21731 
21732 	for (entry = vm_map_first_entry(map);
21733 	    entry != vm_map_to_entry(map);
21734 	    entry = entry->vme_next) {
21735 		mach_vm_size_t  pmap_resident_bytes, pmap_compressed_bytes;
21736 
21737 		if (entry->is_sub_map) {
21738 			continue;
21739 		}
21740 		if (!(entry->protection & VM_PROT_WRITE)) {
21741 			continue;
21742 		}
21743 		object = VME_OBJECT(entry);
21744 		if (object == VM_OBJECT_NULL) {
21745 			continue;
21746 		}
21747 		if (object->purgable != VM_PURGABLE_VOLATILE &&
21748 		    object->purgable != VM_PURGABLE_EMPTY) {
21749 			continue;
21750 		}
21751 		if (VME_OFFSET(entry)) {
21752 			/*
21753 			 * If the map entry has been split and the object now
21754 			 * appears several times in the VM map, we don't want
21755 			 * to count the object's resident_page_count more than
21756 			 * once.  We count it only for the first one, starting
21757 			 * at offset 0 and ignore the other VM map entries.
21758 			 */
21759 			continue;
21760 		}
21761 		resident_count = object->resident_page_count;
21762 		if ((VME_OFFSET(entry) / PAGE_SIZE) >= resident_count) {
21763 			resident_count = 0;
21764 		} else {
21765 			resident_count -= (VME_OFFSET(entry) / PAGE_SIZE);
21766 		}
21767 
21768 		volatile_virtual_size += entry->vme_end - entry->vme_start;
21769 		volatile_resident_count += resident_count;
21770 		if (object->pager) {
21771 			volatile_compressed_count +=
21772 			    vm_compressor_pager_get_count(object->pager);
21773 		}
21774 		pmap_compressed_bytes = 0;
21775 		pmap_resident_bytes =
21776 		    pmap_query_resident(map->pmap,
21777 		    entry->vme_start,
21778 		    entry->vme_end,
21779 		    &pmap_compressed_bytes);
21780 		volatile_pmap_count += (pmap_resident_bytes / PAGE_SIZE);
21781 		volatile_compressed_pmap_count += (pmap_compressed_bytes
21782 		    / PAGE_SIZE);
21783 	}
21784 
21785 	/* map is still locked on return */
21786 
21787 	*volatile_virtual_size_p = volatile_virtual_size;
21788 	*volatile_resident_size_p = volatile_resident_count * PAGE_SIZE;
21789 	*volatile_compressed_size_p = volatile_compressed_count * PAGE_SIZE;
21790 	*volatile_pmap_size_p = volatile_pmap_count * PAGE_SIZE;
21791 	*volatile_compressed_pmap_size_p = volatile_compressed_pmap_count * PAGE_SIZE;
21792 
21793 	return KERN_SUCCESS;
21794 }
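/*
 * Illustrative usage sketch (not in the original source): all five sizes come
 * back in bytes. The map must be locked by the caller, as noted above; a read
 * lock is assumed to be sufficient here since the function only walks the
 * entries.
 */
#if 0 /* example only */
	mach_vm_size_t vq_virtual, vq_resident, vq_compressed;
	mach_vm_size_t vq_pmap, vq_compressed_pmap;

	vm_map_lock_read(map);
	vm_map_query_volatile(map, &vq_virtual, &vq_resident,
	    &vq_compressed, &vq_pmap, &vq_compressed_pmap);
	vm_map_unlock_read(map);
#endif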
21795 
21796 void
21797 vm_map_sizes(vm_map_t map,
21798     vm_map_size_t * psize,
21799     vm_map_size_t * pfree,
21800     vm_map_size_t * plargest_free)
21801 {
21802 	vm_map_entry_t  entry;
21803 	vm_map_offset_t prev;
21804 	vm_map_size_t   free, total_free, largest_free;
21805 	boolean_t       end;
21806 
21807 	if (!map) {
21808 		*psize = *pfree = *plargest_free = 0;
21809 		return;
21810 	}
21811 	total_free = largest_free = 0;
21812 
21813 	vm_map_lock_read(map);
21814 	if (psize) {
21815 		*psize = map->max_offset - map->min_offset;
21816 	}
21817 
21818 	prev = map->min_offset;
21819 	for (entry = vm_map_first_entry(map);; entry = entry->vme_next) {
21820 		end = (entry == vm_map_to_entry(map));
21821 
21822 		if (end) {
21823 			free = entry->vme_end   - prev;
21824 		} else {
21825 			free = entry->vme_start - prev;
21826 		}
21827 
21828 		total_free += free;
21829 		if (free > largest_free) {
21830 			largest_free = free;
21831 		}
21832 
21833 		if (end) {
21834 			break;
21835 		}
21836 		prev = entry->vme_end;
21837 	}
21838 	vm_map_unlock_read(map);
21839 	if (pfree) {
21840 		*pfree = total_free;
21841 	}
21842 	if (plargest_free) {
21843 		*plargest_free = largest_free;
21844 	}
21845 }
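/*
 * Illustrative usage sketch (not in the original source): fetch the total VA
 * span, the total free VA and the largest free block in one pass.
 */
#if 0 /* example only */
	vm_map_size_t map_size, map_free, map_largest_free;

	vm_map_sizes(map, &map_size, &map_free, &map_largest_free);
#endif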
21846 
21847 #if VM_SCAN_FOR_SHADOW_CHAIN
21848 int vm_map_shadow_max(vm_map_t map);
21849 int
21850 vm_map_shadow_max(
21851 	vm_map_t map)
21852 {
21853 	int             shadows, shadows_max;
21854 	vm_map_entry_t  entry;
21855 	vm_object_t     object, next_object;
21856 
21857 	if (map == NULL) {
21858 		return 0;
21859 	}
21860 
21861 	shadows_max = 0;
21862 
21863 	vm_map_lock_read(map);
21864 
21865 	for (entry = vm_map_first_entry(map);
21866 	    entry != vm_map_to_entry(map);
21867 	    entry = entry->vme_next) {
21868 		if (entry->is_sub_map) {
21869 			continue;
21870 		}
21871 		object = VME_OBJECT(entry);
21872 		if (object == NULL) {
21873 			continue;
21874 		}
21875 		vm_object_lock_shared(object);
21876 		for (shadows = 0;
21877 		    object->shadow != NULL;
21878 		    shadows++, object = next_object) {
21879 			next_object = object->shadow;
21880 			vm_object_lock_shared(next_object);
21881 			vm_object_unlock(object);
21882 		}
21883 		vm_object_unlock(object);
21884 		if (shadows > shadows_max) {
21885 			shadows_max = shadows;
21886 		}
21887 	}
21888 
21889 	vm_map_unlock_read(map);
21890 
21891 	return shadows_max;
21892 }
21893 #endif /* VM_SCAN_FOR_SHADOW_CHAIN */
21894 
21895 void
21896 vm_commit_pagezero_status(vm_map_t lmap)
21897 {
21898 	pmap_advise_pagezero_range(lmap->pmap, lmap->min_offset);
21899 }
21900 
21901 #if __x86_64__
21902 void
21903 vm_map_set_high_start(
21904 	vm_map_t        map,
21905 	vm_map_offset_t high_start)
21906 {
21907 	map->vmmap_high_start = high_start;
21908 }
21909 #endif /* __x86_64__ */
21910 
21911 #if CODE_SIGNING_MONITOR
21912 
21913 kern_return_t
21914 vm_map_entry_cs_associate(
21915 	vm_map_t                map,
21916 	vm_map_entry_t          entry,
21917 	vm_map_kernel_flags_t   vmk_flags)
21918 {
21919 	vm_object_t cs_object, cs_shadow, backing_object;
21920 	vm_object_offset_t cs_offset, backing_offset;
21921 	void *cs_blobs;
21922 	struct vnode *cs_vnode;
21923 	kern_return_t cs_ret;
21924 
21925 	if (map->pmap == NULL ||
21926 	    entry->is_sub_map || /* XXX FBDP: recurse on sub-range? */
21927 	    (csm_address_space_exempt(map->pmap) == KERN_SUCCESS) ||
21928 	    VME_OBJECT(entry) == VM_OBJECT_NULL) {
21929 		return KERN_SUCCESS;
21930 	}
21931 
21932 	if (!(entry->protection & VM_PROT_EXECUTE)) {
21933 		/*
21934 		 * This memory region is not executable, so the code-signing
21935 		 * monitor would usually not care about it...
21936 		 */
21937 		if (vmk_flags.vmkf_remap_prot_copy &&
21938 		    (entry->max_protection & VM_PROT_EXECUTE)) {
21939 			/*
21940 			 * ... except if the memory region is being remapped
21941 			 * from r-x/r-x to rw-/rwx via vm_protect(VM_PROT_COPY)
21942 			 * which is what a debugger or dtrace would be doing
21943 			 * to prepare to modify an executable page to insert
21944 			 * a breakpoint or activate a probe.
21945 			 * In that case, fall through so that we can mark
21946 			 * this region as being "debugged" and no longer
21947 			 * strictly code-signed.
21948 			 */
21949 		} else {
21950 			/*
21951 			 * Really not executable, so no need to tell the
21952 			 * code-signing monitor.
21953 			 */
21954 			return KERN_SUCCESS;
21955 		}
21956 	}
21957 
21958 	vm_map_lock_assert_exclusive(map);
21959 
21960 	if (entry->used_for_jit) {
21961 		cs_ret = csm_associate_jit_region(
21962 			map->pmap,
21963 			entry->vme_start,
21964 			entry->vme_end - entry->vme_start);
21965 		goto done;
21966 	}
21967 
21968 	if (vmk_flags.vmkf_remap_prot_copy) {
21969 		cs_ret = csm_associate_debug_region(
21970 			map->pmap,
21971 			entry->vme_start,
21972 			entry->vme_end - entry->vme_start);
21973 		if (cs_ret == KERN_SUCCESS) {
21974 			entry->vme_xnu_user_debug = TRUE;
21975 		}
21976 #if DEVELOPMENT || DEBUG
21977 		if (vm_log_xnu_user_debug) {
21978 			printf("FBDP %d[%s] %s:%d map %p entry %p [ 0x%llx 0x%llx ]  vme_xnu_user_debug=%d cs_ret %d\n",
21979 			    proc_selfpid(),
21980 			    (get_bsdtask_info(current_task()) ? proc_name_address(get_bsdtask_info(current_task())) : "?"),
21981 			    __FUNCTION__, __LINE__,
21982 			    map, entry,
21983 			    (uint64_t)entry->vme_start, (uint64_t)entry->vme_end,
21984 			    entry->vme_xnu_user_debug,
21985 			    cs_ret);
21986 		}
21987 #endif /* DEVELOPMENT || DEBUG */
21988 		goto done;
21989 	}
21990 
21991 	cs_object = VME_OBJECT(entry);
21992 	vm_object_lock_shared(cs_object);
21993 	cs_offset = VME_OFFSET(entry);
21994 
21995 	/* find the VM object backed by the code-signed vnode */
21996 	for (;;) {
21997 		/* go to the bottom of cs_object's shadow chain */
21998 		for (;
21999 		    cs_object->shadow != VM_OBJECT_NULL;
22000 		    cs_object = cs_shadow) {
22001 			cs_shadow = cs_object->shadow;
22002 			cs_offset += cs_object->vo_shadow_offset;
22003 			vm_object_lock_shared(cs_shadow);
22004 			vm_object_unlock(cs_object);
22005 		}
22006 		if (cs_object->internal ||
22007 		    cs_object->pager == MEMORY_OBJECT_NULL) {
22008 			vm_object_unlock(cs_object);
22009 			return KERN_SUCCESS;
22010 		}
22011 
22012 		cs_offset += cs_object->paging_offset;
22013 
22014 		/*
22015 		 * cs_object could be backed by a:
22016 		 *      vnode_pager
22017 		 *	apple_protect_pager
22018 		 *      shared_region_pager
22019 		 *	fourk_pager (multiple backing objects -> fail?)
22020 		 * ask the pager if it has a backing VM object
22021 		 */
22022 		if (!memory_object_backing_object(cs_object->pager,
22023 		    cs_offset,
22024 		    &backing_object,
22025 		    &backing_offset)) {
22026 			/* no backing object: cs_object is it */
22027 			break;
22028 		}
22029 
22030 		/* look down the backing object's shadow chain */
22031 		vm_object_lock_shared(backing_object);
22032 		vm_object_unlock(cs_object);
22033 		cs_object = backing_object;
22034 		cs_offset = backing_offset;
22035 	}
22036 
22037 	cs_vnode = vnode_pager_lookup_vnode(cs_object->pager);
22038 	if (cs_vnode == NULL) {
22039 		/* no vnode, no code signatures to associate */
22040 		cs_ret = KERN_SUCCESS;
22041 	} else {
22042 		cs_ret = vnode_pager_get_cs_blobs(cs_vnode,
22043 		    &cs_blobs);
22044 		assert(cs_ret == KERN_SUCCESS);
22045 		cs_ret = cs_associate_blob_with_mapping(map->pmap,
22046 		    entry->vme_start,
22047 		    (entry->vme_end - entry->vme_start),
22048 		    cs_offset,
22049 		    cs_blobs);
22050 	}
22051 	vm_object_unlock(cs_object);
22052 	cs_object = VM_OBJECT_NULL;
22053 
22054 done:
22055 	if (cs_ret == KERN_SUCCESS) {
22056 		DTRACE_VM2(vm_map_entry_cs_associate_success,
22057 		    vm_map_offset_t, entry->vme_start,
22058 		    vm_map_offset_t, entry->vme_end);
22059 		if (vm_map_executable_immutable) {
22060 			/*
22061 			 * Prevent this executable
22062 			 * mapping from being unmapped
22063 			 * or modified.
22064 			 */
22065 			entry->vme_permanent = TRUE;
22066 		}
22067 		/*
22068 		 * pmap says it will validate the
22069 		 * code-signing validity of pages
22070 		 * faulted in via this mapping, so
22071 		 * this map entry should be marked so
22072 		 * that vm_fault() bypasses code-signing
22073 		 * validation for faults coming through
22074 		 * this mapping.
22075 		 */
22076 		entry->csm_associated = TRUE;
22077 	} else if (cs_ret == KERN_NOT_SUPPORTED) {
22078 		/*
22079 		 * pmap won't check the code-signing
22080 		 * validity of pages faulted in via
22081 		 * this mapping, so VM should keep
22082 		 * doing it.
22083 		 */
22084 		DTRACE_VM3(vm_map_entry_cs_associate_off,
22085 		    vm_map_offset_t, entry->vme_start,
22086 		    vm_map_offset_t, entry->vme_end,
22087 		    int, cs_ret);
22088 	} else {
22089 		/*
22090 		 * A real error: do not allow
22091 		 * execution in this mapping.
22092 		 */
22093 		DTRACE_VM3(vm_map_entry_cs_associate_failure,
22094 		    vm_map_offset_t, entry->vme_start,
22095 		    vm_map_offset_t, entry->vme_end,
22096 		    int, cs_ret);
22097 		if (vmk_flags.vmkf_overwrite_immutable) {
22098 			/*
22099 			 * We can get here when we remap an apple_protect pager
22100 			 * on top of an already cs_associated executable mapping
22101 			 * with the same code signatures, so we don't want to
22102 			 * lose VM_PROT_EXECUTE in that case...
22103 			 */
22104 		} else {
22105 			entry->protection &= ~VM_PROT_ALLEXEC;
22106 			entry->max_protection &= ~VM_PROT_ALLEXEC;
22107 		}
22108 	}
22109 
22110 	return cs_ret;
22111 }
22112 
22113 #endif /* CODE_SIGNING_MONITOR */
22114 
22115 /*
22116  * FORKED CORPSE FOOTPRINT
22117  *
22118  * A forked corpse gets a copy of the original VM map but its pmap is mostly
22119  * empty since it never ran and never got to fault in any pages.
22120  * Collecting footprint info (via "sysctl vm.self_region_footprint") for
22121  * a forked corpse would therefore return very little information.
22122  *
22123  * When forking a corpse, we can pass the VM_MAP_FORK_CORPSE_FOOTPRINT option
22124  * to vm_map_fork() to collect footprint information from the original VM map
22125  * and its pmap, and store it in the forked corpse's VM map.  That information
22126  * is stored in place of the VM map's "hole list" since we'll never need to
22127  * lookup for holes in the corpse's map.
22128  *
22129  * The corpse's footprint info looks like this:
22130  *
22131  * vm_map->vmmap_corpse_footprint points to pageable kernel memory laid out
22132  * as follows:
22133  *                     +---------------------------------------+
22134  *            header-> | cf_size                               |
22135  *                     +-------------------+-------------------+
22136  *                     | cf_last_region    | cf_last_zeroes    |
22137  *                     +-------------------+-------------------+
22138  *           region1-> | cfr_vaddr                             |
22139  *                     +-------------------+-------------------+
22140  *                     | cfr_num_pages     | d0 | d1 | d2 | d3 |
22141  *                     +---------------------------------------+
22142  *                     | d4 | d5 | ...                         |
22143  *                     +---------------------------------------+
22144  *                     | ...                                   |
22145  *                     +-------------------+-------------------+
22146  *                     | dy | dz | na | na | cfr_vaddr...      | <-region2
22147  *                     +-------------------+-------------------+
22148  *                     | cfr_vaddr (ctd)   | cfr_num_pages     |
22149  *                     +---------------------------------------+
22150  *                     | d0 | d1 ...                           |
22151  *                     +---------------------------------------+
22152  *                       ...
22153  *                     +---------------------------------------+
22154  *       last region-> | cfr_vaddr                             |
22155  *                     +---------------------------------------+
22156  *                     | cfr_num_pages     | d0 | d1 | d2 | d3 |
22157  *                     +---------------------------------------+
22158  *                       ...
22159  *                     +---------------------------------------+
22160  *                     | dx | dy | dz | na | na | na | na | na |
22161  *                     +---------------------------------------+
22162  *
22163  * where:
22164  *      cf_size:	total size of the buffer (rounded to page size)
22165  *      cf_last_region:	offset in the buffer of the last "region" sub-header
22166  *	cf_last_zeroes: number of trailing "zero" dispositions at the end
22167  *			of last region
22168  *	cfr_vaddr:	virtual address of the start of the covered "region"
22169  *	cfr_num_pages:	number of pages in the covered "region"
22170  *	d*:		disposition of the page at that virtual address
22171  * Regions in the buffer are word-aligned.
22172  *
22173  * We estimate the size of the buffer based on the number of memory regions
22174  * and the virtual size of the address space.  While copying each memory region
22175  * during vm_map_fork(), we also collect the footprint info for that region
22176  * and store it in the buffer, packing it as much as possible (coalescing
22177  * contiguous memory regions to avoid having too many region headers and
22178  * avoiding long streaks of "zero" page dispositions by splitting footprint
22179  * "regions", so the number of regions in the footprint buffer might not match
22180  * the number of memory regions in the address space.
22181  *
22182  * We also have to copy the original task's "nonvolatile" ledgers since that's
22183  * part of the footprint and will need to be reported to any tool asking for
22184  * the footprint information of the forked corpse.
22185  */
22186 
22187 uint64_t vm_map_corpse_footprint_count = 0;
22188 uint64_t vm_map_corpse_footprint_size_avg = 0;
22189 uint64_t vm_map_corpse_footprint_size_max = 0;
22190 uint64_t vm_map_corpse_footprint_full = 0;
22191 uint64_t vm_map_corpse_footprint_no_buf = 0;
22192 
22193 struct vm_map_corpse_footprint_header {
22194 	vm_size_t       cf_size;        /* allocated buffer size */
22195 	uint32_t        cf_last_region; /* offset of last region in buffer */
22196 	union {
22197 		uint32_t cfu_last_zeroes; /* during creation:
22198 		                           * number of "zero" dispositions at
22199 		                           * end of last region */
22200 		uint32_t cfu_hint_region; /* during lookup:
22201 		                           * offset of last looked up region */
22202 #define cf_last_zeroes cfu.cfu_last_zeroes
22203 #define cf_hint_region cfu.cfu_hint_region
22204 	} cfu;
22205 };
22206 typedef uint8_t cf_disp_t;
22207 struct vm_map_corpse_footprint_region {
22208 	vm_map_offset_t cfr_vaddr;      /* region start virtual address */
22209 	uint32_t        cfr_num_pages;  /* number of pages in this "region" */
22210 	cf_disp_t   cfr_disposition[0]; /* disposition of each page */
22211 } __attribute__((packed));
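/*
 * Illustrative sketch (not in the original source): how a walk advances from
 * one footprint "region" to the next, matching the packed, word-aligned
 * layout described above. The helper name is made up for the example.
 */
#if 0 /* example only */
static struct vm_map_corpse_footprint_region *
example_next_region(
	struct vm_map_corpse_footprint_header *header,
	struct vm_map_corpse_footprint_region *region)
{
	uint32_t offset;

	offset = (uint32_t)((uintptr_t)region - (uintptr_t)header);
	/* skip this region's header ... */
	offset += sizeof(*region);
	/* ... and its per-page dispositions ... */
	offset += region->cfr_num_pages * sizeof(cf_disp_t);
	/* ... then round up to the next word boundary */
	offset = roundup(offset, sizeof(int));
	return (struct vm_map_corpse_footprint_region *)
	       ((char *)header + offset);
}
#endif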
22212 
22213 static cf_disp_t
22214 vm_page_disposition_to_cf_disp(
22215 	int disposition)
22216 {
22217 	assert(sizeof(cf_disp_t) == 1);
22218 	/* relocate bits that don't fit in a "uint8_t" */
22219 	if (disposition & VM_PAGE_QUERY_PAGE_REUSABLE) {
22220 		disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
22221 	}
22222 	/* cast gets rid of extra bits */
22223 	return (cf_disp_t) disposition;
22224 }
22225 
22226 static int
22227 vm_page_cf_disp_to_disposition(
22228 	cf_disp_t cf_disp)
22229 {
22230 	int disposition;
22231 
22232 	assert(sizeof(cf_disp_t) == 1);
22233 	disposition = (int) cf_disp;
22234 	/* move relocated bits back in place */
22235 	if (cf_disp & VM_PAGE_QUERY_PAGE_FICTITIOUS) {
22236 		disposition |= VM_PAGE_QUERY_PAGE_REUSABLE;
22237 		disposition &= ~VM_PAGE_QUERY_PAGE_FICTITIOUS;
22238 	}
22239 	return disposition;
22240 }
22241 
22242 /*
22243  * vm_map_corpse_footprint_new_region:
22244  *      closes the current footprint "region" and creates a new one
22245  *
22246  * Returns NULL if there's not enough space in the buffer for a new region.
22247  */
22248 static struct vm_map_corpse_footprint_region *
22249 vm_map_corpse_footprint_new_region(
22250 	struct vm_map_corpse_footprint_header *footprint_header)
22251 {
22252 	uintptr_t       footprint_edge;
22253 	uint32_t        new_region_offset;
22254 	struct vm_map_corpse_footprint_region *footprint_region;
22255 	struct vm_map_corpse_footprint_region *new_footprint_region;
22256 
22257 	footprint_edge = ((uintptr_t)footprint_header +
22258 	    footprint_header->cf_size);
22259 	footprint_region = ((struct vm_map_corpse_footprint_region *)
22260 	    ((char *)footprint_header +
22261 	    footprint_header->cf_last_region));
22262 	assert((uintptr_t)footprint_region + sizeof(*footprint_region) <=
22263 	    footprint_edge);
22264 
22265 	/* get rid of trailing zeroes in the last region */
22266 	assert(footprint_region->cfr_num_pages >=
22267 	    footprint_header->cf_last_zeroes);
22268 	footprint_region->cfr_num_pages -=
22269 	    footprint_header->cf_last_zeroes;
22270 	footprint_header->cf_last_zeroes = 0;
22271 
22272 	/* reuse this region if it's now empty */
22273 	if (footprint_region->cfr_num_pages == 0) {
22274 		return footprint_region;
22275 	}
22276 
22277 	/* compute offset of new region */
22278 	new_region_offset = footprint_header->cf_last_region;
22279 	new_region_offset += sizeof(*footprint_region);
22280 	new_region_offset += (footprint_region->cfr_num_pages * sizeof(cf_disp_t));
22281 	new_region_offset = roundup(new_region_offset, sizeof(int));
22282 
22283 	/* check if we're going over the edge */
22284 	if (((uintptr_t)footprint_header +
22285 	    new_region_offset +
22286 	    sizeof(*footprint_region)) >=
22287 	    footprint_edge) {
22288 		/* over the edge: no new region */
22289 		return NULL;
22290 	}
22291 
22292 	/* adjust offset of last region in header */
22293 	footprint_header->cf_last_region = new_region_offset;
22294 
22295 	new_footprint_region = (struct vm_map_corpse_footprint_region *)
22296 	    ((char *)footprint_header +
22297 	    footprint_header->cf_last_region);
22298 	new_footprint_region->cfr_vaddr = 0;
22299 	new_footprint_region->cfr_num_pages = 0;
22300 	/* caller needs to initialize new region */
22301 
22302 	return new_footprint_region;
22303 }
22304 
22305 /*
22306  * vm_map_corpse_footprint_collect:
22307  *	collect footprint information for "old_entry" in "old_map" and
22308  *	stores it in "new_map"'s vmmap_footprint_info.
22309  */
22310 kern_return_t
22311 vm_map_corpse_footprint_collect(
22312 	vm_map_t        old_map,
22313 	vm_map_entry_t  old_entry,
22314 	vm_map_t        new_map)
22315 {
22316 	vm_map_offset_t va;
22317 	kern_return_t   kr;
22318 	struct vm_map_corpse_footprint_header *footprint_header;
22319 	struct vm_map_corpse_footprint_region *footprint_region;
22320 	struct vm_map_corpse_footprint_region *new_footprint_region;
22321 	cf_disp_t       *next_disp_p;
22322 	uintptr_t       footprint_edge;
22323 	uint32_t        num_pages_tmp;
22324 	int             effective_page_size;
22325 
22326 	effective_page_size = MIN(PAGE_SIZE, VM_MAP_PAGE_SIZE(old_map));
22327 
22328 	va = old_entry->vme_start;
22329 
22330 	vm_map_lock_assert_exclusive(old_map);
22331 	vm_map_lock_assert_exclusive(new_map);
22332 
22333 	assert(new_map->has_corpse_footprint);
22334 	assert(!old_map->has_corpse_footprint);
22335 	if (!new_map->has_corpse_footprint ||
22336 	    old_map->has_corpse_footprint) {
22337 		/*
22338 		 * This can only transfer footprint info from a
22339 		 * map with a live pmap to a map with a corpse footprint.
22340 		 */
22341 		return KERN_NOT_SUPPORTED;
22342 	}
22343 
22344 	if (new_map->vmmap_corpse_footprint == NULL) {
22345 		vm_offset_t     buf;
22346 		vm_size_t       buf_size;
22347 
22348 		buf = 0;
22349 		buf_size = (sizeof(*footprint_header) +
22350 		    (old_map->hdr.nentries
22351 		    *
22352 		    (sizeof(*footprint_region) +
22353 		    3))            /* potential alignment for each region */
22354 		    +
22355 		    ((old_map->size / effective_page_size)
22356 		    *
22357 		    sizeof(cf_disp_t)));      /* disposition for each page */
22358 //		printf("FBDP corpse map %p guestimate footprint size 0x%llx\n", new_map, (uint64_t) buf_size);
22359 		buf_size = round_page(buf_size);
22360 
22361 		/* limit buffer to 1 page to validate overflow detection */
22362 //		buf_size = PAGE_SIZE;
22363 
22364 		/* limit size to a somewhat sane amount */
22365 #if XNU_TARGET_OS_OSX
22366 #define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE   (8*1024*1024)   /* 8MB */
22367 #else /* XNU_TARGET_OS_OSX */
22368 #define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE   (256*1024)      /* 256KB */
22369 #endif /* XNU_TARGET_OS_OSX */
22370 		if (buf_size > VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE) {
22371 			buf_size = VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE;
22372 		}
22373 
22374 		/*
22375 		 * Allocate the pageable buffer (with a trailing guard page).
22376 		 * It will be zero-filled on demand.
22377 		 */
22378 		kr = kmem_alloc(kernel_map, &buf, buf_size + PAGE_SIZE,
22379 		    KMA_DATA | KMA_PAGEABLE | KMA_GUARD_LAST,
22380 		    VM_KERN_MEMORY_DIAG);
22381 		if (kr != KERN_SUCCESS) {
22382 			vm_map_corpse_footprint_no_buf++;
22383 			return kr;
22384 		}
22385 
22386 		/* initialize header and 1st region */
22387 		footprint_header = (struct vm_map_corpse_footprint_header *)buf;
22388 		new_map->vmmap_corpse_footprint = footprint_header;
22389 
22390 		footprint_header->cf_size = buf_size;
22391 		footprint_header->cf_last_region =
22392 		    sizeof(*footprint_header);
22393 		footprint_header->cf_last_zeroes = 0;
22394 
22395 		footprint_region = (struct vm_map_corpse_footprint_region *)
22396 		    ((char *)footprint_header +
22397 		    footprint_header->cf_last_region);
22398 		footprint_region->cfr_vaddr = 0;
22399 		footprint_region->cfr_num_pages = 0;
22400 	} else {
22401 		/* retrieve header and last region */
22402 		footprint_header = (struct vm_map_corpse_footprint_header *)
22403 		    new_map->vmmap_corpse_footprint;
22404 		footprint_region = (struct vm_map_corpse_footprint_region *)
22405 		    ((char *)footprint_header +
22406 		    footprint_header->cf_last_region);
22407 	}
22408 	footprint_edge = ((uintptr_t)footprint_header +
22409 	    footprint_header->cf_size);
22410 
22411 	if ((footprint_region->cfr_vaddr +
22412 	    (((vm_map_offset_t)footprint_region->cfr_num_pages) *
22413 	    effective_page_size))
22414 	    != old_entry->vme_start) {
22415 		uint64_t num_pages_delta, num_pages_delta_size;
22416 		uint32_t region_offset_delta_size;
22417 
22418 		/*
22419 		 * Not the next contiguous virtual address:
22420 		 * start a new region or store "zero" dispositions for
22421 		 * the missing pages?
22422 		 */
22423 		/* size of gap in actual page dispositions */
22424 		num_pages_delta = ((old_entry->vme_start -
22425 		    footprint_region->cfr_vaddr) / effective_page_size)
22426 		    - footprint_region->cfr_num_pages;
22427 		num_pages_delta_size = num_pages_delta * sizeof(cf_disp_t);
22428 		/* size of gap as a new footprint region header */
22429 		region_offset_delta_size =
22430 		    (sizeof(*footprint_region) +
22431 		    roundup(((footprint_region->cfr_num_pages -
22432 		    footprint_header->cf_last_zeroes) * sizeof(cf_disp_t)),
22433 		    sizeof(int)) -
22434 		    ((footprint_region->cfr_num_pages -
22435 		    footprint_header->cf_last_zeroes) * sizeof(cf_disp_t)));
22436 //		printf("FBDP %s:%d region 0x%x 0x%llx 0x%x vme_start 0x%llx pages_delta 0x%llx region_delta 0x%x\n", __FUNCTION__, __LINE__, footprint_header->cf_last_region, footprint_region->cfr_vaddr, footprint_region->cfr_num_pages, old_entry->vme_start, num_pages_delta, region_offset_delta);
22437 		if (region_offset_delta_size < num_pages_delta_size ||
22438 		    os_add3_overflow(footprint_region->cfr_num_pages,
22439 		    (uint32_t) num_pages_delta,
22440 		    1,
22441 		    &num_pages_tmp)) {
22442 			/*
22443 			 * Storing data for this gap would take more space
22444 			 * than inserting a new footprint region header:
22445 			 * let's start a new region and save space. If it's a
22446 			 * tie, let's avoid using a new region, since that
22447 			 * would require more region hops to find the right
22448 			 * range during lookups.
22449 			 *
22450 			 * If the current region's cfr_num_pages would overflow
22451 			 * if we added "zero" page dispositions for the gap,
22452 			 * no choice but to start a new region.
22453 			 */
22454 //			printf("FBDP %s:%d new region\n", __FUNCTION__, __LINE__);
22455 			new_footprint_region =
22456 			    vm_map_corpse_footprint_new_region(footprint_header);
22457 			/* check that we're not going over the edge */
22458 			if (new_footprint_region == NULL) {
22459 				goto over_the_edge;
22460 			}
22461 			footprint_region = new_footprint_region;
22462 			/* initialize new region as empty */
22463 			footprint_region->cfr_vaddr = old_entry->vme_start;
22464 			footprint_region->cfr_num_pages = 0;
22465 		} else {
22466 			/*
22467 			 * Store "zero" page dispositions for the missing
22468 			 * pages.
22469 			 */
22470 //			printf("FBDP %s:%d zero gap\n", __FUNCTION__, __LINE__);
22471 			for (; num_pages_delta > 0; num_pages_delta--) {
22472 				next_disp_p = (cf_disp_t *)
22473 				    ((uintptr_t) footprint_region +
22474 				    sizeof(*footprint_region));
22475 				next_disp_p += footprint_region->cfr_num_pages;
22476 				/* check that we're not going over the edge */
22477 				if ((uintptr_t)next_disp_p >= footprint_edge) {
22478 					goto over_the_edge;
22479 				}
22480 				/* store "zero" disposition for this gap page */
22481 				footprint_region->cfr_num_pages++;
22482 				*next_disp_p = (cf_disp_t) 0;
22483 				footprint_header->cf_last_zeroes++;
22484 			}
22485 		}
22486 	}
22487 
22488 	for (va = old_entry->vme_start;
22489 	    va < old_entry->vme_end;
22490 	    va += effective_page_size) {
22491 		int             disposition;
22492 		cf_disp_t       cf_disp;
22493 
22494 		vm_map_footprint_query_page_info(old_map,
22495 		    old_entry,
22496 		    va,
22497 		    &disposition);
22498 		cf_disp = vm_page_disposition_to_cf_disp(disposition);
22499 
22500 //		if (va < SHARED_REGION_BASE_ARM64) printf("FBDP collect map %p va 0x%llx disp 0x%x\n", new_map, va, disp);
22501 
22502 		if (cf_disp == 0 && footprint_region->cfr_num_pages == 0) {
22503 			/*
22504 			 * Ignore "zero" dispositions at start of
22505 			 * region: just move start of region.
22506 			 */
22507 			footprint_region->cfr_vaddr += effective_page_size;
22508 			continue;
22509 		}
22510 
22511 		/* would region's cfr_num_pages overflow? */
22512 		if (os_add_overflow(footprint_region->cfr_num_pages, 1,
22513 		    &num_pages_tmp)) {
22514 			/* overflow: create a new region */
22515 			new_footprint_region =
22516 			    vm_map_corpse_footprint_new_region(
22517 				footprint_header);
22518 			if (new_footprint_region == NULL) {
22519 				goto over_the_edge;
22520 			}
22521 			footprint_region = new_footprint_region;
22522 			footprint_region->cfr_vaddr = va;
22523 			footprint_region->cfr_num_pages = 0;
22524 		}
22525 
22526 		next_disp_p = (cf_disp_t *) ((uintptr_t) footprint_region +
22527 		    sizeof(*footprint_region));
22528 		next_disp_p += footprint_region->cfr_num_pages;
22529 		/* check that we're not going over the edge */
22530 		if ((uintptr_t)next_disp_p >= footprint_edge) {
22531 			goto over_the_edge;
22532 		}
22533 		/* store this disposition */
22534 		*next_disp_p = cf_disp;
22535 		footprint_region->cfr_num_pages++;
22536 
22537 		if (cf_disp != 0) {
22538 			/* non-zero disp: break the current zero streak */
22539 			footprint_header->cf_last_zeroes = 0;
22540 			/* done */
22541 			continue;
22542 		}
22543 
22544 		/* zero disp: add to the current streak of zeroes */
22545 		footprint_header->cf_last_zeroes++;
22546 		if ((footprint_header->cf_last_zeroes +
22547 		    roundup(((footprint_region->cfr_num_pages -
22548 		    footprint_header->cf_last_zeroes) * sizeof(cf_disp_t)) &
22549 		    (sizeof(int) - 1),
22550 		    sizeof(int))) <
22551 		    (sizeof(*footprint_header))) {
22552 			/*
22553 			 * There are not enough trailing "zero" dispositions
22554 			 * (+ the extra padding we would need for the previous
22555 			 * region); creating a new region would not save space
22556 			 * at this point, so let's keep this "zero" disposition
22557 			 * in this region and reconsider later.
22558 			 */
22559 			continue;
22560 		}
22561 		/*
22562 		 * Create a new region to avoid having too many consecutive
22563 		 * "zero" dispositions.
22564 		 */
22565 		new_footprint_region =
22566 		    vm_map_corpse_footprint_new_region(footprint_header);
22567 		if (new_footprint_region == NULL) {
22568 			goto over_the_edge;
22569 		}
22570 		footprint_region = new_footprint_region;
22571 		/* initialize the new region as empty ... */
22572 		footprint_region->cfr_num_pages = 0;
22573 		/* ... and skip this "zero" disp */
22574 		footprint_region->cfr_vaddr = va + effective_page_size;
22575 	}
22576 
22577 	return KERN_SUCCESS;
22578 
22579 over_the_edge:
22580 //	printf("FBDP map %p footprint was full for va 0x%llx\n", new_map, va);
22581 	vm_map_corpse_footprint_full++;
22582 	return KERN_RESOURCE_SHORTAGE;
22583 }
22584 
22585 /*
22586  * vm_map_corpse_footprint_collect_done:
22587  *	completes the footprint collection by getting rid of any remaining
22588  *	trailing "zero" dispositions and trimming the unused part of the
22589  *	kernel buffer
22590  */
22591 void
22592 vm_map_corpse_footprint_collect_done(
22593 	vm_map_t        new_map)
22594 {
22595 	struct vm_map_corpse_footprint_header *footprint_header;
22596 	struct vm_map_corpse_footprint_region *footprint_region;
22597 	vm_size_t       buf_size, actual_size;
22598 	kern_return_t   kr;
22599 
22600 	assert(new_map->has_corpse_footprint);
22601 	if (!new_map->has_corpse_footprint ||
22602 	    new_map->vmmap_corpse_footprint == NULL) {
22603 		return;
22604 	}
22605 
22606 	footprint_header = (struct vm_map_corpse_footprint_header *)
22607 	    new_map->vmmap_corpse_footprint;
22608 	buf_size = footprint_header->cf_size;
22609 
22610 	footprint_region = (struct vm_map_corpse_footprint_region *)
22611 	    ((char *)footprint_header +
22612 	    footprint_header->cf_last_region);
22613 
22614 	/* get rid of trailing zeroes in last region */
22615 	assert(footprint_region->cfr_num_pages >= footprint_header->cf_last_zeroes);
22616 	footprint_region->cfr_num_pages -= footprint_header->cf_last_zeroes;
22617 	footprint_header->cf_last_zeroes = 0;
22618 
22619 	actual_size = (vm_size_t)(footprint_header->cf_last_region +
22620 	    sizeof(*footprint_region) +
22621 	    (footprint_region->cfr_num_pages * sizeof(cf_disp_t)));
22622 
22623 //	printf("FBDP map %p buf_size 0x%llx actual_size 0x%llx\n", new_map, (uint64_t) buf_size, (uint64_t) actual_size);
22624 	vm_map_corpse_footprint_size_avg =
22625 	    (((vm_map_corpse_footprint_size_avg *
22626 	    vm_map_corpse_footprint_count) +
22627 	    actual_size) /
22628 	    (vm_map_corpse_footprint_count + 1));
22629 	vm_map_corpse_footprint_count++;
22630 	if (actual_size > vm_map_corpse_footprint_size_max) {
22631 		vm_map_corpse_footprint_size_max = actual_size;
22632 	}
22633 
22634 	actual_size = round_page(actual_size);
22635 	if (buf_size > actual_size) {
22636 		kr = vm_deallocate(kernel_map,
22637 		    ((vm_address_t)footprint_header +
22638 		    actual_size +
22639 		    PAGE_SIZE),                 /* trailing guard page */
22640 		    (buf_size - actual_size));
22641 		assertf(kr == KERN_SUCCESS,
22642 		    "trim: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n",
22643 		    footprint_header,
22644 		    (uint64_t) buf_size,
22645 		    (uint64_t) actual_size,
22646 		    kr);
22647 		kr = vm_protect(kernel_map,
22648 		    ((vm_address_t)footprint_header +
22649 		    actual_size),
22650 		    PAGE_SIZE,
22651 		    FALSE,             /* set_maximum */
22652 		    VM_PROT_NONE);
22653 		assertf(kr == KERN_SUCCESS,
22654 		    "guard: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n",
22655 		    footprint_header,
22656 		    (uint64_t) buf_size,
22657 		    (uint64_t) actual_size,
22658 		    kr);
22659 	}
22660 
22661 	footprint_header->cf_size = actual_size;
22662 }
22663 
22664 /*
22665  * vm_map_corpse_footprint_query_page_info:
22666  *	retrieves the disposition of the page at virtual address "vaddr"
22667  *	in the forked corpse's VM map
22668  *
22669  * This is the equivalent of vm_map_footprint_query_page_info() for a forked corpse.
22670  */
22671 kern_return_t
22672 vm_map_corpse_footprint_query_page_info(
22673 	vm_map_t        map,
22674 	vm_map_offset_t va,
22675 	int             *disposition_p)
22676 {
22677 	struct vm_map_corpse_footprint_header *footprint_header;
22678 	struct vm_map_corpse_footprint_region *footprint_region;
22679 	uint32_t        footprint_region_offset;
22680 	vm_map_offset_t region_start, region_end;
22681 	int             disp_idx;
22682 	kern_return_t   kr;
22683 	int             effective_page_size;
22684 	cf_disp_t       cf_disp;
22685 
22686 	if (!map->has_corpse_footprint) {
22687 		*disposition_p = 0;
22688 		kr = KERN_INVALID_ARGUMENT;
22689 		goto done;
22690 	}
22691 
22692 	footprint_header = map->vmmap_corpse_footprint;
22693 	if (footprint_header == NULL) {
22694 		*disposition_p = 0;
22695 //		if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disposition_p);
22696 		kr = KERN_INVALID_ARGUMENT;
22697 		goto done;
22698 	}
22699 
22700 	/* start looking at the hint ("cf_hint_region") */
22701 	footprint_region_offset = footprint_header->cf_hint_region;
22702 
22703 	effective_page_size = MIN(PAGE_SIZE, VM_MAP_PAGE_SIZE(map));
22704 
22705 lookup_again:
22706 	if (footprint_region_offset < sizeof(*footprint_header)) {
22707 		/* hint too low: start from 1st region */
22708 		footprint_region_offset = sizeof(*footprint_header);
22709 	}
22710 	if (footprint_region_offset >= footprint_header->cf_last_region) {
22711 		/* hint too high: re-start from 1st region */
22712 		footprint_region_offset = sizeof(*footprint_header);
22713 	}
22714 	footprint_region = (struct vm_map_corpse_footprint_region *)
22715 	    ((char *)footprint_header + footprint_region_offset);
22716 	region_start = footprint_region->cfr_vaddr;
22717 	region_end = (region_start +
22718 	    ((vm_map_offset_t)(footprint_region->cfr_num_pages) *
22719 	    effective_page_size));
22720 	if (va < region_start &&
22721 	    footprint_region_offset != sizeof(*footprint_header)) {
22722 		/* our range starts before the hint region */
22723 
22724 		/* reset the hint (in a racy way...) */
22725 		footprint_header->cf_hint_region = sizeof(*footprint_header);
22726 		/* lookup "va" again from 1st region */
22727 		footprint_region_offset = sizeof(*footprint_header);
22728 		goto lookup_again;
22729 	}
22730 
22731 	while (va >= region_end) {
22732 		if (footprint_region_offset >= footprint_header->cf_last_region) {
22733 			break;
22734 		}
22735 		/* skip the region's header */
22736 		footprint_region_offset += sizeof(*footprint_region);
22737 		/* skip the region's page dispositions */
22738 		footprint_region_offset += (footprint_region->cfr_num_pages * sizeof(cf_disp_t));
22739 		/* align to next word boundary */
22740 		footprint_region_offset =
22741 		    roundup(footprint_region_offset,
22742 		    sizeof(int));
22743 		footprint_region = (struct vm_map_corpse_footprint_region *)
22744 		    ((char *)footprint_header + footprint_region_offset);
22745 		region_start = footprint_region->cfr_vaddr;
22746 		region_end = (region_start +
22747 		    ((vm_map_offset_t)(footprint_region->cfr_num_pages) *
22748 		    effective_page_size));
22749 	}
22750 	if (va < region_start || va >= region_end) {
22751 		/* page not found */
22752 		*disposition_p = 0;
22753 //		if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disposition_p);
22754 		kr = KERN_SUCCESS;
22755 		goto done;
22756 	}
22757 
22758 	/* "va" found: set the lookup hint for next lookup (in a racy way...) */
22759 	footprint_header->cf_hint_region = footprint_region_offset;
22760 
22761 	/* get page disposition for "va" in this region */
22762 	disp_idx = (int) ((va - footprint_region->cfr_vaddr) / effective_page_size);
22763 	cf_disp = footprint_region->cfr_disposition[disp_idx];
22764 	*disposition_p = vm_page_cf_disp_to_disposition(cf_disp);
22765 	kr = KERN_SUCCESS;
22766 done:
22767 //	if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disposition_p);
22768 	/* dtrace -n 'vminfo:::footprint_query_page_info { printf("map 0x%p va 0x%llx disp 0x%x kr 0x%x", arg0, arg1, arg2, arg3); }' */
22769 	DTRACE_VM4(footprint_query_page_info,
22770 	    vm_map_t, map,
22771 	    vm_map_offset_t, va,
22772 	    int, *disposition_p,
22773 	    kern_return_t, kr);
22774 
22775 	return kr;
22776 }
22777 
22778 void
22779 vm_map_corpse_footprint_destroy(
22780 	vm_map_t        map)
22781 {
22782 	if (map->has_corpse_footprint &&
22783 	    map->vmmap_corpse_footprint != 0) {
22784 		struct vm_map_corpse_footprint_header *footprint_header;
22785 		vm_size_t buf_size;
22786 		kern_return_t kr;
22787 
22788 		footprint_header = map->vmmap_corpse_footprint;
22789 		buf_size = footprint_header->cf_size;
22790 		kr = vm_deallocate(kernel_map,
22791 		    (vm_offset_t) map->vmmap_corpse_footprint,
22792 		    ((vm_size_t) buf_size
22793 		    + PAGE_SIZE));                 /* trailing guard page */
22794 		assertf(kr == KERN_SUCCESS, "kr=0x%x\n", kr);
22795 		map->vmmap_corpse_footprint = 0;
22796 		map->has_corpse_footprint = FALSE;
22797 	}
22798 }
22799 
22800 /*
22801  * vm_map_copy_footprint_ledgers:
22802  *	copies any ledger that's relevant to the memory footprint of "old_task"
22803  *	into the forked corpse's task ("new_task")
22804  */
22805 void
22806 vm_map_copy_footprint_ledgers(
22807 	task_t  old_task,
22808 	task_t  new_task)
22809 {
22810 	vm_map_copy_ledger(old_task, new_task, task_ledgers.phys_footprint);
22811 	vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile);
22812 	vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile_compressed);
22813 	vm_map_copy_ledger(old_task, new_task, task_ledgers.internal);
22814 	vm_map_copy_ledger(old_task, new_task, task_ledgers.internal_compressed);
22815 	vm_map_copy_ledger(old_task, new_task, task_ledgers.iokit_mapped);
22816 	vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting);
22817 	vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting_compressed);
22818 	vm_map_copy_ledger(old_task, new_task, task_ledgers.page_table);
22819 	vm_map_copy_ledger(old_task, new_task, task_ledgers.tagged_footprint);
22820 	vm_map_copy_ledger(old_task, new_task, task_ledgers.tagged_footprint_compressed);
22821 	vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile);
22822 	vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile_compressed);
22823 	vm_map_copy_ledger(old_task, new_task, task_ledgers.media_footprint);
22824 	vm_map_copy_ledger(old_task, new_task, task_ledgers.media_footprint_compressed);
22825 	vm_map_copy_ledger(old_task, new_task, task_ledgers.graphics_footprint);
22826 	vm_map_copy_ledger(old_task, new_task, task_ledgers.graphics_footprint_compressed);
22827 	vm_map_copy_ledger(old_task, new_task, task_ledgers.neural_footprint);
22828 	vm_map_copy_ledger(old_task, new_task, task_ledgers.neural_footprint_compressed);
22829 	vm_map_copy_ledger(old_task, new_task, task_ledgers.wired_mem);
22830 }
22831 
22832 /*
22833  * vm_map_copy_ledger:
22834  *	copy a single ledger from "old_task" to "new_task"
22835  */
22836 void
22837 vm_map_copy_ledger(
22838 	task_t  old_task,
22839 	task_t  new_task,
22840 	int     ledger_entry)
22841 {
22842 	ledger_amount_t old_balance, new_balance, delta;
22843 
22844 	assert(new_task->map->has_corpse_footprint);
22845 	if (!new_task->map->has_corpse_footprint) {
22846 		return;
22847 	}
22848 
22849 	/* turn off sanity checks for the ledger we're about to mess with */
22850 	ledger_disable_panic_on_negative(new_task->ledger,
22851 	    ledger_entry);
22852 
22853 	/* adjust "new_task" to match "old_task" */
22854 	ledger_get_balance(old_task->ledger,
22855 	    ledger_entry,
22856 	    &old_balance);
22857 	ledger_get_balance(new_task->ledger,
22858 	    ledger_entry,
22859 	    &new_balance);
22860 	if (new_balance == old_balance) {
22861 		/* new == old: done */
22862 	} else if (new_balance > old_balance) {
22863 		/* new > old ==> new -= new - old */
22864 		delta = new_balance - old_balance;
22865 		ledger_debit(new_task->ledger,
22866 		    ledger_entry,
22867 		    delta);
22868 	} else {
22869 		/* new < old ==> new += old - new */
22870 		delta = old_balance - new_balance;
22871 		ledger_credit(new_task->ledger,
22872 		    ledger_entry,
22873 		    delta);
22874 	}
22875 }
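/*
 * Illustrative arithmetic (not in the original source): if the original
 * task's phys_footprint balance is 150MB and the corpse's is 0, the 150MB
 * delta is credited to the corpse; if the corpse's balance were somehow
 * higher than the original's, the difference would be debited instead, so
 * both ledgers end up reporting the same balance.
 */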
22876 
22877 /*
22878  * vm_map_get_pmap:
22879  * returns the pmap associated with the vm_map
22880  */
22881 pmap_t
22882 vm_map_get_pmap(vm_map_t map)
22883 {
22884 	return vm_map_pmap(map);
22885 }
22886 
22887 #if CONFIG_MAP_RANGES
22888 static bitmap_t vm_map_user_range_heap_map[BITMAP_LEN(VM_MEMORY_COUNT)];
22889 
22890 static_assert(UMEM_RANGE_ID_DEFAULT == MACH_VM_RANGE_DEFAULT);
22891 static_assert(UMEM_RANGE_ID_HEAP == MACH_VM_RANGE_DATA);
22892 
22893 /*
22894  * vm_map_range_map_init:
22895  *  initializes the VM range ID map to enable index lookup
22896  *  of user VM ranges based on VM tag from userspace.
22897  */
22898 static void
22899 vm_map_range_map_init(void)
22900 {
22901 	/*
22902 	 * VM_MEMORY_MALLOC{,_NANO} are skipped on purpose:
22903 	 * - the former is malloc metadata which should be kept separate
22904 	 * - the latter has its own ranges
22905 	 */
22906 	bitmap_set(vm_map_user_range_heap_map, VM_MEMORY_MALLOC_HUGE);
22907 	bitmap_set(vm_map_user_range_heap_map, VM_MEMORY_MALLOC_LARGE);
22908 	bitmap_set(vm_map_user_range_heap_map, VM_MEMORY_MALLOC_LARGE_REUSED);
22909 	bitmap_set(vm_map_user_range_heap_map, VM_MEMORY_MALLOC_MEDIUM);
22910 	bitmap_set(vm_map_user_range_heap_map, VM_MEMORY_MALLOC_PROB_GUARD);
22911 	bitmap_set(vm_map_user_range_heap_map, VM_MEMORY_MALLOC_SMALL);
22912 	bitmap_set(vm_map_user_range_heap_map, VM_MEMORY_MALLOC_TINY);
22913 }
22914 
22915 static struct mach_vm_range
22916 vm_map_range_random_uniform(
22917 	vm_map_size_t           req_size,
22918 	vm_map_offset_t         min_addr,
22919 	vm_map_offset_t         max_addr,
22920 	vm_map_offset_t         offmask)
22921 {
22922 	vm_map_offset_t random_addr;
22923 	struct mach_vm_range alloc;
22924 
22925 	req_size = (req_size + offmask) & ~offmask;
22926 	min_addr = (min_addr + offmask) & ~offmask;
22927 	max_addr = max_addr & ~offmask;
22928 
22929 	read_random(&random_addr, sizeof(random_addr));
22930 	random_addr %= (max_addr - req_size - min_addr);
22931 	random_addr &= ~offmask;
22932 
22933 	alloc.min_address = min_addr + random_addr;
22934 	alloc.max_address = min_addr + random_addr + req_size;
22935 	return alloc;
22936 }
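/*
 * Worked example (illustrative, using the macOS arm64 values from
 * vm_map_range_configure below): with req_size = 1T, min_addr = 0x61ull << 40
 * (97T), max_addr = 0x7full << 40 (127T) and offmask = 64G - 1, the random
 * value is reduced modulo 127T - 1T - 97T = 29T and truncated to a 64G
 * boundary, so the returned 1T range starts on a 64G boundary somewhere in
 * [97T, 126T).
 */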
22937 
22938 static vm_map_offset_t
22939 vm_map_range_offmask(void)
22940 {
22941 	uint32_t pte_depth;
22942 
22943 	/*
22944 	 * PTE optimizations
22945 	 *
22946 	 *
22947 	 * 16k pages systems
22948 	 * ~~~~~~~~~~~~~~~~~
22949 	 *
22950 	 * A single L1 (sub-)page covers the address space.
22951 	 * - L2 pages cover 64G,
22952 	 * - L3 pages cover 32M.
22953 	 *
22954 	 * On embedded, the dynamic VA range is 64G and uses a single L2 page.
22955 	 * As a result, we really only need to align the ranges to 32M to avoid
22956 	 * partial L3 pages.
22957 	 *
22958 	 * On macOS, the usage of L2 pages will increase, so as a result we will
22959 	 * want to align ranges to 64G in order to utilize them fully.
22960 	 *
22961 	 *
22962 	 * 4k pages systems
22963 	 * ~~~~~~~~~~~~~~~~
22964 	 *
22965 	 * A single L0 (sub-)page covers the address space.
22966 	 * - L1 pages cover 512G,
22967 	 * - L2 pages cover 1G,
22968 	 * - L3 pages cover 2M.
22969 	 *
22970 	 * The long tail of processes on a system will tend to have a VA usage
22971 	 * (ignoring the shared regions) in the 100s of MB order of magnitude.
22972 	 * This is achievable with a single L1 and a few L2s without
22973 	 * randomization.
22974 	 *
22975 	 * However once randomization is introduced, the system will immediately
22976 	 * need several L1s and many more L2s. As a result:
22977 	 *
22978 	 * - on embedded devices, the cost of these extra pages isn't
22979 	 *   sustainable, and we just disable the feature entirely,
22980 	 *
22981 	 * - on macOS we align ranges to a 512G boundary so that the extra L1
22982 	 *   pages can be used to their full potential.
22983 	 */
22984 
22985 	/*
22986 	 * note: this function assumes _non-exotic mappings_,
22987 	 * which is why it uses the native kernel's PAGE_SHIFT.
22988 	 */
22989 #if XNU_PLATFORM_MacOSX
22990 	pte_depth = PAGE_SHIFT > 12 ? 2 : 3;
22991 #else /* !XNU_PLATFORM_MacOSX */
22992 	pte_depth = PAGE_SHIFT > 12 ? 1 : 0;
22993 #endif /* !XNU_PLATFORM_MacOSX */
22994 
22995 	if (pte_depth == 0) {
22996 		return 0;
22997 	}
22998 
22999 	return (1ull << ((PAGE_SHIFT - 3) * pte_depth + PAGE_SHIFT)) - 1;
23000 }
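/*
 * Worked examples (illustrative) for the computation above:
 * - 16k pages (PAGE_SHIFT == 14), embedded (pte_depth == 1):
 *       (1ull << (11 * 1 + 14)) - 1 == 32M - 1   -> 32M alignment (L3 coverage)
 * - 16k pages, macOS (pte_depth == 2):
 *       (1ull << (11 * 2 + 14)) - 1 == 64G - 1   -> 64G alignment (L2 coverage)
 * - 4k pages (PAGE_SHIFT == 12), macOS (pte_depth == 3):
 *       (1ull << (9 * 3 + 12)) - 1 == 512G - 1   -> 512G alignment (L1 coverage)
 * - 4k pages, embedded (pte_depth == 0): ranges disabled (mask 0).
 */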
23001 
23002 /*
23003  * vm_map_range_configure:
23004  *	configures the user vm_map ranges by increasing the maximum VA range of
23005  *  the map and carving out a range at the end of VA space (searching backwards
23006  *  in the newly expanded map).
23007  */
23008 kern_return_t
23009 vm_map_range_configure(vm_map_t map)
23010 {
23011 	const vm_map_offset_t offmask = vm_map_range_offmask();
23012 	struct mach_vm_range data_range;
23013 	vm_map_offset_t default_end;
23014 	kern_return_t kr;
23015 
23016 	if (!vm_map_is_64bit(map) || vm_map_is_exotic(map) || offmask == 0) {
23017 		/*
23018 		 * No point doing VM ranges in a 32-bit or exotic address space (or with no alignment mask).
23019 		 */
23020 		return KERN_NOT_SUPPORTED;
23021 	}
23022 
23023 	/* Should not be applying ranges to kernel map or kernel map submaps */
23024 	assert(vm_map_pmap(map) != kernel_pmap);
23025 
23026 #if XNU_PLATFORM_MacOSX
23027 
23028 	/*
23029 	 * on macOS, the address space is a massive 47 bits (128T),
23030 	 * with several carve outs that processes can't use:
23031 	 * - the shared region
23032 	 * - the commpage region
23033 	 * - the GPU carve out (if applicable)
23034 	 *
23035 	 * and when nano-malloc is in use it desires memory at the 96T mark.
23036 	 *
23037 	 * However, their location is architecture dependent:
23038 	 * - On intel, the shared region and commpage are
23039 	 *   at the very end of the usable address space (above +127T),
23040 	 *   and there is no GPU carve out, and pthread wants to place
23041 	 *   threads at the 112T mark (0x70T).
23042 	 *
23043 	 * - On arm64, these are in the same spot as on embedded devices:
23044 	 *   o shared region:   [ 6G,  10G)  [ will likely grow over time ]
23045 	 *   o commpage region: [63G,  64G)
23046 	 *   o GPU carve out:   [64G, 448G)
23047 	 *
23048 	 * This is convenient because the mappings at the end of the address
23049 	 * space (when they exist) are made by the kernel.
23050 	 *
23051 	 * The policy is to allocate a random 1T for the data heap
23052 	 * at the end of the address space, in the:
23053 	 * - [0x71, 0x7f) range on Intel (to leave space for pthread stacks)
23054 	 * - [0x61, 0x7f) range on arm64 (to leave space for Nano malloc).
23055 	 */
23056 
23057 	/* see NANOZONE_SIGNATURE in libmalloc */
23058 #if __x86_64__
23059 	default_end = 0x71ull << 40;
23060 #else
23061 	default_end = 0x61ull << 40;
23062 #endif
23063 	data_range  = vm_map_range_random_uniform(1ull << 40,
23064 	        default_end, 0x7full << 40, offmask);
23065 
23066 #else /* !XNU_PLATFORM_MacOSX */
23067 
23068 	/*
23069 	 * Embedded devices:
23070 	 *
23071 	 *   The default VA Size scales with the device physical memory.
23072 	 *
23073 	 *   Out of that:
23074 	 *   - the "zero" page typically uses 4G + some slide
23075 	 *   - the shared region uses SHARED_REGION_SIZE bytes (4G)
23076 	 *
23077 	 *   Without the use of jumbo or any adjustment to the address space,
23078 	 *   a default VM map typically looks like this:
23079 	 *
23080 	 *       0G -->╒════════════╕
23081 	 *             │  pagezero  │
23082 	 *             │  + slide   │
23083 	 *      ~4G -->╞════════════╡<-- vm_map_min(map)
23084 	 *             │            │
23085 	 *       6G -->├────────────┤
23086 	 *             │   shared   │
23087 	 *             │   region   │
23088 	 *      10G -->├────────────┤
23089 	 *             │            │
23090 	 *   max_va -->├────────────┤<-- vm_map_max(map)
23091 	 *             │            │
23092 	 *             ╎   jumbo    ╎
23093 	 *             ╎            ╎
23094 	 *             │            │
23095 	 *      63G -->╞════════════╡<-- MACH_VM_MAX_ADDRESS
23096 	 *             │  commpage  │
23097 	 *      64G -->├────────────┤<-- MACH_VM_MIN_GPU_CARVEOUT_ADDRESS
23098 	 *             │            │
23099 	 *             ╎    GPU     ╎
23100 	 *             ╎  carveout  ╎
23101 	 *             │            │
23102 	 *     448G -->├────────────┤<-- MACH_VM_MAX_GPU_CARVEOUT_ADDRESS
23103 	 *             │            │
23104 	 *             ╎            ╎
23105 	 *             ╎            ╎
23106 	 *             │            │
23107 	 *     512G -->╘════════════╛<-- (1ull << ARM_16K_TT_L1_SHIFT)
23108 	 *
23109 	 *   When this drawing was made, "max_va" was smaller than
23110 	 *   ARM64_MAX_OFFSET_DEVICE_LARGE (~15.5G), leaving shy of
23111 	 *   12G of address space for the zero-page, slide, files,
23112 	 *   binaries, heap ...
23113 	 *
23114 	 *   We will want to make a "heap/data" carve out, sized at half
23115 	 *   of that usable space, inside the jumbo range, assuming
23116 	 *   that this is less than a fourth of the jumbo range.
23117 	 *
23118 	 *   The assert below intends to catch when max_va grows
23119 	 *   too large for this heuristic.
23120 	 */
23121 
23122 	vm_map_lock_read(map);
23123 	default_end = vm_map_max(map);
23124 	vm_map_unlock_read(map);
23125 
23126 	/*
23127 	 * Check that we're not already jumbo'd,
23128 	 * or our address space was somehow modified.
23129 	 *
23130 	 * If so we cannot guarantee that we can set up the ranges
23131 	 * safely without interfering with the existing map.
23132 	 */
23133 	if (default_end > vm_compute_max_offset(true)) {
23134 		return KERN_NO_SPACE;
23135 	}
23136 
23137 	if (pmap_max_offset(true, ARM_PMAP_MAX_OFFSET_DEFAULT)) {
23138 		/*
23139 		 * an override boot-arg was set, disable user-ranges
23140 		 *
23141 		 * XXX: this is problematic because it means these boot-args
23142 		 *      no longer test the behavior that changing the value
23143 		 *      of ARM64_MAX_OFFSET_DEVICE_* would have.
23144 		 */
23145 		return KERN_NOT_SUPPORTED;
23146 	}
23147 
23148 	/* expand the default VM space to the largest possible address */
23149 	vm_map_set_jumbo(map);
23150 
23151 	assert3u(4 * GiB(10), <=, vm_map_max(map) - default_end);
23152 	data_range = vm_map_range_random_uniform(GiB(10),
23153 	    default_end + PAGE_SIZE, vm_map_max(map), offmask);
23154 
23155 #endif /* !XNU_PLATFORM_MacOSX */
23156 
23157 	/*
23158 	 * Poke holes so that ASAN or people listing regions
23159 	 * do not think this space is free.
23160 	 */
23161 
23162 	if (default_end != data_range.min_address) {
23163 		kr = vm_map_enter(map, &default_end,
23164 		    data_range.min_address - default_end,
23165 		    0, VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(), VM_OBJECT_NULL,
23166 		    0, FALSE, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_DEFAULT);
23167 		assert(kr == KERN_SUCCESS);
23168 	}
23169 
23170 	if (data_range.max_address != vm_map_max(map)) {
23171 		vm_map_entry_t entry;
23172 		vm_size_t size;
23173 
23174 		vm_map_lock_read(map);
23175 		vm_map_lookup_entry_or_next(map, data_range.max_address, &entry);
23176 		if (entry != vm_map_to_entry(map)) {
23177 			size = entry->vme_start - data_range.max_address;
23178 		} else {
23179 			size = vm_map_max(map) - data_range.max_address;
23180 		}
23181 		vm_map_unlock_read(map);
23182 
23183 		kr = vm_map_enter(map, &data_range.max_address, size,
23184 		    0, VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(), VM_OBJECT_NULL,
23185 		    0, FALSE, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_DEFAULT);
23186 		assert(kr == KERN_SUCCESS);
23187 	}
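
	/*
	 * At this point the address space looks like this (a sketch; each
	 * VM_PROT_NONE hole is only entered when non-empty, and on macOS
	 * kernel-made mappings may already sit at the very top):
	 *
	 *   vm_map_min(map) -->╒════════════╕
	 *                      │  default   │<-- default_range
	 *       default_end -->├────────────┤
	 *                      │ PROT_NONE  │
	 *                      ├────────────┤<-- data_range.min_address
	 *                      │    data    │
	 *                      ├────────────┤<-- data_range.max_address
	 *                      │ PROT_NONE  │
	 *   vm_map_max(map) -->╘════════════╛
	 */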
23188 
23189 	vm_map_lock(map);
23190 	map->default_range.min_address = vm_map_min(map);
23191 	map->default_range.max_address = default_end;
23192 	map->data_range = data_range;
23193 	map->uses_user_ranges = true;
23194 	vm_map_unlock(map);
23195 
23196 	return KERN_SUCCESS;
23197 }
23198 
23199 /*
23200  * vm_map_range_fork:
23201  *	clones the array of ranges from old_map to new_map in support
23202  *  of a VM map fork.
23203  */
23204 void
23205 vm_map_range_fork(vm_map_t new_map, vm_map_t old_map)
23206 {
23207 	if (!old_map->uses_user_ranges) {
23208 		/* nothing to do */
23209 		return;
23210 	}
23211 
23212 	new_map->default_range = old_map->default_range;
23213 	new_map->data_range = old_map->data_range;
23214 
23215 	if (old_map->extra_ranges_count) {
23216 		vm_map_user_range_t otable, ntable;
23217 		uint16_t count;
23218 
23219 		otable = old_map->extra_ranges;
23220 		count  = old_map->extra_ranges_count;
23221 		ntable = kalloc_data(count * sizeof(struct vm_map_user_range),
23222 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
23223 		memcpy(ntable, otable,
23224 		    count * sizeof(struct vm_map_user_range));
23225 
23226 		new_map->extra_ranges_count = count;
23227 		new_map->extra_ranges = ntable;
23228 	}
23229 
23230 	new_map->uses_user_ranges = true;
23231 }
23232 
23233 /*
23234  * vm_map_get_user_range:
23235  *	copy the VM user range for the given VM map and range ID.
23236  */
23237 kern_return_t
23238 vm_map_get_user_range(
23239 	vm_map_t                map,
23240 	vm_map_range_id_t       range_id,
23241 	mach_vm_range_t         range)
23242 {
23243 	if (map == NULL || !map->uses_user_ranges || range == NULL) {
23244 		return KERN_INVALID_ARGUMENT;
23245 	}
23246 
23247 	switch (range_id) {
23248 	case UMEM_RANGE_ID_DEFAULT:
23249 		*range = map->default_range;
23250 		return KERN_SUCCESS;
23251 
23252 	case UMEM_RANGE_ID_HEAP:
23253 		*range = map->data_range;
23254 		return KERN_SUCCESS;
23255 
23256 	default:
23257 		return KERN_INVALID_ARGUMENT;
23258 	}
23259 }
23260 
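/*
 * vm_map_user_range_resolve:
 *	resolve which user range contains [addr, addr + size):
 *	default_range first, then data_range, then the extra ranges
 *	table. A span contained in none of them resolves to
 *	UMEM_RANGE_ID_DEFAULT with *range zeroed.
 */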
23261 static vm_map_range_id_t
23262 vm_map_user_range_resolve(
23263 	vm_map_t                map,
23264 	mach_vm_address_t       addr,
23265 	mach_vm_size_t          size,
23266 	mach_vm_range_t         range)
23267 {
23268 	struct mach_vm_range tmp;
23269 
23270 	vm_map_lock_assert_held(map);
23271 
23272 	static_assert(UMEM_RANGE_ID_DEFAULT == MACH_VM_RANGE_DEFAULT);
23273 	static_assert(UMEM_RANGE_ID_HEAP == MACH_VM_RANGE_DATA);
23274 
23275 	if (mach_vm_range_contains(&map->default_range, addr, size)) {
23276 		if (range) {
23277 			*range = map->default_range;
23278 		}
23279 		return UMEM_RANGE_ID_DEFAULT;
23280 	}
23281 
23282 	if (mach_vm_range_contains(&map->data_range, addr, size)) {
23283 		if (range) {
23284 			*range = map->data_range;
23285 		}
23286 		return UMEM_RANGE_ID_HEAP;
23287 	}
23288 
23289 	for (size_t i = 0; i < map->extra_ranges_count; i++) {
23290 		vm_map_user_range_t r = &map->extra_ranges[i];
23291 
23292 		tmp.min_address = r->vmur_min_address;
23293 		tmp.max_address = r->vmur_max_address;
23294 
23295 		if (mach_vm_range_contains(&tmp, addr, size)) {
23296 			if (range) {
23297 				*range = tmp;
23298 			}
23299 			return r->vmur_range_id;
23300 		}
23301 	}
23302 
23303 	if (range) {
23304 		range->min_address = range->max_address = 0;
23305 	}
23306 	return UMEM_RANGE_ID_DEFAULT;
23307 }
23308 
23309 static int
23310 vm_map_user_range_cmp(const void *e1, const void *e2)
23311 {
23312 	const struct vm_map_user_range *r1 = e1;
23313 	const struct vm_map_user_range *r2 = e2;
23314 
23315 	if (r1->vmur_min_address != r2->vmur_min_address) {
23316 		return r1->vmur_min_address < r2->vmur_min_address ? -1 : 1;
23317 	}
23318 
23319 	return 0;
23320 }
23321 
23322 static int
23323 mach_vm_range_recipe_v1_cmp(const void *e1, const void *e2)
23324 {
23325 	const mach_vm_range_recipe_v1_t *r1 = e1;
23326 	const mach_vm_range_recipe_v1_t *r2 = e2;
23327 
23328 	if (r1->range.min_address != r2->range.min_address) {
23329 		return r1->range.min_address < r2->range.min_address ? -1 : 1;
23330 	}
23331 
23332 	return 0;
23333 }
23334 
23335 /*!
23336  * @function mach_vm_range_create_v1()
23337  *
23338  * @brief
23339  * Handle the backend for mach_vm_range_create() for the
23340  * MACH_VM_RANGE_FLAVOR_V1 flavor.
23341  *
23342  * @description
23343  * This call allows creating "ranges" in the map of a task
23344  * that have special semantics/policies around placement of
23345  * new allocations (in the vm_map_locate_space() sense).
 * An illustrative usage sketch follows mach_vm_range_create() below.
23346  *
23347  * @returns
23348  * - KERN_SUCCESS on success
23349  * - KERN_INVALID_ARGUMENT for incorrect arguments
23350  * - KERN_NO_SPACE if the maximum amount of ranges would be exceeded
23351  * - KERN_MEMORY_PRESENT if any of the requested ranges
23352  *   overlaps with existing ranges or allocations in the map.
23353  */
23354 static kern_return_t
23355 mach_vm_range_create_v1(
23356 	vm_map_t                map,
23357 	mach_vm_range_recipe_v1_t *recipe,
23358 	uint32_t                new_count)
23359 {
23360 	const vm_offset_t mask = VM_MAP_PAGE_MASK(map);
23361 	vm_map_user_range_t table;
23362 	kern_return_t kr = KERN_SUCCESS;
23363 	uint16_t count;
23364 
23365 	struct mach_vm_range void1 = {
23366 		.min_address = map->default_range.max_address,
23367 		.max_address = map->data_range.min_address,
23368 	};
23369 	struct mach_vm_range void2 = {
23370 		.min_address = map->data_range.max_address,
23371 		.max_address = vm_map_max(map),
23372 	};
23373 
23374 	qsort(recipe, new_count, sizeof(mach_vm_range_recipe_v1_t),
23375 	    mach_vm_range_recipe_v1_cmp);
23376 
23377 	/*
23378 	 * Step 1: Validate that the recipes have no intersections.
23379 	 */
23380 
23381 	for (size_t i = 0; i < new_count; i++) {
23382 		mach_vm_range_t r = &recipe[i].range;
23383 		mach_vm_size_t s = mach_vm_range_size(r);
23384 
23385 		if (recipe[i].flags) {
23386 			return KERN_INVALID_ARGUMENT;
23387 		}
23388 
23389 		static_assert(UMEM_RANGE_ID_FIXED == MACH_VM_RANGE_FIXED);
23390 		switch (recipe[i].range_tag) {
23391 		case MACH_VM_RANGE_FIXED:
23392 			break;
23393 		default:
23394 			return KERN_INVALID_ARGUMENT;
23395 		}
23396 
23397 		if (!VM_MAP_PAGE_ALIGNED(r->min_address, mask) ||
23398 		    !VM_MAP_PAGE_ALIGNED(r->max_address, mask)) {
23399 			return KERN_INVALID_ARGUMENT;
23400 		}
23401 
23402 		if (!mach_vm_range_contains(&void1, r->min_address, s) &&
23403 		    !mach_vm_range_contains(&void2, r->min_address, s)) {
23404 			return KERN_INVALID_ARGUMENT;
23405 		}
23406 
23407 		if (i > 0 && recipe[i - 1].range.max_address >
23408 		    recipe[i].range.min_address) {
23409 			return KERN_INVALID_ARGUMENT;
23410 		}
23411 	}
23412 
23413 	vm_map_lock(map);
23414 
23415 	table = map->extra_ranges;
23416 	count = map->extra_ranges_count;
23417 
23418 	if (count + new_count > VM_MAP_EXTRA_RANGES_MAX) {
23419 		kr = KERN_NO_SPACE;
23420 		goto out_unlock;
23421 	}
23422 
23423 	/*
23424 	 * Step 2: Check that there is no intersection with existing ranges.
23425 	 */
23426 
23427 	for (size_t i = 0, j = 0; i < new_count && j < count;) {
23428 		mach_vm_range_t     r1 = &recipe[i].range;
23429 		vm_map_user_range_t r2 = &table[j];
23430 
23431 		if (r1->max_address <= r2->vmur_min_address) {
23432 			i++;
23433 		} else if (r2->vmur_max_address <= r1->min_address) {
23434 			j++;
23435 		} else {
23436 			kr = KERN_MEMORY_PRESENT;
23437 			goto out_unlock;
23438 		}
23439 	}
23440 
23441 	/*
23442 	 * Step 3: commit the new ranges.
23443 	 */
23444 
23445 	static_assert(VM_MAP_EXTRA_RANGES_MAX * sizeof(struct vm_map_user_range) <=
23446 	    KALLOC_SAFE_ALLOC_SIZE);
23447 
23448 	table = krealloc_data(table,
23449 	    count * sizeof(struct vm_map_user_range),
23450 	    (count + new_count) * sizeof(struct vm_map_user_range),
23451 	    Z_ZERO | Z_WAITOK | Z_NOFAIL);
23452 
23453 	for (size_t i = 0; i < new_count; i++) {
23454 		static_assert(MACH_VM_MAX_ADDRESS < (1ull << 56));
23455 
23456 		table[count + i] = (struct vm_map_user_range){
23457 			.vmur_min_address = recipe[i].range.min_address,
23458 			.vmur_max_address = recipe[i].range.max_address,
23459 			.vmur_range_id    = (vm_map_range_id_t)recipe[i].range_tag,
23460 		};
23461 	}
23462 
23463 	qsort(table, count + new_count,
23464 	    sizeof(struct vm_map_user_range), vm_map_user_range_cmp);
23465 
23466 	map->extra_ranges_count += new_count;
23467 	map->extra_ranges = table;
23468 
23469 out_unlock:
23470 	vm_map_unlock(map);
23471 
23472 	if (kr == KERN_SUCCESS) {
23473 		for (size_t i = 0; i < new_count; i++) {
23474 			vm_map_kernel_flags_t vmk_flags = {
23475 				.vmf_fixed = true,
23476 				.vmf_overwrite = true,
23477 				.vmkf_overwrite_immutable = true,
23478 				.vm_tag = recipe[i].vm_tag,
23479 			};
23480 			__assert_only kern_return_t kr2;
23481 
23482 			kr2 = vm_map_enter(map, &recipe[i].range.min_address,
23483 			    mach_vm_range_size(&recipe[i].range),
23484 			    0, vmk_flags, VM_OBJECT_NULL, 0, FALSE,
23485 			    VM_PROT_NONE, VM_PROT_ALL,
23486 			    VM_INHERIT_DEFAULT);
23487 			assert(kr2 == KERN_SUCCESS);
23488 		}
23489 	}
23490 	return kr;
23491 }
23492 
23493 kern_return_t
23494 mach_vm_range_create(
23495 	vm_map_t                map,
23496 	mach_vm_range_flavor_t  flavor,
23497 	mach_vm_range_recipes_raw_t recipe,
23498 	natural_t               size)
23499 {
23500 	if (map != current_map()) {
23501 		return KERN_INVALID_ARGUMENT;
23502 	}
23503 
23504 	if (!map->uses_user_ranges) {
23505 		return KERN_NOT_SUPPORTED;
23506 	}
23507 
23508 	if (size == 0) {
23509 		return KERN_SUCCESS;
23510 	}
23511 
23512 	if (flavor == MACH_VM_RANGE_FLAVOR_V1) {
23513 		mach_vm_range_recipe_v1_t *array;
23514 
23515 		if (size % sizeof(mach_vm_range_recipe_v1_t)) {
23516 			return KERN_INVALID_ARGUMENT;
23517 		}
23518 
23519 		size /= sizeof(mach_vm_range_recipe_v1_t);
23520 		if (size > VM_MAP_EXTRA_RANGES_MAX) {
23521 			return KERN_NO_SPACE;
23522 		}
23523 
23524 		array = (mach_vm_range_recipe_v1_t *)recipe;
23525 		return mach_vm_range_create_v1(map, array, size);
23526 	}
23527 
23528 	return KERN_INVALID_ARGUMENT;
23529 }
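
/*
 * Usage sketch (userspace side; illustrative only): carving one extra
 * fixed range. This assumes the mach_vm_range_recipe_v1_t layout used
 * above, that mach_vm_range_recipes_raw_t is the raw byte pointer the
 * MIG call takes, and that `start`/`end` are page-aligned addresses
 * falling in one of the unused gaps between the default range, the
 * data range, and vm_map_max(). The flags field must be 0 (it is
 * zero-initialized here):
 *
 *	mach_vm_range_recipe_v1_t recipe = {
 *		.range_tag = MACH_VM_RANGE_FIXED,
 *		.range     = { .min_address = start, .max_address = end },
 *	};
 *	kern_return_t kr = mach_vm_range_create(mach_task_self(),
 *	    MACH_VM_RANGE_FLAVOR_V1,
 *	    (mach_vm_range_recipes_raw_t)&recipe, sizeof(recipe));
 *
 * Overlap with existing ranges surfaces as KERN_MEMORY_PRESENT, and
 * exceeding VM_MAP_EXTRA_RANGES_MAX as KERN_NO_SPACE.
 */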
23530 
23531 #else /* !CONFIG_MAP_RANGES */
23532 
23533 kern_return_t
23534 mach_vm_range_create(
23535 	vm_map_t                map,
23536 	mach_vm_range_flavor_t  flavor,
23537 	mach_vm_range_recipes_raw_t recipe,
23538 	natural_t               size)
23539 {
23540 #pragma unused(map, flavor, recipe, size)
23541 	return KERN_NOT_SUPPORTED;
23542 }
23543 
23544 #endif /* !CONFIG_MAP_RANGES */
23545 
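/*
 * vm_map_kernel_flags_update_range_id:
 *	pick a backing range for an allocation that didn't specify one.
 *	Kernel map allocations default to the data kmem range; user map
 *	allocations whose vm_tag appears in vm_map_user_range_heap_map
 *	(an assumption here: typically malloc-family tags) are retargeted
 *	to UMEM_RANGE_ID_HEAP so that vm_map_locate_space() places them
 *	in data_range rather than default_range.
 */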
23546 void
23547 vm_map_kernel_flags_update_range_id(vm_map_kernel_flags_t *vmkf, vm_map_t map)
23548 {
23549 	if (map == kernel_map) {
23550 		if (vmkf->vmkf_range_id == KMEM_RANGE_ID_NONE) {
23551 			vmkf->vmkf_range_id = KMEM_RANGE_ID_DATA;
23552 		}
23553 #if CONFIG_MAP_RANGES
23554 	} else if (vmkf->vm_tag < VM_MEMORY_COUNT &&
23555 	    vmkf->vmkf_range_id == UMEM_RANGE_ID_DEFAULT &&
23556 	    bitmap_test(vm_map_user_range_heap_map, vmkf->vm_tag)) {
23557 		vmkf->vmkf_range_id = UMEM_RANGE_ID_HEAP;
23558 #endif /* CONFIG_MAP_RANGES */
23559 	}
23560 }
23561 
23562 /*
23563  * vm_map_entry_has_device_pager:
23564  * Check if the vm map entry specified by the virtual address has a device pager.
23565  * If the vm map entry does not exist or if the map is NULL, this returns FALSE.
23566  */
23567 boolean_t
23568 vm_map_entry_has_device_pager(vm_map_t map, vm_map_offset_t vaddr)
23569 {
23570 	vm_map_entry_t entry;
23571 	vm_object_t object;
23572 	boolean_t result;
23573 
23574 	if (map == NULL) {
23575 		return FALSE;
23576 	}
23577 
23578 	vm_map_lock(map);
23579 	while (TRUE) {
23580 		if (!vm_map_lookup_entry(map, vaddr, &entry)) {
23581 			result = FALSE;
23582 			break;
23583 		}
23584 		if (entry->is_sub_map) {
23585 			// Check the submap
23586 			vm_map_t submap = VME_SUBMAP(entry);
23587 			assert(submap != NULL);
23588 			vm_map_lock(submap);
23589 			vm_map_unlock(map);
23590 			map = submap;
23591 			continue;
23592 		}
23593 		object = VME_OBJECT(entry);
23594 		if (object != NULL && object->pager != NULL && is_device_pager_ops(object->pager->mo_pager_ops)) {
23595 			result = TRUE;
23596 			break;
23597 		}
23598 		result = FALSE;
23599 		break;
23600 	}
23601 
23602 	vm_map_unlock(map);
23603 	return result;
23604 }
23605 
23606 
23607 #if MACH_ASSERT
23608 
23609 extern int pmap_ledgers_panic;
23610 extern int pmap_ledgers_panic_leeway;
23611 
23612 #define LEDGER_DRIFT(__LEDGER)                    \
23613 	int             __LEDGER##_over;          \
23614 	ledger_amount_t __LEDGER##_over_total;    \
23615 	ledger_amount_t __LEDGER##_over_max;      \
23616 	int             __LEDGER##_under;         \
23617 	ledger_amount_t __LEDGER##_under_total;   \
23618 	ledger_amount_t __LEDGER##_under_max
23619 
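/*
 * The struct below accumulates the observed drift per ledger; e.g.
 * LEDGER_DRIFT(phys_footprint) expands to the six fields:
 *
 *	int             phys_footprint_over;
 *	ledger_amount_t phys_footprint_over_total;
 *	ledger_amount_t phys_footprint_over_max;
 *	int             phys_footprint_under;
 *	ledger_amount_t phys_footprint_under_total;
 *	ledger_amount_t phys_footprint_under_max;
 */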
23620 struct {
23621 	uint64_t        num_pmaps_checked;
23622 
23623 	LEDGER_DRIFT(phys_footprint);
23624 	LEDGER_DRIFT(internal);
23625 	LEDGER_DRIFT(internal_compressed);
23626 	LEDGER_DRIFT(external);
23627 	LEDGER_DRIFT(reusable);
23628 	LEDGER_DRIFT(iokit_mapped);
23629 	LEDGER_DRIFT(alternate_accounting);
23630 	LEDGER_DRIFT(alternate_accounting_compressed);
23631 	LEDGER_DRIFT(page_table);
23632 	LEDGER_DRIFT(purgeable_volatile);
23633 	LEDGER_DRIFT(purgeable_nonvolatile);
23634 	LEDGER_DRIFT(purgeable_volatile_compressed);
23635 	LEDGER_DRIFT(purgeable_nonvolatile_compressed);
23636 	LEDGER_DRIFT(tagged_nofootprint);
23637 	LEDGER_DRIFT(tagged_footprint);
23638 	LEDGER_DRIFT(tagged_nofootprint_compressed);
23639 	LEDGER_DRIFT(tagged_footprint_compressed);
23640 	LEDGER_DRIFT(network_volatile);
23641 	LEDGER_DRIFT(network_nonvolatile);
23642 	LEDGER_DRIFT(network_volatile_compressed);
23643 	LEDGER_DRIFT(network_nonvolatile_compressed);
23644 	LEDGER_DRIFT(media_nofootprint);
23645 	LEDGER_DRIFT(media_footprint);
23646 	LEDGER_DRIFT(media_nofootprint_compressed);
23647 	LEDGER_DRIFT(media_footprint_compressed);
23648 	LEDGER_DRIFT(graphics_nofootprint);
23649 	LEDGER_DRIFT(graphics_footprint);
23650 	LEDGER_DRIFT(graphics_nofootprint_compressed);
23651 	LEDGER_DRIFT(graphics_footprint_compressed);
23652 	LEDGER_DRIFT(neural_nofootprint);
23653 	LEDGER_DRIFT(neural_footprint);
23654 	LEDGER_DRIFT(neural_nofootprint_compressed);
23655 	LEDGER_DRIFT(neural_footprint_compressed);
23656 } pmap_ledgers_drift;
23657 
23658 void
23659 vm_map_pmap_check_ledgers(
23660 	pmap_t          pmap,
23661 	ledger_t        ledger,
23662 	int             pid,
23663 	char            *procname)
23664 {
23665 	ledger_amount_t bal;
23666 	boolean_t       do_panic;
23667 
23668 	do_panic = FALSE;
23669 
23670 	pmap_ledgers_drift.num_pmaps_checked++;
23671 
23672 #define LEDGER_CHECK_BALANCE(__LEDGER)                                  \
23673 MACRO_BEGIN                                                             \
23674 	int panic_on_negative = TRUE;                                   \
23675 	ledger_get_balance(ledger,                                      \
23676 	                   task_ledgers.__LEDGER,                       \
23677 	                   &bal);                                       \
23678 	ledger_get_panic_on_negative(ledger,                            \
23679 	                             task_ledgers.__LEDGER,             \
23680 	                             &panic_on_negative);               \
23681 	if (bal != 0) {                                                 \
23682 	        if (panic_on_negative ||                                \
23683 	            (pmap_ledgers_panic &&                              \
23684 	             pmap_ledgers_panic_leeway > 0 &&                   \
23685 	             (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) ||  \
23686 	              bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \
23687 	                do_panic = TRUE;                                \
23688 	        }                                                       \
23689 	        printf("LEDGER BALANCE proc %d (%s) "                   \
23690 	               "\"%s\" = %lld\n",                               \
23691 	               pid, procname, #__LEDGER, bal);                  \
23692 	        if (bal > 0) {                                          \
23693 	                pmap_ledgers_drift.__LEDGER##_over++;           \
23694 	                pmap_ledgers_drift.__LEDGER##_over_total += bal; \
23695 	                if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \
23696 	                        pmap_ledgers_drift.__LEDGER##_over_max = bal; \
23697 	                }                                               \
23698 	        } else if (bal < 0) {                                   \
23699 	                pmap_ledgers_drift.__LEDGER##_under++;          \
23700 	                pmap_ledgers_drift.__LEDGER##_under_total += bal; \
23701 	                if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \
23702 	                        pmap_ledgers_drift.__LEDGER##_under_max = bal; \
23703 	                }                                               \
23704 	        }                                                       \
23705 	}                                                               \
23706 MACRO_END
23707 
23708 	LEDGER_CHECK_BALANCE(phys_footprint);
23709 	LEDGER_CHECK_BALANCE(internal);
23710 	LEDGER_CHECK_BALANCE(internal_compressed);
23711 	LEDGER_CHECK_BALANCE(external);
23712 	LEDGER_CHECK_BALANCE(reusable);
23713 	LEDGER_CHECK_BALANCE(iokit_mapped);
23714 	LEDGER_CHECK_BALANCE(alternate_accounting);
23715 	LEDGER_CHECK_BALANCE(alternate_accounting_compressed);
23716 	LEDGER_CHECK_BALANCE(page_table);
23717 	LEDGER_CHECK_BALANCE(purgeable_volatile);
23718 	LEDGER_CHECK_BALANCE(purgeable_nonvolatile);
23719 	LEDGER_CHECK_BALANCE(purgeable_volatile_compressed);
23720 	LEDGER_CHECK_BALANCE(purgeable_nonvolatile_compressed);
23721 	LEDGER_CHECK_BALANCE(tagged_nofootprint);
23722 	LEDGER_CHECK_BALANCE(tagged_footprint);
23723 	LEDGER_CHECK_BALANCE(tagged_nofootprint_compressed);
23724 	LEDGER_CHECK_BALANCE(tagged_footprint_compressed);
23725 	LEDGER_CHECK_BALANCE(network_volatile);
23726 	LEDGER_CHECK_BALANCE(network_nonvolatile);
23727 	LEDGER_CHECK_BALANCE(network_volatile_compressed);
23728 	LEDGER_CHECK_BALANCE(network_nonvolatile_compressed);
23729 	LEDGER_CHECK_BALANCE(media_nofootprint);
23730 	LEDGER_CHECK_BALANCE(media_footprint);
23731 	LEDGER_CHECK_BALANCE(media_nofootprint_compressed);
23732 	LEDGER_CHECK_BALANCE(media_footprint_compressed);
23733 	LEDGER_CHECK_BALANCE(graphics_nofootprint);
23734 	LEDGER_CHECK_BALANCE(graphics_footprint);
23735 	LEDGER_CHECK_BALANCE(graphics_nofootprint_compressed);
23736 	LEDGER_CHECK_BALANCE(graphics_footprint_compressed);
23737 	LEDGER_CHECK_BALANCE(neural_nofootprint);
23738 	LEDGER_CHECK_BALANCE(neural_footprint);
23739 	LEDGER_CHECK_BALANCE(neural_nofootprint_compressed);
23740 	LEDGER_CHECK_BALANCE(neural_footprint_compressed);
23741 
23742 	if (do_panic) {
23743 		if (pmap_ledgers_panic) {
23744 			panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers",
23745 			    pmap, pid, procname);
23746 		} else {
23747 			printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
23748 			    pmap, pid, procname);
23749 		}
23750 	}
23751 }
23752 
23753 void
23754 vm_map_pmap_set_process(
23755 	vm_map_t map,
23756 	int pid,
23757 	char *procname)
23758 {
23759 	pmap_set_process(vm_map_pmap(map), pid, procname);
23760 }
23761 
23762 #endif /* MACH_ASSERT */
23763