xref: /xnu-11215.1.10/osfmk/vm/vm_memory_entry.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/memory_entry.h>
30 #include <mach/memory_entry_server.h>
31 #include <mach/vm_map_server.h>
32 #include <mach/mach_vm_server.h>
33 #include <vm/vm_purgeable_internal.h>
34 #include <mach/mach_host_server.h>
35 #include <IOKit/IOBSD.h>
36 #include <vm/vm_memory_entry_xnu.h>
37 #include <vm/vm_map_internal.h>
38 #include <vm/memory_object_internal.h>
39 #include <vm/vm_protos_internal.h>
40 #include <vm/vm_object_internal.h>
41 #include <vm/vm_iokit.h>
42 
43 static void mach_memory_entry_no_senders(ipc_port_t, mach_port_mscount_t);
44 
45 IPC_KOBJECT_DEFINE(IKOT_NAMED_ENTRY,
46     .iko_op_stable     = true,
47     .iko_op_no_senders = mach_memory_entry_no_senders);
48 
49 /*
50  * mach_make_memory_entry_64
51  *
52  * Think of it as a two-stage vm_remap() operation.  First
53  * you get a handle.  Second, you map that handle
54  * somewhere else, rather than doing it all at once (and
55  * without needing access to the whole other map).
56  */
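/*
 * Illustrative user-space sketch of that two-stage pattern (not part of
 * this file; "src_addr", "dst_addr", "nbytes" and "child_task" are
 * hypothetical):
 *
 *	mach_port_t handle = MACH_PORT_NULL;
 *	memory_object_size_t len = nbytes;
 *
 *	// Stage 1: get a handle on [src_addr, src_addr + len) in our own map.
 *	kr = mach_make_memory_entry_64(mach_task_self(), &len,
 *	    (memory_object_offset_t)src_addr,
 *	    VM_PROT_READ | VM_PROT_WRITE, &handle, MACH_PORT_NULL);
 *
 *	// Stage 2: map that handle somewhere else (here, into another task).
 *	kr = mach_vm_map(child_task, &dst_addr, len, 0, VM_FLAGS_ANYWHERE,
 *	    handle, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 */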
57 kern_return_t
58 mach_make_memory_entry_64(
59 	vm_map_t                target_map,
60 	memory_object_size_ut  *size_u,
61 	memory_object_offset_ut offset_u,
62 	vm_prot_ut              permission_u,
63 	ipc_port_t              *object_handle,
64 	ipc_port_t              parent_handle)
65 {
66 	vm_named_entry_kernel_flags_t   vmne_kflags;
67 
68 	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
69 	if (VM_SANITIZE_UNSAFE_UNWRAP(permission_u) & MAP_MEM_LEDGER_TAGGED) {
70 		vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
71 	}
72 	return mach_make_memory_entry_internal(target_map,
73 	           size_u,
74 	           offset_u,
75 	           permission_u,
76 	           vmne_kflags,
77 	           object_handle,
78 	           parent_handle);
79 }
80 
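/*
 * Split the caller-supplied "permission" word into its components: the
 * VM protections (VM_PROT_ALL bits), the VM_PROT_IS_MASK flag, the
 * MAP_MEM_* access/caching hint, and the MAP_MEM_USE_DATA_ADDR /
 * MAP_MEM_4K_DATA_ADDR addressing flags.
 */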
81 static inline void
82 vm_memory_entry_decode_perm(
83 	vm_prot_t                       permission,
84 	unsigned int                   *access,
85 	vm_prot_t                      *protections,
86 	bool                           *mask_protections,
87 	bool                           *use_data_addr,
88 	bool                           *use_4K_compat)
89 {
90 	*protections = permission & VM_PROT_ALL;
91 	*mask_protections = permission & VM_PROT_IS_MASK;
92 	*access = GET_MAP_MEM(permission);
93 	*use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
94 	*use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);
95 }
96 
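/*
 * Worked example (illustrative values): with use_data_addr set, an
 * unaligned offset of 0x6345 against a map_start truncated to 0x4000
 * yields an offset_in_page of 0x2345; with use_4K_compat the low 12
 * bits are cleared as well, giving 0x2000.  Without either flag the
 * stashed offset is always 0.
 */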
97 static inline vm_map_offset_t
98 vm_memory_entry_get_offset_in_page(
99 	vm_map_offset_t                 offset,
100 	vm_map_offset_t                 map_start,
101 	bool                            use_data_addr,
102 	bool                            use_4K_compat)
103 {
104 	vm_map_offset_t         offset_in_page;
105 
106 	if (use_data_addr || use_4K_compat) {
107 		offset_in_page = offset - map_start;
108 		if (use_4K_compat) {
109 			offset_in_page &= ~((signed)(0xFFF));
110 		}
111 	} else {
112 		offset_in_page = 0;
113 	}
114 
115 	return offset_in_page;
116 }
117 
118 static inline kern_return_t
119 mach_make_memory_entry_cleanup(
120 	kern_return_t           kr,
121 	vm_map_t                target_map __unused,
122 	memory_object_size_ut  *size_u,
123 	vm_map_offset_ut        offset_u __unused,
124 	vm_prot_t               permission __unused,
125 	vm_named_entry_t        user_entry __unused)
126 {
127 	DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry "
128 	    "%p kr 0x%x\n", target_map, VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
129 	    VM_SANITIZE_UNSAFE_UNWRAP(*size_u), permission, user_entry,
130 	    vm_sanitize_get_kr(kr));
131 	/*
132 	 * Set safe size value on failed return
133 	 */
134 	*size_u = vm_sanitize_wrap_size(0);
135 	return vm_sanitize_get_kr(kr);
136 }
137 
138 static inline kern_return_t
139 mach_make_memory_entry_mem_only_sanitize(
140 	vm_map_t                target_map,
141 	memory_object_size_ut   size_u,
142 	vm_map_offset_ut        offset_u,
143 	vm_map_offset_t        *map_start,
144 	vm_map_offset_t        *map_end,
145 	vm_map_size_t          *map_size)
146 {
147 	/*
148 	 * This code path doesn't use offset and size. They don't need to be
149 	 * validated. However, in order to maintain backward compatibility, some
150 	 * checks on offset and size have been retained.
151 	 */
152 	return vm_sanitize_addr_size(offset_u, size_u,
153 	           VM_SANITIZE_CALLER_MACH_MAKE_MEMORY_ENTRY,
154 	           target_map, VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
155 	           map_start, map_end, map_size);
156 }
157 
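/*
 * MAP_MEM_ONLY: no new named entry is created; only the access/WIMG
 * caching mode of the parent entry's VM object is updated, and
 * *object_handle is returned as IP_NULL.
 */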
158 static kern_return_t
159 mach_make_memory_entry_mem_only(
160 	vm_map_t                        target_map,
161 	memory_object_size_ut          *size_u,
162 	memory_object_offset_ut         offset_u,
163 	vm_prot_t                       permission,
164 	ipc_port_t                     *object_handle,
165 	vm_named_entry_t                parent_entry)
166 {
167 	boolean_t               parent_is_object;
168 	vm_object_t             object;
169 	unsigned int            access;
170 	vm_prot_t               protections;
171 	bool                    mask_protections;
172 	unsigned int            wimg_mode;
173 	bool                    use_data_addr;
174 	bool                    use_4K_compat;
175 	vm_named_entry_t        user_entry __unused = NULL;
176 	kern_return_t           kr;
177 	vm_map_size_t           map_size;
178 	vm_map_offset_t         map_start, map_end;
179 
180 	/*
181 	 * Sanitize addr and size. Permissions have been sanitized prior to
182 	 * dispatch
183 	 */
184 	kr = mach_make_memory_entry_mem_only_sanitize(target_map,
185 	    *size_u,
186 	    offset_u,
187 	    &map_start,
188 	    &map_end,
189 	    &map_size);
190 	if (__improbable(kr != KERN_SUCCESS)) {
191 		return mach_make_memory_entry_cleanup(kr, target_map,
192 		           size_u, offset_u, permission, user_entry);
193 	}
194 
195 	vm_memory_entry_decode_perm(permission, &access, &protections,
196 	    &mask_protections, &use_data_addr, &use_4K_compat);
197 
198 	if (use_data_addr || use_4K_compat || parent_entry == NULL) {
199 		return mach_make_memory_entry_cleanup(KERN_INVALID_ARGUMENT, target_map,
200 		           size_u, offset_u, permission, user_entry);
201 	}
202 
203 	parent_is_object = parent_entry->is_object;
204 	if (!parent_is_object) {
205 		return mach_make_memory_entry_cleanup(KERN_INVALID_ARGUMENT, target_map,
206 		           size_u, offset_u, permission, user_entry);
207 	}
208 
209 	if ((access != parent_entry->access) &&
210 	    !(parent_entry->protection & VM_PROT_WRITE)) {
211 		return mach_make_memory_entry_cleanup(KERN_INVALID_RIGHT, target_map,
212 		           size_u, offset_u, permission, user_entry);
213 	}
214 
215 	object = vm_named_entry_to_vm_object(parent_entry);
216 	if (parent_is_object && object != VM_OBJECT_NULL) {
217 		wimg_mode = object->wimg_bits;
218 	} else {
219 		wimg_mode = VM_WIMG_USE_DEFAULT;
220 	}
221 	vm_prot_to_wimg(access, &wimg_mode);
222 	if (access != MAP_MEM_NOOP) {
223 		parent_entry->access = access;
224 	}
225 	if (parent_is_object && object &&
226 	    (access != MAP_MEM_NOOP) &&
227 	    (!(object->nophyscache))) {
228 		if (object->wimg_bits != wimg_mode) {
229 			vm_object_lock(object);
230 			vm_object_change_wimg_mode(object, wimg_mode);
231 			vm_object_unlock(object);
232 		}
233 	}
234 	if (object_handle) {
235 		*object_handle = IP_NULL;
236 	}
237 	DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry "
238 	    "%p kr 0x%x\n", target_map, VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
239 	    VM_SANITIZE_UNSAFE_UNWRAP(*size_u), permission, user_entry, KERN_SUCCESS);
240 	/*
241 	 * TODO: Size isn't being set in this path
242 	 */
243 	return KERN_SUCCESS;
244 }
245 
246 #if CONFIG_PROB_GZALLOC
247 static inline vm_map_offset_ut
248 vm_memory_entry_pgz_decode_offset(
249 	vm_map_t                        target_map,
250 	vm_map_offset_ut                offset_u,
251 	memory_object_size_ut          *size_u __unused)
252 {
253 	if (target_map == NULL || target_map->pmap == kernel_pmap) {
254 		vm_map_offset_t pgz_offset;
255 
256 		pgz_offset = pgz_decode(VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
257 		    VM_SANITIZE_UNSAFE_UNWRAP(*size_u));
258 		return vm_sanitize_wrap_addr(pgz_offset);
259 	}
260 	return offset_u;
261 }
262 #endif /* CONFIG_PROB_GZALLOC */
263 
264 static inline kern_return_t
265 mach_make_memory_entry_generic_sanitize(
266 	vm_map_t                target_map,
267 	memory_object_size_ut   size_u,
268 	vm_map_offset_ut        offset_u,
269 	vm_map_offset_t        *map_start,
270 	vm_map_offset_t        *map_end,
271 	vm_map_size_t          *map_size,
272 	vm_map_offset_t        *offset)
273 {
274 	kern_return_t           kr;
275 
276 	/*
277 	 * Validate start and end
278 	 */
279 	kr = vm_sanitize_addr_size(offset_u, size_u,
280 	    VM_SANITIZE_CALLER_MACH_MAKE_MEMORY_ENTRY,
281 	    target_map, VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
282 	    map_start, map_end, map_size);
283 	if (__improbable(kr != KERN_SUCCESS)) {
284 		return kr;
285 	}
286 	/*
287 	 * Validate offset
288 	 */
289 	kr = vm_sanitize_offset(offset_u, VM_SANITIZE_CALLER_MACH_MAKE_MEMORY_ENTRY,
290 	    *map_start, *map_end, offset);
291 	if (__improbable(kr != KERN_SUCCESS)) {
292 		return kr;
293 	}
294 
295 	return KERN_SUCCESS;
296 }
297 
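/*
 * MAP_MEM_NAMED_CREATE: create a brand new VM object (not backed by any
 * existing mapping) and wrap it in a named entry.  Illustrative
 * user-space sketch of creating a purgeable region this way
 * (hypothetical variables):
 *
 *	memory_object_size_t len = 0x10000;
 *	mach_port_t handle = MACH_PORT_NULL;
 *	kr = mach_make_memory_entry_64(mach_task_self(), &len, 0,
 *	    MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE |
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    &handle, MACH_PORT_NULL);
 */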
298 static kern_return_t
299 mach_make_memory_entry_named_create(
300 	vm_map_t                        target_map,
301 	memory_object_size_ut          *size_u,
302 	vm_map_offset_ut                offset_u,
303 	vm_prot_t                       permission,
304 	vm_named_entry_kernel_flags_t   vmne_kflags,
305 	ipc_port_t                     *object_handle)
306 {
307 	vm_object_t             object;
308 	unsigned int            access;
309 	vm_prot_t               protections;
310 	bool                    mask_protections;
311 	unsigned int            wimg_mode;
312 	bool                    use_data_addr;
313 	bool                    use_4K_compat;
314 	int                     ledger_flags = 0;
315 	task_t                  owner;
316 	bool                    fully_owned = false;
317 	vm_named_entry_t        user_entry = NULL;
318 	kern_return_t           kr;
319 	vm_map_size_t           map_size;
320 	vm_map_offset_t         map_start, map_end, offset;
321 
322 	if (VM_SANITIZE_UNSAFE_IS_ZERO(*size_u)) {
323 		*object_handle = IPC_PORT_NULL;
324 		return mach_make_memory_entry_cleanup(KERN_SUCCESS, target_map,
325 		           size_u, offset_u, permission, user_entry);
326 	}
327 
328 #if CONFIG_PROB_GZALLOC
329 	/*
330 	 * If offset is PGZ protected we need PGZ to fix it up to the right
331 	 * value prior to validation and use.
332 	 */
333 	offset_u = vm_memory_entry_pgz_decode_offset(target_map, offset_u, size_u);
334 #endif /* CONFIG_PROB_GZALLOC */
335 
336 	/*
337 	 * Sanitize addr and size. Permissions have been sanitized prior to
338 	 * dispatch
339 	 */
340 	kr = mach_make_memory_entry_generic_sanitize(target_map,
341 	    *size_u,
342 	    offset_u,
343 	    &map_start,
344 	    &map_end,
345 	    &map_size,
346 	    &offset);
347 	if (__improbable(kr != KERN_SUCCESS)) {
348 		return mach_make_memory_entry_cleanup(kr, target_map,
349 		           size_u, offset_u, permission, user_entry);
350 	}
351 
352 	assert(map_size != 0);
353 
354 	vm_memory_entry_decode_perm(permission, &access, &protections,
355 	    &mask_protections, &use_data_addr, &use_4K_compat);
356 
357 	if (use_data_addr || use_4K_compat) {
358 		return mach_make_memory_entry_cleanup(KERN_INVALID_ARGUMENT, target_map,
359 		           size_u, offset_u, permission, user_entry);
360 	}
361 
362 	/*
363 	 * Force the creation of the VM object now.
364 	 */
365 #if __LP64__
366 	if (map_size > ANON_MAX_SIZE) {
367 		return mach_make_memory_entry_cleanup(KERN_FAILURE, target_map,
368 		           size_u, offset_u, permission, user_entry);
369 	}
370 #endif /* __LP64__ */
371 
372 	object = vm_object_allocate(map_size);
373 	assert(object != VM_OBJECT_NULL);
374 	vm_object_lock(object);
375 
376 	/*
377 	 * XXX
378 	 * We use this path when we want to make sure that
379 	 * nobody messes with the object (coalesce, for
380 	 * example) before we map it.
381 	 * We might want to use these objects for transposition via
382 	 * vm_object_transpose() too, so we don't want any copy or
383 	 * shadow objects either...
384 	 */
385 	object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
386 	VM_OBJECT_SET_TRUE_SHARE(object, TRUE);
387 
388 	owner = current_task();
389 	if ((permission & MAP_MEM_PURGABLE) ||
390 	    vmne_kflags.vmnekf_ledger_tag) {
391 		assert(object->vo_owner == NULL);
392 		assert(object->resident_page_count == 0);
393 		assert(object->wired_page_count == 0);
394 		assert(owner != TASK_NULL);
395 		if (vmne_kflags.vmnekf_ledger_no_footprint) {
396 			ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
397 			object->vo_no_footprint = TRUE;
398 		}
399 		if (permission & MAP_MEM_PURGABLE) {
400 			if (!(permission & VM_PROT_WRITE)) {
401 				/* if we can't write, we can't purge */
402 				vm_object_unlock(object);
403 				vm_object_deallocate(object);
404 				return mach_make_memory_entry_cleanup(KERN_INVALID_ARGUMENT,
405 				           target_map, size_u, offset_u, permission, user_entry);
406 			}
407 			VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_NONVOLATILE);
408 			if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
409 				VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(object, TRUE);
410 			}
411 #if __arm64__
412 			if (owner->task_legacy_footprint) {
413 				/*
414 				 * For ios11, we failed to account for
415 				 * this memory.  Keep doing that for
416 				 * legacy apps (built before ios12),
417 				 * for backwards compatibility's sake...
418 				 */
419 				owner = kernel_task;
420 			}
421 #endif /* __arm64__ */
422 			vm_purgeable_nonvolatile_enqueue(object, owner);
423 			/* all memory in this named entry is "owned" */
424 			fully_owned = true;
425 		}
426 	}
427 
428 	if (vmne_kflags.vmnekf_ledger_tag) {
429 		/*
430 		 * Bill this object to the current task's
431 		 * ledgers for the given tag.
432 		 */
433 		if (vmne_kflags.vmnekf_ledger_no_footprint) {
434 			ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
435 		}
436 		kr = vm_object_ownership_change(
437 			object,
438 			vmne_kflags.vmnekf_ledger_tag,
439 			owner,         /* new owner */
440 			ledger_flags,
441 			FALSE);         /* task_objq locked? */
442 		if (kr != KERN_SUCCESS) {
443 			vm_object_unlock(object);
444 			vm_object_deallocate(object);
445 			return mach_make_memory_entry_cleanup(kr, target_map,
446 			           size_u, offset_u, permission, user_entry);
447 		}
448 		/* all memory in this named entry is "owned" */
449 		fully_owned = true;
450 	}
451 
452 #if CONFIG_SECLUDED_MEMORY
453 	if (secluded_for_iokit && /* global boot-arg */
454 	    ((permission & MAP_MEM_GRAB_SECLUDED))) {
455 		object->can_grab_secluded = TRUE;
456 		assert(!object->eligible_for_secluded);
457 	}
458 #endif /* CONFIG_SECLUDED_MEMORY */
459 
460 	/*
461 	 * The VM object is brand new and nobody else knows about it,
462 	 * so we don't need to lock it.
463 	 */
464 
465 	wimg_mode = object->wimg_bits;
466 	vm_prot_to_wimg(access, &wimg_mode);
467 	if (access != MAP_MEM_NOOP) {
468 		object->wimg_bits = wimg_mode;
469 	}
470 
471 	vm_object_unlock(object);
472 
473 	/* the object has no pages, so no WIMG bits to update here */
474 
475 	user_entry = mach_memory_entry_allocate(object_handle);
476 	vm_named_entry_associate_vm_object(
477 		user_entry,
478 		object,
479 		0,
480 		map_size,
481 		(protections & VM_PROT_ALL));
482 	user_entry->internal = TRUE;
483 	user_entry->is_sub_map = FALSE;
484 	user_entry->offset = 0;
485 	user_entry->data_offset = 0;
486 	user_entry->protection = protections;
487 	user_entry->access = access;
488 	user_entry->size = map_size;
489 	user_entry->is_fully_owned = fully_owned;
490 
491 	/* user_object pager and internal fields are not used */
492 	/* when the object field is filled in.		      */
493 
494 	*size_u = vm_sanitize_wrap_size(user_entry->size - user_entry->data_offset);
495 	DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry "
496 	    "%p kr 0x%x\n", target_map, offset, VM_SANITIZE_UNSAFE_UNWRAP(*size_u),
497 	    permission, user_entry, KERN_SUCCESS);
498 	return KERN_SUCCESS;
499 }
500 
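/*
 * MAP_MEM_VM_COPY: snapshot the range with vm_map_copyin_internal() and
 * hang the resulting vm_map_copy_t off the named entry (is_copy), rather
 * than pointing the entry at a single VM object.
 */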
501 static kern_return_t
502 mach_make_memory_entry_copy(
503 	vm_map_t                target_map,
504 	memory_object_size_ut  *size_u,
505 	vm_map_offset_ut        offset_u,
506 	vm_prot_t               permission,
507 	ipc_port_t             *object_handle)
508 {
509 	unsigned int            access;
510 	vm_prot_t               protections;
511 	bool                    mask_protections;
512 	bool                    use_data_addr;
513 	bool                    use_4K_compat;
514 	vm_named_entry_t        user_entry = NULL;
515 	vm_map_copy_t           copy;
516 	/*
517 	 * Stash the offset in the page for use by vm_map_enter_mem_object()
518 	 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
519 	 */
520 	vm_object_offset_t      offset_in_page;
521 	kern_return_t           kr;
522 	vm_map_size_t           map_size;
523 	vm_map_offset_t         map_start, map_end, offset;
524 
525 	if (VM_SANITIZE_UNSAFE_IS_ZERO(*size_u)) {
526 		return mach_make_memory_entry_cleanup(KERN_INVALID_ARGUMENT, target_map,
527 		           size_u, offset_u, permission, user_entry);
528 	}
529 
530 #if CONFIG_PROB_GZALLOC
531 	/*
532 	 * If offset is PGZ protected we need PGZ to fix it up to the right
533 	 * value prior to validation and use.
534 	 */
535 	offset_u = vm_memory_entry_pgz_decode_offset(target_map, offset_u, size_u);
536 #endif /* CONFIG_PROB_GZALLOC */
537 
538 	/*
539 	 * Sanitize addr and size. Permissions have been sanitized prior to
540 	 * dispatch
541 	 */
542 	kr = mach_make_memory_entry_generic_sanitize(target_map,
543 	    *size_u,
544 	    offset_u,
545 	    &map_start,
546 	    &map_end,
547 	    &map_size,
548 	    &offset);
549 	if (__improbable(kr != KERN_SUCCESS)) {
550 		return mach_make_memory_entry_cleanup(kr, target_map,
551 		           size_u, offset_u, permission, user_entry);
552 	}
553 
554 	assert(map_size != 0);
555 
556 	vm_memory_entry_decode_perm(permission, &access, &protections,
557 	    &mask_protections, &use_data_addr, &use_4K_compat);
558 
559 	if (target_map == VM_MAP_NULL) {
560 		return mach_make_memory_entry_cleanup(KERN_INVALID_TASK, target_map,
561 		           size_u, offset_u, permission, user_entry);
562 	}
563 
564 	offset_in_page = vm_memory_entry_get_offset_in_page(offset, map_start,
565 	    use_data_addr, use_4K_compat);
566 
567 	kr = vm_map_copyin_internal(target_map,
568 	    map_start,
569 	    map_size,
570 	    VM_MAP_COPYIN_ENTRY_LIST,
571 	    &copy);
572 	if (kr != KERN_SUCCESS) {
573 		return mach_make_memory_entry_cleanup(kr, target_map,
574 		           size_u, offset_u, permission, user_entry);
575 	}
576 	assert(copy != VM_MAP_COPY_NULL);
577 
578 	user_entry = mach_memory_entry_allocate(object_handle);
579 	user_entry->backing.copy = copy;
580 	user_entry->internal = FALSE;
581 	user_entry->is_sub_map = FALSE;
582 	user_entry->is_copy = TRUE;
583 	user_entry->offset = 0;
584 	user_entry->protection = protections;
585 	user_entry->size = map_size;
586 	user_entry->data_offset = offset_in_page;
587 
588 	/* is all memory in this named entry "owned"? */
589 	vm_map_entry_t entry;
590 	user_entry->is_fully_owned = TRUE;
591 	for (entry = vm_map_copy_first_entry(copy);
592 	    entry != vm_map_copy_to_entry(copy);
593 	    entry = entry->vme_next) {
594 		if (entry->is_sub_map ||
595 		    VME_OBJECT(entry) == VM_OBJECT_NULL ||
596 		    VM_OBJECT_OWNER(VME_OBJECT(entry)) == TASK_NULL) {
597 			/* this memory is not "owned" */
598 			user_entry->is_fully_owned = FALSE;
599 			break;
600 		}
601 	}
602 
603 	*size_u = vm_sanitize_wrap_size(user_entry->size - user_entry->data_offset);
604 	DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> "
605 	    "entry %p kr 0x%x\n", target_map, offset, VM_SANITIZE_UNSAFE_UNWRAP(*size_u),
606 	    permission, user_entry, KERN_SUCCESS);
607 	return KERN_SUCCESS;
608 }
609 
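/*
 * MAP_MEM_VM_SHARE / default path: extract the range from target_map
 * with vm_map_copy_extract() so the named entry shares the underlying
 * memory (VM_INHERIT_SHARE) instead of snapshotting it.  With
 * MAP_MEM_NAMED_REUSE, a matching parent entry is re-used (with an
 * extra send right) instead of creating a new entry.
 */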
610 static kern_return_t
611 mach_make_memory_entry_share(
612 	vm_map_t                target_map,
613 	memory_object_size_ut  *size_u,
614 	vm_map_offset_ut        offset_u,
615 	vm_prot_t               permission,
616 	ipc_port_t             *object_handle,
617 	ipc_port_t              parent_handle,
618 	vm_named_entry_t        parent_entry)
619 {
620 	vm_object_t             object;
621 	unsigned int            access;
622 	vm_prot_t               protections;
623 	bool                    mask_protections;
624 	bool                    use_data_addr;
625 	bool                    use_4K_compat;
626 	vm_named_entry_t        user_entry = NULL;
627 	vm_map_copy_t           copy;
628 	vm_prot_t               cur_prot, max_prot;
629 	vm_map_kernel_flags_t   vmk_flags;
630 	vm_map_entry_t          parent_copy_entry;
631 	/*
632 	 * Stash the offset in the page for use by vm_map_enter_mem_object()
633 	 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
634 	 */
635 	vm_object_offset_t      offset_in_page;
636 	unsigned int            wimg_mode;
637 	kern_return_t           kr;
638 	vm_map_size_t           map_size;
639 	vm_map_offset_t         map_start, map_end, offset;
640 
641 	if (VM_SANITIZE_UNSAFE_IS_ZERO(*size_u)) {
642 		return mach_make_memory_entry_cleanup(KERN_INVALID_ARGUMENT, target_map,
643 		           size_u, offset_u, permission, user_entry);
644 	}
645 
646 #if CONFIG_PROB_GZALLOC
647 	/*
648 	 * If offset is PGZ protected we need PGZ to fix it up to the right
649 	 * value prior to validation and use.
650 	 */
651 	offset_u = vm_memory_entry_pgz_decode_offset(target_map, offset_u, size_u);
652 #endif /* CONFIG_PROB_GZALLOC */
653 
654 	/*
655 	 * Sanitize addr and size. Permissions have been sanitized prior to
656 	 * dispatch
657 	 */
658 	kr = mach_make_memory_entry_generic_sanitize(target_map,
659 	    *size_u,
660 	    offset_u,
661 	    &map_start,
662 	    &map_end,
663 	    &map_size,
664 	    &offset);
665 	if (__improbable(kr != KERN_SUCCESS)) {
666 		return mach_make_memory_entry_cleanup(kr, target_map,
667 		           size_u, offset_u, permission, user_entry);
668 	}
669 
670 	assert(map_size != 0);
671 
672 	vm_memory_entry_decode_perm(permission, &access, &protections,
673 	    &mask_protections, &use_data_addr, &use_4K_compat);
674 
675 	if (target_map == VM_MAP_NULL) {
676 		return mach_make_memory_entry_cleanup(KERN_INVALID_TASK, target_map,
677 		           size_u, offset_u, permission, user_entry);
678 	}
679 
680 	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
681 	vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
682 	parent_copy_entry = VM_MAP_ENTRY_NULL;
683 	if (!(permission & MAP_MEM_VM_SHARE)) {
684 		vm_map_t tmp_map, real_map;
685 		vm_map_version_t version;
686 		vm_object_t tmp_object;
687 		vm_object_offset_t obj_off;
688 		vm_prot_t prot;
689 		boolean_t wired;
690 		bool contended;
691 
692 		/* resolve any pending submap copy-on-write... */
693 		if (protections & VM_PROT_WRITE) {
694 			tmp_map = target_map;
695 			vm_map_lock_read(tmp_map);
696 			kr = vm_map_lookup_and_lock_object(&tmp_map,
697 			    map_start,
698 			    protections | (mask_protections ? VM_PROT_IS_MASK : 0),
699 			    OBJECT_LOCK_EXCLUSIVE,
700 			    &version,
701 			    &tmp_object,
702 			    &obj_off,
703 			    &prot,
704 			    &wired,
705 			    NULL,                               /* fault_info */
706 			    &real_map,
707 			    &contended);
708 			if (kr != KERN_SUCCESS) {
709 				vm_map_unlock_read(tmp_map);
710 			} else {
711 				vm_object_unlock(tmp_object);
712 				vm_map_unlock_read(tmp_map);
713 				if (real_map != tmp_map) {
714 					vm_map_unlock_read(real_map);
715 				}
716 			}
717 		}
718 		/* ... and carry on */
719 
720 		/* stop extracting if VM object changes */
721 		vmk_flags.vmkf_copy_single_object = TRUE;
722 		if ((permission & MAP_MEM_NAMED_REUSE) &&
723 		    parent_entry != NULL &&
724 		    parent_entry->is_object) {
725 			vm_map_copy_t parent_copy;
726 			parent_copy = parent_entry->backing.copy;
727 			/*
728 			 * Assert that the vm_map_copy is coming from the right
729 			 * zone and hasn't been forged
730 			 */
731 			vm_map_copy_require(parent_copy);
732 			assert(parent_copy->cpy_hdr.nentries == 1);
733 			parent_copy_entry = vm_map_copy_first_entry(parent_copy);
734 			assert(!parent_copy_entry->is_sub_map);
735 		}
736 	}
737 
738 	offset_in_page = vm_memory_entry_get_offset_in_page(offset, map_start,
739 	    use_data_addr, use_4K_compat);
740 
741 	if (mask_protections) {
742 		/*
743 		 * caller is asking for whichever protections are
744 		 * available: no required protections.
745 		 */
746 		cur_prot = VM_PROT_NONE;
747 		max_prot = VM_PROT_NONE;
748 	} else {
749 		/*
750 		 * Caller wants a memory entry with "protections".
751 		 * Make sure we extract only memory that matches that.
752 		 */
753 		cur_prot = protections;
754 		max_prot = protections;
755 	}
756 	if (target_map->pmap == kernel_pmap) {
757 		/*
758 		 * Get "reserved" map entries to avoid deadlocking
759 		 * on the kernel map or a kernel submap if we
760 		 * run out of VM map entries and need to refill that
761 		 * zone.
762 		 */
763 		vmk_flags.vmkf_copy_pageable = FALSE;
764 	} else {
765 		vmk_flags.vmkf_copy_pageable = TRUE;
766 	}
767 	vmk_flags.vmkf_copy_same_map = FALSE;
768 	assert(map_size != 0);
769 	kr = vm_map_copy_extract(target_map,
770 	    map_start,
771 	    map_size,
772 	    FALSE,                              /* copy */
773 	    &copy,
774 	    &cur_prot,
775 	    &max_prot,
776 	    VM_INHERIT_SHARE,
777 	    vmk_flags);
778 	if (kr != KERN_SUCCESS) {
779 		return mach_make_memory_entry_cleanup(kr, target_map,
780 		           size_u, offset_u, permission, user_entry);
781 	}
782 	assert(copy != VM_MAP_COPY_NULL);
783 
784 	if (mask_protections) {
785 		/*
786 		 * We just want as much of "original_protections"
787 		 * as we can get out of the actual "cur_prot".
788 		 */
789 		protections &= cur_prot;
790 		if (protections == VM_PROT_NONE) {
791 			/* no access at all: fail */
792 			vm_map_copy_discard(copy);
793 			return mach_make_memory_entry_cleanup(KERN_PROTECTION_FAILURE,
794 			           target_map, size_u, offset_u, permission, user_entry);
795 		}
796 	} else {
797 		/*
798 		 * We want exactly "original_protections"
799 		 * out of "cur_prot".
800 		 */
801 		assert((cur_prot & protections) == protections);
802 		assert((max_prot & protections) == protections);
803 		/* XXX FBDP TODO: no longer needed? */
804 		if ((cur_prot & protections) != protections) {
805 			vm_map_copy_discard(copy);
806 			return mach_make_memory_entry_cleanup(KERN_PROTECTION_FAILURE,
807 			           target_map, size_u, offset_u, permission, user_entry);
808 		}
809 	}
810 
811 	if (!(permission & MAP_MEM_VM_SHARE)) {
812 		vm_map_entry_t copy_entry;
813 
814 		/* limit size to what's actually covered by "copy" */
815 		assert(copy->cpy_hdr.nentries == 1);
816 		copy_entry = vm_map_copy_first_entry(copy);
817 		map_size = copy_entry->vme_end - copy_entry->vme_start;
818 
819 		if ((permission & MAP_MEM_NAMED_REUSE) &&
820 		    parent_copy_entry != VM_MAP_ENTRY_NULL &&
821 		    VME_OBJECT(copy_entry) == VME_OBJECT(parent_copy_entry) &&
822 		    VME_OFFSET(copy_entry) == VME_OFFSET(parent_copy_entry) &&
823 		    parent_entry->offset == 0 &&
824 		    parent_entry->size == map_size &&
825 		    (parent_entry->data_offset == offset_in_page)) {
826 			/* we have a match: re-use "parent_entry" */
827 
828 			/* release our new "copy" */
829 			vm_map_copy_discard(copy);
830 			/* get extra send right on handle */
831 			parent_handle = ipc_port_copy_send_any(parent_handle);
832 
833 			*size_u = vm_sanitize_wrap_size(parent_entry->size -
834 			    parent_entry->data_offset);
835 			*object_handle = parent_handle;
836 			DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> "
837 			    "entry %p kr 0x%x\n", target_map, offset, VM_SANITIZE_UNSAFE_UNWRAP(*size_u),
838 			    permission, user_entry, KERN_SUCCESS);
839 			return KERN_SUCCESS;
840 		}
841 
842 		/* no match: we need to create a new entry */
843 		object = VME_OBJECT(copy_entry);
844 		vm_object_lock(object);
845 		wimg_mode = object->wimg_bits;
846 		if (!(object->nophyscache)) {
847 			vm_prot_to_wimg(access, &wimg_mode);
848 		}
849 		if (object->wimg_bits != wimg_mode) {
850 			vm_object_change_wimg_mode(object, wimg_mode);
851 		}
852 		vm_object_unlock(object);
853 	}
854 
855 	user_entry = mach_memory_entry_allocate(object_handle);
856 	user_entry->backing.copy = copy;
857 	user_entry->is_sub_map = FALSE;
858 	user_entry->is_object = FALSE;
859 	user_entry->internal = FALSE;
860 	user_entry->protection = protections;
861 	user_entry->size = map_size;
862 	user_entry->data_offset = offset_in_page;
863 
864 	if (permission & MAP_MEM_VM_SHARE) {
865 		vm_map_entry_t copy_entry;
866 
867 		user_entry->is_copy = TRUE;
868 		user_entry->offset = 0;
869 
870 		/* is all memory in this named entry "owned"? */
871 		user_entry->is_fully_owned = TRUE;
872 		for (copy_entry = vm_map_copy_first_entry(copy);
873 		    copy_entry != vm_map_copy_to_entry(copy);
874 		    copy_entry = copy_entry->vme_next) {
875 			if (copy_entry->is_sub_map) {
876 				/* submaps can't be owned */
877 				user_entry->is_fully_owned = FALSE;
878 				break;
879 			}
880 			if (VM_OBJECT_OWNER(VME_OBJECT(copy_entry)) == TASK_NULL) {
881 				object = VME_OBJECT(copy_entry);
882 				if (object && !object->internal) {
883 					/* external objects can be "owned" */
884 					continue;
885 				}
886 				/* this memory is not "owned" */
887 				user_entry->is_fully_owned = FALSE;
888 				break;
889 			}
890 		}
891 	} else {
892 		user_entry->is_object = TRUE;
893 		user_entry->internal = object->internal;
894 		user_entry->offset = VME_OFFSET(vm_map_copy_first_entry(copy));
895 		user_entry->access = GET_MAP_MEM(permission);
896 		/* is all memory in this named entry "owned"? */
897 		user_entry->is_fully_owned = FALSE;
898 		object = vm_named_entry_to_vm_object(user_entry);
899 		if (VM_OBJECT_OWNER(object) != TASK_NULL) {
900 			/* object is owned */
901 			user_entry->is_fully_owned = TRUE;
902 		} else if (object && !object->internal) {
903 			/* external objects can become "owned" */
904 			user_entry->is_fully_owned = TRUE;
905 		}
906 	}
907 
908 	*size_u = vm_sanitize_wrap_size(user_entry->size -
909 	    user_entry->data_offset);
910 	DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry "
911 	    "%p kr 0x%x\n", target_map, offset, VM_SANITIZE_UNSAFE_UNWRAP(*size_u),
912 	    permission, user_entry, KERN_SUCCESS);
913 	return KERN_SUCCESS;
914 }
915 
916 static inline kern_return_t
917 mach_make_memory_entry_from_parent_entry_sanitize(
918 	vm_map_t                target_map,
919 	memory_object_size_ut   size_u,
920 	vm_map_offset_ut        offset_u,
921 	vm_prot_t               permission,
922 	vm_named_entry_t        parent_entry,
923 	vm_map_offset_t        *map_start,
924 	vm_map_offset_t        *map_end,
925 	vm_map_size_t          *map_size,
926 	vm_map_offset_t        *offset,
927 	vm_map_offset_t        *user_entry_offset)
928 {
929 	bool                    mask_protections;
930 	unsigned int            access;
931 	vm_prot_t               protections;
932 	bool                    use_data_addr;
933 	bool                    use_4K_compat;
934 	vm_map_offset_t         start_mask = vm_map_page_mask(target_map);
935 	kern_return_t           kr;
936 
937 	vm_memory_entry_decode_perm(permission, &access, &protections,
938 	    &mask_protections, &use_data_addr, &use_4K_compat);
939 
940 	if (use_data_addr || use_4K_compat) {
941 		/*
942 		 * Validate offset doesn't overflow when added to parent entry's offset
943 		 */
944 		if (vm_sanitize_add_overflow(offset_u, parent_entry->data_offset,
945 		    &offset_u)) {
946 			return KERN_INVALID_ARGUMENT;
947 		}
948 		start_mask = PAGE_MASK;
949 	}
950 
951 	/*
952 	 * Currently the map_start is truncated using page mask from target_map
953 	 * when use_data_addr || use_4K_compat is false, while map_end uses
954 	 * PAGE_MASK. In order to maintain that behavior, we
955 	 * request unaligned values and perform the truncating/rounding
956 	 * explicitly.
957 	 */
958 	kr = vm_sanitize_addr_size(offset_u, size_u,
959 	    VM_SANITIZE_CALLER_MACH_MAKE_MEMORY_ENTRY, PAGE_MASK,
960 	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH | VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES,
961 	    map_start, map_end, map_size);
962 	if (__improbable(kr != KERN_SUCCESS)) {
963 		return kr;
964 	}
965 
966 	*map_start =  vm_map_trunc_page_mask(*map_start, start_mask);
967 	*map_end = vm_map_round_page_mask(*map_end, PAGE_MASK);
968 	*map_size = *map_end - *map_start;
969 
970 	/*
971 	 * Additional checks to make sure explicitly computed aligned start and end
972 	 * still make sense.
973 	 */
974 	if (__improbable(*map_end < *map_start) || (*map_end > parent_entry->size)) {
975 		return KERN_INVALID_ARGUMENT;
976 	}
977 
978 	/*
979 	 * Validate offset
980 	 */
981 	kr = vm_sanitize_offset(offset_u, VM_SANITIZE_CALLER_MACH_MAKE_MEMORY_ENTRY,
982 	    *map_start, *map_end, offset);
983 	if (__improbable(kr != KERN_SUCCESS)) {
984 		return kr;
985 	}
986 
987 	if (__improbable(os_add_overflow(parent_entry->offset, *map_start,
988 	    user_entry_offset))) {
989 		return KERN_INVALID_ARGUMENT;
990 	}
991 
992 	return KERN_SUCCESS;
993 }
994 
995 static kern_return_t
996 mach_make_memory_entry_from_parent_entry(
997 	vm_map_t                target_map,
998 	memory_object_size_ut  *size_u,
999 	vm_map_offset_ut        offset_u,
1000 	vm_prot_t               permission,
1001 	ipc_port_t             *object_handle,
1002 	vm_named_entry_t        parent_entry)
1003 {
1004 	vm_object_t             object;
1005 	unsigned int            access;
1006 	vm_prot_t               protections;
1007 	bool                    mask_protections;
1008 	bool                    use_data_addr;
1009 	bool                    use_4K_compat;
1010 	vm_named_entry_t        user_entry = NULL;
1011 	kern_return_t           kr;
1012 	/*
1013 	 * Stash the offset in the page for use by vm_map_enter_mem_object()
1014 	 * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
1015 	 */
1016 	vm_object_offset_t      offset_in_page;
1017 	vm_map_offset_t         map_start, map_end;
1018 	vm_map_size_t           map_size;
1019 	vm_map_offset_t         user_entry_offset, offset;
1020 
1021 	vm_memory_entry_decode_perm(permission, &access, &protections,
1022 	    &mask_protections, &use_data_addr, &use_4K_compat);
1023 
1024 	/*
1025 	 * Sanitize addr and size. Permissions have been sanitized prior to
1026 	 * dispatch
1027 	 */
1028 	kr = mach_make_memory_entry_from_parent_entry_sanitize(target_map,
1029 	    *size_u,
1030 	    offset_u,
1031 	    permission,
1032 	    parent_entry,
1033 	    &map_start,
1034 	    &map_end,
1035 	    &map_size,
1036 	    &offset,
1037 	    &user_entry_offset);
1038 	if (__improbable(kr != KERN_SUCCESS)) {
1039 		return mach_make_memory_entry_cleanup(kr, target_map,
1040 		           size_u, offset_u, permission, user_entry);
1041 	}
1042 
1043 	if (use_data_addr || use_4K_compat) {
1044 		/*
1045 		 * Submaps and pagers should only be accessible from within
1046 		 * the kernel, which shouldn't use the data address flag, so we can fail here.
1047 		 */
1048 		if (parent_entry->is_sub_map) {
1049 			panic("Shouldn't be using data address with a parent entry that is a submap.");
1050 		}
1051 	}
1052 
1053 	if (mask_protections) {
1054 		/*
1055 		 * The caller asked us to use the "protections" as
1056 		 * a mask, so restrict "protections" to what this
1057 		 * mapping actually allows.
1058 		 */
1059 		protections &= parent_entry->protection;
1060 	}
1061 	if ((protections & parent_entry->protection) != protections) {
1062 		return mach_make_memory_entry_cleanup(KERN_PROTECTION_FAILURE, target_map,
1063 		           size_u, offset_u, permission, user_entry);
1064 	}
1065 
1066 	offset_in_page = vm_memory_entry_get_offset_in_page(offset, map_start,
1067 	    use_data_addr, use_4K_compat);
1068 
1069 	user_entry = mach_memory_entry_allocate(object_handle);
1070 	user_entry->size = map_size;
1071 	user_entry->offset = user_entry_offset;
1072 	user_entry->data_offset = offset_in_page;
1073 	user_entry->is_sub_map = parent_entry->is_sub_map;
1074 	user_entry->is_copy = parent_entry->is_copy;
1075 	user_entry->protection = protections;
1076 
1077 	if (access != MAP_MEM_NOOP) {
1078 		user_entry->access = access;
1079 	}
1080 
1081 	if (parent_entry->is_sub_map) {
1082 		vm_map_t map = parent_entry->backing.map;
1083 		vm_map_reference(map);
1084 		user_entry->backing.map = map;
1085 	} else {
1086 		object = vm_named_entry_to_vm_object(parent_entry);
1087 		assert(object != VM_OBJECT_NULL);
1088 		assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC);
1089 		vm_named_entry_associate_vm_object(
1090 			user_entry,
1091 			object,
1092 			user_entry->offset,
1093 			user_entry->size,
1094 			(user_entry->protection & VM_PROT_ALL));
1095 		assert(user_entry->is_object);
1096 		/* we now point to this object, hold on */
1097 		vm_object_lock(object);
1098 		vm_object_reference_locked(object);
1099 #if VM_OBJECT_TRACKING_OP_TRUESHARE
1100 		if (!object->true_share &&
1101 		    vm_object_tracking_btlog) {
1102 			btlog_record(vm_object_tracking_btlog, object,
1103 			    VM_OBJECT_TRACKING_OP_TRUESHARE,
1104 			    btref_get(__builtin_frame_address(0), 0));
1105 		}
1106 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
1107 
1108 		VM_OBJECT_SET_TRUE_SHARE(object, TRUE);
1109 		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
1110 			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1111 		}
1112 		vm_object_unlock(object);
1113 	}
1114 	*size_u = vm_sanitize_wrap_size(user_entry->size -
1115 	    user_entry->data_offset);
1116 	DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry "
1117 	    "%p kr 0x%x\n", target_map, offset, VM_SANITIZE_UNSAFE_UNWRAP(*size_u),
1118 	    permission, user_entry, KERN_SUCCESS);
1119 	return KERN_SUCCESS;
1120 }
1121 
1122 static inline kern_return_t
1123 mach_make_memory_entry_sanitize_perm(
1124 	vm_prot_ut              permission_u,
1125 	vm_prot_t              *permission)
1126 {
1127 	return vm_sanitize_memory_entry_perm(permission_u,
1128 	           VM_SANITIZE_CALLER_MACH_MAKE_MEMORY_ENTRY,
1129 	           VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS,
1130 	           VM_PROT_IS_MASK, permission);
1131 }
1132 
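/*
 * mach_make_memory_entry_internal: dispatch on the (already sanitized)
 * permission flags:
 *
 *	MAP_MEM_ONLY          -> update caching/access on an existing entry
 *	MAP_MEM_NAMED_CREATE  -> create a fresh VM object and wrap it
 *	MAP_MEM_VM_COPY       -> snapshot the range via vm_map_copyin_internal()
 *	MAP_MEM_VM_SHARE, MAP_MEM_NAMED_REUSE, or no parent entry
 *	                      -> share/extract the range from target_map
 *	otherwise             -> carve a sub-range out of the parent entry
 */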
1133 kern_return_t
1134 mach_make_memory_entry_internal(
1135 	vm_map_t                        target_map,
1136 	memory_object_size_ut          *size_u,
1137 	memory_object_offset_ut         offset_u,
1138 	vm_prot_ut                      permission_u,
1139 	vm_named_entry_kernel_flags_t   vmne_kflags,
1140 	ipc_port_t                     *object_handle,
1141 	ipc_port_t                      parent_handle)
1142 {
1143 	vm_named_entry_t        user_entry __unused = NULL;
1144 	vm_named_entry_t        parent_entry;
1145 	kern_return_t           kr;
1146 	vm_prot_t               permission;
1147 
1148 	DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x\n",
1149 	    target_map, VM_SANITIZE_UNSAFE_UNWRAP(offset_u), VM_SANITIZE_UNSAFE_UNWRAP(*size_u),
1150 	    VM_SANITIZE_UNSAFE_UNWRAP(permission_u));
1151 
1152 	/*
1153 	 * Validate permissions as we need to dispatch the corresponding flavor
1154 	 */
1155 	kr = mach_make_memory_entry_sanitize_perm(permission_u, &permission);
1156 	if (__improbable(kr != KERN_SUCCESS)) {
1157 		return mach_make_memory_entry_cleanup(kr, target_map,
1158 		           size_u, offset_u, permission, user_entry);
1159 	}
1160 
1161 	parent_entry = mach_memory_entry_from_port(parent_handle);
1162 	if (parent_entry && parent_entry->is_copy) {
1163 		return mach_make_memory_entry_cleanup(KERN_INVALID_ARGUMENT, target_map,
1164 		           size_u, offset_u, permission, user_entry);
1165 	}
1166 
1167 	if (permission & MAP_MEM_ONLY) {
1168 		return mach_make_memory_entry_mem_only(target_map, size_u, offset_u,
1169 		           permission, object_handle, parent_entry);
1170 	}
1171 
1172 	if (permission & MAP_MEM_NAMED_CREATE) {
1173 		return mach_make_memory_entry_named_create(target_map, size_u, offset_u,
1174 		           permission, vmne_kflags, object_handle);
1175 	}
1176 
1177 	if (permission & MAP_MEM_VM_COPY) {
1178 		return mach_make_memory_entry_copy(target_map, size_u, offset_u,
1179 		           permission, object_handle);
1180 	}
1181 
1182 	if ((permission & MAP_MEM_VM_SHARE)
1183 	    || parent_entry == NULL
1184 	    || (permission & MAP_MEM_NAMED_REUSE)) {
1185 		return mach_make_memory_entry_share(target_map, size_u, offset_u,
1186 		           permission, object_handle, parent_handle, parent_entry);
1187 	}
1188 
1189 	/*
1190 	 * This function will compute map start, end and size by including the
1191 	 * parent entry's offset. Therefore redo validation.
1192 	 */
1193 	return mach_make_memory_entry_from_parent_entry(target_map, size_u,
1194 	           offset_u, permission, object_handle, parent_entry);
1195 }
1196 
1197 kern_return_t
1198 _mach_make_memory_entry(
1199 	vm_map_t                target_map,
1200 	memory_object_size_ut  *size_u,
1201 	memory_object_offset_ut offset_u,
1202 	vm_prot_ut              permission_u,
1203 	ipc_port_t              *object_handle,
1204 	ipc_port_t              parent_entry)
1205 {
1206 	return mach_make_memory_entry_64(target_map, size_u,
1207 	           offset_u, permission_u, object_handle, parent_entry);
1208 }
1209 
1210 kern_return_t
1211 mach_make_memory_entry(
1212 	vm_map_t                target_map,
1213 	vm_size_ut             *size_u,
1214 	vm_offset_ut            offset_u,
1215 	vm_prot_ut              permission_u,
1216 	ipc_port_t              *object_handle,
1217 	ipc_port_t              parent_entry)
1218 {
1219 	kern_return_t           kr;
1220 
1221 	kr = mach_make_memory_entry_64(target_map, size_u,
1222 	    offset_u, permission_u, object_handle, parent_entry);
1223 	return kr;
1224 }
1225 
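/*
 * Allocate a vm_named_entry and the IKOT_NAMED_ENTRY kobject port that
 * represents it.  The port is created with a send right and a no-senders
 * request, so mach_memory_entry_no_senders() (registered through
 * IPC_KOBJECT_DEFINE above) runs once the last send right goes away.
 */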
1226 __private_extern__ vm_named_entry_t
1227 mach_memory_entry_allocate(ipc_port_t *user_handle_p)
1228 {
1229 	vm_named_entry_t user_entry;
1230 
1231 	user_entry = kalloc_type(struct vm_named_entry,
1232 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1233 	named_entry_lock_init(user_entry);
1234 
1235 	*user_handle_p = ipc_kobject_alloc_port((ipc_kobject_t)user_entry,
1236 	    IKOT_NAMED_ENTRY,
1237 	    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
1238 
1239 #if VM_NAMED_ENTRY_DEBUG
1240 	/* backtrace at allocation time, for debugging only */
1241 	user_entry->named_entry_bt = btref_get(__builtin_frame_address(0), 0);
1242 #endif /* VM_NAMED_ENTRY_DEBUG */
1243 	return user_entry;
1244 }
1245 
1246 static inline kern_return_t
1247 mach_memory_object_memory_entry_64_sanitize(
1248 	vm_object_size_ut       size_u,
1249 	vm_prot_ut              permission_u,
1250 	vm_object_size_t       *size,
1251 	vm_prot_t              *permission)
1252 {
1253 	kern_return_t           kr;
1254 
1255 	kr = vm_sanitize_object_size(size_u,
1256 	    VM_SANITIZE_CALLER_MACH_MEMORY_OBJECT_MEMORY_ENTRY,
1257 	    VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS, size);
1258 	if (__improbable(kr != KERN_SUCCESS)) {
1259 		return kr;
1260 	}
1261 	kr = vm_sanitize_memory_entry_perm(permission_u,
1262 	    VM_SANITIZE_CALLER_MACH_MEMORY_OBJECT_MEMORY_ENTRY,
1263 	    VM_SANITIZE_FLAGS_NONE, VM_PROT_NONE,
1264 	    permission);
1265 	if (__improbable(kr != KERN_SUCCESS)) {
1266 		return kr;
1267 	}
1268 
1269 	return KERN_SUCCESS;
1270 }
1271 
1272 /*
1273  *	mach_memory_object_memory_entry_64
1274  *
1275  *	Create a named entry backed by the provided pager.
1276  *
1277  */
1278 kern_return_t
1279 mach_memory_object_memory_entry_64(
1280 	host_t                  host,
1281 	boolean_t               internal,
1282 	vm_object_size_ut       size_u,
1283 	vm_prot_ut              permission_u,
1284 	memory_object_t         pager,
1285 	ipc_port_t              *entry_handle)
1286 {
1287 	vm_named_entry_t        user_entry;
1288 	ipc_port_t              user_handle;
1289 	vm_object_t             object;
1290 	vm_object_size_t        size;
1291 	vm_prot_t               permission;
1292 	kern_return_t           kr;
1293 
1294 	if (host == HOST_NULL) {
1295 		return KERN_INVALID_HOST;
1296 	}
1297 
1298 	/*
1299 	 * Validate size and permission
1300 	 */
1301 	kr = mach_memory_object_memory_entry_64_sanitize(size_u,
1302 	    permission_u,
1303 	    &size,
1304 	    &permission);
1305 	if (__improbable(kr != KERN_SUCCESS)) {
1306 		return vm_sanitize_get_kr(kr);
1307 	}
1308 
1309 	if (pager == MEMORY_OBJECT_NULL && internal) {
1310 		object = vm_object_allocate(size);
1311 		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
1312 			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1313 		}
1314 	} else {
1315 		object = memory_object_to_vm_object(pager);
1316 		if (object != VM_OBJECT_NULL) {
1317 			vm_object_reference(object);
1318 		}
1319 	}
1320 	if (object == VM_OBJECT_NULL) {
1321 		return KERN_INVALID_ARGUMENT;
1322 	}
1323 
1324 	user_entry = mach_memory_entry_allocate(&user_handle);
1325 	user_entry->size = size;
1326 	user_entry->offset = 0;
1327 	user_entry->protection = permission & VM_PROT_ALL;
1328 	user_entry->access = GET_MAP_MEM(permission);
1329 	user_entry->is_sub_map = FALSE;
1330 
1331 	vm_named_entry_associate_vm_object(user_entry, object, 0, size,
1332 	    (user_entry->protection & VM_PROT_ALL));
1333 	user_entry->internal = object->internal;
1334 	assert(object->internal == internal);
1335 	if (VM_OBJECT_OWNER(object) != TASK_NULL) {
1336 		/* all memory in this entry is "owned" */
1337 		user_entry->is_fully_owned = TRUE;
1338 	} else if (object && !object->internal) {
1339 		/* external objects can become "owned" */
1340 		user_entry->is_fully_owned = TRUE;
1341 	}
1342 
1343 	*entry_handle = user_handle;
1344 	return KERN_SUCCESS;
1345 }
1346 
1347 kern_return_t
1348 mach_memory_object_memory_entry(
1349 	host_t          host,
1350 	boolean_t       internal,
1351 	vm_size_ut      size_u,
1352 	vm_prot_ut      permission_u,
1353 	memory_object_t pager,
1354 	ipc_port_t      *entry_handle)
1355 {
1356 	return mach_memory_object_memory_entry_64( host, internal,
1357 	           size_u, permission_u, pager, entry_handle);
1358 }
1359 
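/*
 * Illustrative sketch (hypothetical "entry" port for a named entry
 * created with MAP_MEM_PURGABLE): mark the backing object volatile,
 * later check whether it was reclaimed.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = mach_memory_entry_purgable_control(entry,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = 0;
 *	kr = mach_memory_entry_purgable_control(entry,
 *	    VM_PURGABLE_GET_STATE, &state);
 *	// state == VM_PURGABLE_EMPTY means the contents were reclaimed.
 */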
1360 kern_return_t
1361 mach_memory_entry_purgable_control(
1362 	ipc_port_t      entry_port,
1363 	vm_purgable_t   control,
1364 	int             *state)
1365 {
1366 	if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1367 		/* not allowed from user-space */
1368 		return KERN_INVALID_ARGUMENT;
1369 	}
1370 
1371 	return memory_entry_purgeable_control_internal(entry_port, control, state);
1372 }
1373 
1374 kern_return_t
1375 memory_entry_purgeable_control_internal(
1376 	ipc_port_t      entry_port,
1377 	vm_purgable_t   control,
1378 	int             *state)
1379 {
1380 	kern_return_t           kr;
1381 	vm_named_entry_t        mem_entry;
1382 	vm_object_t             object;
1383 
1384 	mem_entry = mach_memory_entry_from_port(entry_port);
1385 	if (mem_entry == NULL) {
1386 		return KERN_INVALID_ARGUMENT;
1387 	}
1388 
1389 	if (control != VM_PURGABLE_SET_STATE &&
1390 	    control != VM_PURGABLE_GET_STATE &&
1391 	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
1392 		return KERN_INVALID_ARGUMENT;
1393 	}
1394 
1395 	if ((control == VM_PURGABLE_SET_STATE ||
1396 	    control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
1397 	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
1398 	    ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
1399 		return KERN_INVALID_ARGUMENT;
1400 	}
1401 
1402 	named_entry_lock(mem_entry);
1403 
1404 	if (mem_entry->is_sub_map ||
1405 	    mem_entry->is_copy) {
1406 		named_entry_unlock(mem_entry);
1407 		return KERN_INVALID_ARGUMENT;
1408 	}
1409 
1410 	assert(mem_entry->is_object);
1411 	object = vm_named_entry_to_vm_object(mem_entry);
1412 	if (object == VM_OBJECT_NULL) {
1413 		named_entry_unlock(mem_entry);
1414 		return KERN_INVALID_ARGUMENT;
1415 	}
1416 
1417 	vm_object_lock(object);
1418 
1419 	/* check that named entry covers entire object ? */
1420 	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
1421 		vm_object_unlock(object);
1422 		named_entry_unlock(mem_entry);
1423 		return KERN_INVALID_ARGUMENT;
1424 	}
1425 
1426 	named_entry_unlock(mem_entry);
1427 
1428 	kr = vm_object_purgable_control(object, control, state);
1429 
1430 	vm_object_unlock(object);
1431 
1432 	return kr;
1433 }
1434 
1435 static kern_return_t
1436 memory_entry_access_tracking_internal(
1437 	ipc_port_t      entry_port,
1438 	int             *access_tracking,
1439 	uint32_t        *access_tracking_reads,
1440 	uint32_t        *access_tracking_writes)
1441 {
1442 	vm_named_entry_t        mem_entry;
1443 	vm_object_t             object;
1444 	kern_return_t           kr;
1445 
1446 	mem_entry = mach_memory_entry_from_port(entry_port);
1447 	if (mem_entry == NULL) {
1448 		return KERN_INVALID_ARGUMENT;
1449 	}
1450 
1451 	named_entry_lock(mem_entry);
1452 
1453 	if (mem_entry->is_sub_map ||
1454 	    mem_entry->is_copy) {
1455 		named_entry_unlock(mem_entry);
1456 		return KERN_INVALID_ARGUMENT;
1457 	}
1458 
1459 	assert(mem_entry->is_object);
1460 	object = vm_named_entry_to_vm_object(mem_entry);
1461 	if (object == VM_OBJECT_NULL) {
1462 		named_entry_unlock(mem_entry);
1463 		return KERN_INVALID_ARGUMENT;
1464 	}
1465 
1466 #if VM_OBJECT_ACCESS_TRACKING
1467 	vm_object_access_tracking(object,
1468 	    access_tracking,
1469 	    access_tracking_reads,
1470 	    access_tracking_writes);
1471 	kr = KERN_SUCCESS;
1472 #else /* VM_OBJECT_ACCESS_TRACKING */
1473 	(void) access_tracking;
1474 	(void) access_tracking_reads;
1475 	(void) access_tracking_writes;
1476 	kr = KERN_NOT_SUPPORTED;
1477 #endif /* VM_OBJECT_ACCESS_TRACKING */
1478 
1479 	named_entry_unlock(mem_entry);
1480 
1481 	return kr;
1482 }
1483 
1484 kern_return_t
1485 mach_memory_entry_access_tracking(
1486 	ipc_port_t      entry_port,
1487 	int             *access_tracking,
1488 	uint32_t        *access_tracking_reads,
1489 	uint32_t        *access_tracking_writes)
1490 {
1491 	return memory_entry_access_tracking_internal(entry_port,
1492 	           access_tracking,
1493 	           access_tracking_reads,
1494 	           access_tracking_writes);
1495 }
1496 
1497 #if DEVELOPMENT || DEBUG
1498 /* For dtrace probe in mach_memory_entry_ownership */
1499 extern int proc_selfpid(void);
1500 extern char *proc_name_address(void *p);
1501 #endif /* DEVELOPMENT || DEBUG */
1502 
1503 /* Kernel call only, MIG uses *_from_user() below */
1504 kern_return_t
1505 mach_memory_entry_ownership(
1506 	ipc_port_t      entry_port,
1507 	task_t          owner,
1508 	int             ledger_tag,
1509 	int             ledger_flags)
1510 {
1511 	task_t                  cur_task;
1512 	kern_return_t           kr;
1513 	vm_named_entry_t        mem_entry;
1514 	vm_object_t             object;
1515 
1516 	if (ledger_flags & ~VM_LEDGER_FLAGS_ALL) {
1517 		/* reject unexpected flags */
1518 		return KERN_INVALID_ARGUMENT;
1519 	}
1520 
1521 	cur_task = current_task();
1522 	if (cur_task == kernel_task) {
1523 		/* kernel thread: no entitlement needed */
1524 	} else if (ledger_flags & VM_LEDGER_FLAG_FROM_KERNEL) {
1525 		/* call is from trusted kernel code: no entitlement needed */
1526 	} else if ((owner != cur_task && owner != TASK_NULL) ||
1527 	    (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) ||
1528 	    (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG) ||
1529 	    ledger_tag == VM_LEDGER_TAG_NETWORK) {
1530 		bool transfer_ok = false;
1531 
1532 		/*
1533 		 * An entitlement is required to:
1534 		 * + transfer memory ownership to someone else,
1535 		 * + request that the memory not count against the footprint,
1536 		 * + tag as "network" (since that implies "no footprint")
1537 		 *
1538 		 * Exception: task with task_no_footprint_for_debug == 1 on internal build
1539 		 */
1540 		if (!cur_task->task_can_transfer_memory_ownership &&
1541 		    IOCurrentTaskHasEntitlement("com.apple.private.memory.ownership_transfer")) {
1542 			cur_task->task_can_transfer_memory_ownership = TRUE;
1543 		}
1544 		if (cur_task->task_can_transfer_memory_ownership) {
1545 			/* we're allowed to transfer ownership to any task */
1546 			transfer_ok = true;
1547 		}
1548 #if DEVELOPMENT || DEBUG
1549 		if (!transfer_ok &&
1550 		    ledger_tag == VM_LEDGER_TAG_DEFAULT &&
1551 		    (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG) &&
1552 		    cur_task->task_no_footprint_for_debug) {
1553 			int         to_panic = 0;
1554 			static bool init_bootarg = false;
1555 
1556 			/*
1557 			 * Allow performance tools running on internal builds to hide memory usage from phys_footprint even
1558 			 * WITHOUT an entitlement. This can be enabled by the per-task sysctl vm.task_no_footprint_for_debug=1
1559 			 * with the ledger tag VM_LEDGER_TAG_DEFAULT and flag VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG.
1560 			 *
1561 			 * If the boot-arg "panic_on_no_footprint_for_debug" is set, the kernel will
1562 			 * panic here in order to detect any abuse of this feature, which is intended solely for
1563 			 * memory debugging purpose.
1564 			 * memory debugging purposes.
1565 			if (!init_bootarg) {
1566 				PE_parse_boot_argn("panic_on_no_footprint_for_debug", &to_panic, sizeof(to_panic));
1567 				init_bootarg = true;
1568 			}
1569 			if (to_panic) {
1570 				panic("%s: panic_on_no_footprint_for_debug is triggered by pid %d procname %s", __func__, proc_selfpid(), get_bsdtask_info(cur_task)? proc_name_address(get_bsdtask_info(cur_task)) : "?");
1571 			}
1572 
1573 			/*
1574 			 * Flushing out user space processes using this interface:
1575 			 * $ dtrace -n 'task_no_footprint_for_debug {printf("%d[%s]\n", pid, execname); stack(); ustack();}'
1576 			 */
1577 			DTRACE_VM(task_no_footprint_for_debug);
1578 			transfer_ok = true;
1579 		}
1580 #endif /* DEVELOPMENT || DEBUG */
1581 		if (!transfer_ok) {
1582 #define TRANSFER_ENTITLEMENT_MAX_LENGTH 1024 /* XXX ? */
1583 			const char *our_id, *their_id;
1584 			our_id = IOTaskGetEntitlement(current_task(), "com.apple.developer.memory.transfer-send");
1585 			their_id = IOTaskGetEntitlement(owner, "com.apple.developer.memory.transfer-accept");
1586 			if (our_id && their_id &&
1587 			    !strncmp(our_id, their_id, TRANSFER_ENTITLEMENT_MAX_LENGTH)) {
1588 				/* allow transfer between tasks that have matching entitlements */
1589 				if (strnlen(our_id, TRANSFER_ENTITLEMENT_MAX_LENGTH) < TRANSFER_ENTITLEMENT_MAX_LENGTH &&
1590 				    strnlen(their_id, TRANSFER_ENTITLEMENT_MAX_LENGTH) < TRANSFER_ENTITLEMENT_MAX_LENGTH) {
1591 					transfer_ok = true;
1592 				} else {
1593 					/* complain about entitlement(s) being too long... */
1594 					assertf((strlen(our_id) <= TRANSFER_ENTITLEMENT_MAX_LENGTH &&
1595 					    strlen(their_id) <= TRANSFER_ENTITLEMENT_MAX_LENGTH),
1596 					    "our_id:%lu their_id:%lu",
1597 					    strlen(our_id), strlen(their_id));
1598 				}
1599 			}
1600 		}
1601 		if (!transfer_ok) {
1602 			/* transfer denied */
1603 			return KERN_NO_ACCESS;
1604 		}
1605 
1606 		if (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG) {
1607 			/*
1608 			 * We've made it past the checks above, so we either
1609 			 * have the entitlement or the sysctl.
1610 			 * Convert to VM_LEDGER_FLAG_NO_FOOTPRINT.
1611 			 */
1612 			ledger_flags &= ~VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG;
1613 			ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
1614 		}
1615 	}
1616 
1617 	if (ledger_tag == VM_LEDGER_TAG_UNCHANGED) {
1618 		/* leave "ledger_tag" unchanged */
1619 	} else if (ledger_tag < 0 ||
1620 	    ledger_tag > VM_LEDGER_TAG_MAX) {
1621 		return KERN_INVALID_ARGUMENT;
1622 	}
1623 	if (owner == TASK_NULL) {
1624 		/* leave "owner" unchanged */
1625 		owner = VM_OBJECT_OWNER_UNCHANGED;
1626 	}
1627 
1628 	mem_entry = mach_memory_entry_from_port(entry_port);
1629 	if (mem_entry == NULL) {
1630 		return KERN_INVALID_ARGUMENT;
1631 	}
1632 
1633 	named_entry_lock(mem_entry);
1634 
1635 	if (mem_entry->is_sub_map ||
1636 	    !mem_entry->is_fully_owned) {
1637 		named_entry_unlock(mem_entry);
1638 		return KERN_INVALID_ARGUMENT;
1639 	}
1640 
1641 	if (mem_entry->is_object) {
1642 		object = vm_named_entry_to_vm_object(mem_entry);
1643 		if (object == VM_OBJECT_NULL) {
1644 			named_entry_unlock(mem_entry);
1645 			return KERN_INVALID_ARGUMENT;
1646 		}
1647 		vm_object_lock(object);
1648 		if (object->internal) {
1649 			/* check that named entry covers entire object ? */
1650 			if (mem_entry->offset != 0 ||
1651 			    object->vo_size != mem_entry->size) {
1652 				vm_object_unlock(object);
1653 				named_entry_unlock(mem_entry);
1654 				return KERN_INVALID_ARGUMENT;
1655 			}
1656 		}
1657 		named_entry_unlock(mem_entry);
1658 		kr = vm_object_ownership_change(object,
1659 		    ledger_tag,
1660 		    owner,
1661 		    ledger_flags,
1662 		    FALSE);                             /* task_objq_locked */
1663 		vm_object_unlock(object);
1664 	} else if (mem_entry->is_copy) {
1665 		vm_map_copy_t copy;
1666 		vm_map_entry_t entry;
1667 
1668 		copy = mem_entry->backing.copy;
1669 		named_entry_unlock(mem_entry);
1670 		for (entry = vm_map_copy_first_entry(copy);
1671 		    entry != vm_map_copy_to_entry(copy);
1672 		    entry = entry->vme_next) {
1673 			object = VME_OBJECT(entry);
1674 			if (entry->is_sub_map ||
1675 			    object == VM_OBJECT_NULL) {
1676 				kr = KERN_INVALID_ARGUMENT;
1677 				break;
1678 			}
1679 			vm_object_lock(object);
1680 			if (object->internal) {
1681 				if (VME_OFFSET(entry) != 0 ||
1682 				    entry->vme_end - entry->vme_start != object->vo_size) {
1683 					vm_object_unlock(object);
1684 					kr = KERN_INVALID_ARGUMENT;
1685 					break;
1686 				}
1687 			}
1688 			kr = vm_object_ownership_change(object,
1689 			    ledger_tag,
1690 			    owner,
1691 			    ledger_flags,
1692 			    FALSE);                             /* task_objq_locked */
1693 			vm_object_unlock(object);
1694 			if (kr != KERN_SUCCESS) {
1695 				kr = KERN_INVALID_ARGUMENT;
1696 				break;
1697 			}
1698 		}
1699 	} else {
1700 		named_entry_unlock(mem_entry);
1701 		return KERN_INVALID_ARGUMENT;
1702 	}
1703 
1704 	return kr;
1705 }
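/*
 * Illustrative sketch (not compiled in): a hypothetical trusted kernel
 * caller transferring the ledger accounting of a fully-owned memory entry
 * to a client task.  VM_LEDGER_FLAG_FROM_KERNEL takes the "trusted kernel
 * code" path above and skips the entitlement checks; the example_* function
 * and the origin of entry_port / client_task are assumptions made for
 * illustration only.
 */
#if 0
static kern_return_t
example_assign_entry_to_client(ipc_port_t entry_port, task_t client_task)
{
	return mach_memory_entry_ownership(entry_port,
	           client_task,
	           VM_LEDGER_TAG_DEFAULT,
	           VM_LEDGER_FLAG_FROM_KERNEL);
}
#endif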
1706 
1707 /* MIG call from userspace */
1708 kern_return_t
1709 mach_memory_entry_ownership_from_user(
1710 	ipc_port_t      entry_port,
1711 	mach_port_t     owner_port,
1712 	int             ledger_tag,
1713 	int             ledger_flags)
1714 {
1715 	task_t owner = TASK_NULL;
1716 	kern_return_t kr;
1717 
1718 	if (ledger_flags & ~VM_LEDGER_FLAGS_USER) {
1719 		return KERN_INVALID_ARGUMENT;
1720 	}
1721 
1722 	if (IP_VALID(owner_port)) {
1723 		if (ip_kotype(owner_port) == IKOT_TASK_ID_TOKEN) {
1724 			task_id_token_t token = convert_port_to_task_id_token(owner_port);
1725 			(void)task_identity_token_get_task_grp(token, &owner, TASK_GRP_MIG);
1726 			task_id_token_release(token);
1727 			/* token ref released */
1728 		} else {
1729 			owner = convert_port_to_task_mig(owner_port);
1730 		}
1731 	}
1732 	/* hold task ref on owner (Nullable) */
1733 
1734 	if (owner && task_is_a_corpse(owner)) {
1735 		/* identity token can represent a corpse, disallow it */
1736 		task_deallocate_mig(owner);
1737 		owner = TASK_NULL;
1738 	}
1739 
1740 	/* mach_memory_entry_ownership() will handle TASK_NULL owner */
1741 	kr = mach_memory_entry_ownership(entry_port, owner, /* Nullable */
1742 	    ledger_tag, ledger_flags);
1743 
1744 	if (owner) {
1745 		task_deallocate_mig(owner);
1746 	}
1747 
1748 	if (kr == KERN_SUCCESS) {
1749 		/* MIG rule, consume port right on success */
1750 		ipc_port_release_send(owner_port);
1751 	}
1752 	return kr;
1753 }
1754 
1755 kern_return_t
1756 mach_memory_entry_get_page_counts(
1757 	ipc_port_t      entry_port,
1758 	unsigned int    *resident_page_count,
1759 	unsigned int    *dirty_page_count)
1760 {
1761 	kern_return_t           kr;
1762 	vm_named_entry_t        mem_entry;
1763 	vm_object_t             object;
1764 	vm_object_offset_t      offset;
1765 	vm_object_size_t        size;
1766 
1767 	mem_entry = mach_memory_entry_from_port(entry_port);
1768 	if (mem_entry == NULL) {
1769 		return KERN_INVALID_ARGUMENT;
1770 	}
1771 
1772 	named_entry_lock(mem_entry);
1773 
1774 	if (mem_entry->is_sub_map ||
1775 	    mem_entry->is_copy) {
1776 		named_entry_unlock(mem_entry);
1777 		return KERN_INVALID_ARGUMENT;
1778 	}
1779 
1780 	assert(mem_entry->is_object);
1781 	object = vm_named_entry_to_vm_object(mem_entry);
1782 	if (object == VM_OBJECT_NULL) {
1783 		named_entry_unlock(mem_entry);
1784 		return KERN_INVALID_ARGUMENT;
1785 	}
1786 
1787 	vm_object_lock(object);
1788 
1789 	offset = mem_entry->offset;
1790 	size = mem_entry->size;
1791 	size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
1792 	offset = vm_object_trunc_page(offset);
1793 
1794 	named_entry_unlock(mem_entry);
1795 
1796 	kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);
1797 
1798 	vm_object_unlock(object);
1799 
1800 	return kr;
1801 }
1802 
1803 kern_return_t
1804 mach_memory_entry_phys_page_offset(
1805 	ipc_port_t              entry_port,
1806 	vm_object_offset_t      *offset_p)
1807 {
1808 	vm_named_entry_t        mem_entry;
1809 	vm_object_t             object;
1810 	vm_object_offset_t      offset;
1811 	vm_object_offset_t      data_offset;
1812 
1813 	mem_entry = mach_memory_entry_from_port(entry_port);
1814 	if (mem_entry == NULL) {
1815 		return KERN_INVALID_ARGUMENT;
1816 	}
1817 
1818 	named_entry_lock(mem_entry);
1819 
1820 	if (mem_entry->is_sub_map ||
1821 	    mem_entry->is_copy) {
1822 		named_entry_unlock(mem_entry);
1823 		return KERN_INVALID_ARGUMENT;
1824 	}
1825 
1826 	assert(mem_entry->is_object);
1827 	object = vm_named_entry_to_vm_object(mem_entry);
1828 	if (object == VM_OBJECT_NULL) {
1829 		named_entry_unlock(mem_entry);
1830 		return KERN_INVALID_ARGUMENT;
1831 	}
1832 
1833 	offset = mem_entry->offset;
1834 	data_offset = mem_entry->data_offset;
1835 
1836 	named_entry_unlock(mem_entry);
1837 
1838 	*offset_p = offset - vm_object_trunc_page(offset) + data_offset;
1839 	return KERN_SUCCESS;
1840 }
1841 
1842 static inline kern_return_t
1843 mach_memory_entry_map_size_sanitize_locked(
1844 	vm_map_t                   map,
1845 	memory_object_offset_ut   *offset_u,
1846 	memory_object_size_ut      size_u,
1847 	vm_named_entry_t           mem_entry,
1848 	memory_object_offset_t    *offset,
1849 	memory_object_offset_t    *end,
1850 	mach_vm_size_t            *map_size)
1851 {
1852 	kern_return_t           kr;
1853 
1854 	if (mem_entry->is_object ||
1855 	    (mem_entry->is_copy &&
1856 	    (VM_MAP_COPY_PAGE_MASK(mem_entry->backing.copy) ==
1857 	    VM_MAP_PAGE_MASK(map)))) {
1858 		if (__improbable(vm_sanitize_add_overflow(*offset_u, mem_entry->offset,
1859 		    offset_u))) {
1860 			return KERN_INVALID_ARGUMENT;
1861 		}
1862 	}
1863 
1864 	if (__improbable(vm_sanitize_add_overflow(*offset_u, mem_entry->data_offset,
1865 	    offset_u))) {
1866 		return KERN_INVALID_ARGUMENT;
1867 	}
1868 
1869 	kr = vm_sanitize_addr_size(*offset_u, size_u,
1870 	    VM_SANITIZE_CALLER_MACH_MEMORY_ENTRY_MAP_SIZE, map,
1871 	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, offset, end, map_size);
1872 	if (__improbable(kr != KERN_SUCCESS)) {
1873 		return vm_sanitize_get_kr(kr);
1874 	}
1875 
1876 	return KERN_SUCCESS;
1877 }
1878 
1879 kern_return_t
1880 mach_memory_entry_map_size(
1881 	ipc_port_t                 entry_port,
1882 	vm_map_t                   map,
1883 	memory_object_offset_ut    offset_u,
1884 	memory_object_size_ut      size_u,
1885 	mach_vm_size_t            *map_size_out)
1886 {
1887 	vm_named_entry_t        mem_entry;
1888 	vm_object_t             object;
1889 	vm_map_copy_t           copy_map, target_copy_map;
1890 	vm_map_offset_t         overmap_start, overmap_end, trimmed_start;
1891 	kern_return_t           kr;
1892 	memory_object_offset_t  offset;
1893 	memory_object_offset_t  end;
1894 	mach_vm_size_t          map_size;
1895 
1896 	mem_entry = mach_memory_entry_from_port(entry_port);
1897 	if (mem_entry == NULL) {
1898 		return KERN_INVALID_ARGUMENT;
1899 	}
1900 
1901 	named_entry_lock(mem_entry);
1902 
1903 	if (mem_entry->is_sub_map) {
1904 		named_entry_unlock(mem_entry);
1905 		return KERN_INVALID_ARGUMENT;
1906 	}
1907 
1908 	/*
1909 	 * Sanitize offset and size before use
1910 	 */
1911 	kr = mach_memory_entry_map_size_sanitize_locked(map,
1912 	    &offset_u,
1913 	    size_u,
1914 	    mem_entry,
1915 	    &offset,
1916 	    &end,
1917 	    &map_size);
1918 	if (__improbable(kr != KERN_SUCCESS)) {
1919 		named_entry_unlock(mem_entry);
1920 		return kr;
1921 	}
1922 
1923 	if (mem_entry->is_object) {
1924 		object = vm_named_entry_to_vm_object(mem_entry);
1925 		if (object == VM_OBJECT_NULL) {
1926 			named_entry_unlock(mem_entry);
1927 			return KERN_INVALID_ARGUMENT;
1928 		}
1929 
1930 		named_entry_unlock(mem_entry);
1931 		*map_size_out = map_size;
1932 		return KERN_SUCCESS;
1933 	}
1934 
1935 	if (!mem_entry->is_copy) {
1936 		panic("unsupported type of mem_entry %p", mem_entry);
1937 	}
1938 
1939 	assert(mem_entry->is_copy);
1940 	if (VM_MAP_COPY_PAGE_MASK(mem_entry->backing.copy) == VM_MAP_PAGE_MASK(map)) {
1941 		DEBUG4K_SHARE("map %p (%d) mem_entry %p offset 0x%llx + 0x%llx + 0x%llx size 0x%llx -> map_size 0x%llx\n", map, VM_MAP_PAGE_MASK(map), mem_entry, mem_entry->offset, mem_entry->data_offset, offset, VM_SANITIZE_UNSAFE_UNWRAP(size_u), map_size);
1942 		named_entry_unlock(mem_entry);
1943 		*map_size_out = map_size;
1944 		return KERN_SUCCESS;
1945 	}
1946 
1947 	DEBUG4K_SHARE("mem_entry %p copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx\n", mem_entry, mem_entry->backing.copy, VM_MAP_COPY_PAGE_SHIFT(mem_entry->backing.copy), map, VM_MAP_PAGE_SHIFT(map), offset, VM_SANITIZE_UNSAFE_UNWRAP(size_u));
1948 	copy_map = mem_entry->backing.copy;
1949 	target_copy_map = VM_MAP_COPY_NULL;
1950 	DEBUG4K_ADJUST("adjusting...\n");
1951 	kr = vm_map_copy_adjust_to_target(copy_map,
1952 	    offset_u,
1953 	    size_u,
1954 	    map,
1955 	    FALSE,
1956 	    &target_copy_map,
1957 	    &overmap_start,
1958 	    &overmap_end,
1959 	    &trimmed_start);
1960 	if (kr == KERN_SUCCESS) {
1961 		if (target_copy_map->size != copy_map->size) {
1962 			DEBUG4K_ADJUST("copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx map_size 0x%llx -> 0x%llx\n", copy_map, VM_MAP_COPY_PAGE_SHIFT(copy_map), map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, (uint64_t)VM_SANITIZE_UNSAFE_UNWRAP(size_u), (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)copy_map->size, (uint64_t)target_copy_map->size);
1963 		}
1964 		*map_size_out = target_copy_map->size;
1965 		if (target_copy_map != copy_map) {
1966 			vm_map_copy_discard(target_copy_map);
1967 		}
1968 		target_copy_map = VM_MAP_COPY_NULL;
1969 	}
1970 	named_entry_unlock(mem_entry);
1971 	return kr;
1972 }
1973 
1974 /*
1975  * mach_memory_entry_port_release:
1976  *
1977  * Release a send right on a named entry port.  This is the correct
1978  * way to destroy a named entry.  When the last right on the port is
1979  * released, mach_memory_entry_no_senders() will be called.
1980  */
1981 void
1982 mach_memory_entry_port_release(
1983 	ipc_port_t      port)
1984 {
1985 	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
1986 	ipc_port_release_send(port);
1987 }
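/*
 * Illustrative sketch (not compiled in): dropping the last send right a
 * kernel component holds on a named entry.  Going through
 * mach_memory_entry_port_release() rather than a bare
 * ipc_port_release_send() documents intent and asserts the kobject type.
 * The example_* function and held_entry_port are assumptions for
 * illustration only.
 */
#if 0
static void
example_destroy_named_entry(ipc_port_t held_entry_port)
{
	/* if this was the last send right, no-senders tears the entry down */
	mach_memory_entry_port_release(held_entry_port);
}
#endif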
1988 
1989 vm_named_entry_t
1990 mach_memory_entry_from_port(ipc_port_t port)
1991 {
1992 	if (IP_VALID(port)) {
1993 		return ipc_kobject_get_stable(port, IKOT_NAMED_ENTRY);
1994 	}
1995 	return NULL;
1996 }
1997 
1998 /*
1999  * mach_memory_entry_no_senders:
2000  *
2001  * Destroys the memory entry associated with a mach port.
2002  * Memory entries have the exact same lifetime as their owning port.
2003  *
2004  * Releasing a memory entry is done by calling
2005  * mach_memory_entry_port_release() on its owning port.
2006  */
2007 static void
2008 mach_memory_entry_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
2009 {
2010 	vm_named_entry_t named_entry;
2011 
2012 	named_entry = ipc_kobject_dealloc_port(port, mscount, IKOT_NAMED_ENTRY);
2013 
2014 	if (named_entry->is_sub_map) {
2015 		vm_map_deallocate(named_entry->backing.map);
2016 	} else if (named_entry->is_copy) {
2017 		vm_map_copy_discard(named_entry->backing.copy);
2018 	} else if (named_entry->is_object) {
2019 		assert(named_entry->backing.copy->cpy_hdr.nentries == 1);
2020 		vm_map_copy_discard(named_entry->backing.copy);
2021 	} else {
2022 		assert(named_entry->backing.copy == VM_MAP_COPY_NULL);
2023 	}
2024 
2025 #if VM_NAMED_ENTRY_DEBUG
2026 	btref_put(named_entry->named_entry_bt);
2027 #endif /* VM_NAMED_ENTRY_DEBUG */
2028 
2029 	named_entry_lock_destroy(named_entry);
2030 	kfree_type(struct vm_named_entry, named_entry);
2031 }
2032 
2033 #if XNU_PLATFORM_MacOSX
2034 /* Allow manipulation of individual page state.  This is actually part of */
2035 /* the UPL regimen but takes place on the memory entry rather than on a UPL */
2036 
2037 kern_return_t
2038 mach_memory_entry_page_op(
2039 	ipc_port_t              entry_port,
2040 	vm_object_offset_ut     offset_u,
2041 	int                     ops,
2042 	ppnum_t                 *phys_entry,
2043 	int                     *flags)
2044 {
2045 	vm_named_entry_t        mem_entry;
2046 	vm_object_t             object;
2047 	kern_return_t           kr;
2048 	/*
2049 	 * Unwrap offset as no mathematical operations are
2050 	 * performed on it.
2051 	 */
2052 	vm_object_offset_t      offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
2053 
2054 	mem_entry = mach_memory_entry_from_port(entry_port);
2055 	if (mem_entry == NULL) {
2056 		return KERN_INVALID_ARGUMENT;
2057 	}
2058 
2059 	named_entry_lock(mem_entry);
2060 
2061 	if (mem_entry->is_sub_map ||
2062 	    mem_entry->is_copy) {
2063 		named_entry_unlock(mem_entry);
2064 		return KERN_INVALID_ARGUMENT;
2065 	}
2066 
2067 	assert(mem_entry->is_object);
2068 	object = vm_named_entry_to_vm_object(mem_entry);
2069 	if (object == VM_OBJECT_NULL) {
2070 		named_entry_unlock(mem_entry);
2071 		return KERN_INVALID_ARGUMENT;
2072 	}
2073 
2074 	vm_object_reference(object);
2075 	named_entry_unlock(mem_entry);
2076 
2077 	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
2078 
2079 	vm_object_deallocate(object);
2080 
2081 	return kr;
2082 }
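/*
 * Illustrative sketch (not compiled in): looking up the physical page backing
 * offset 0 of a named entry via the page_op interface.  The UPL_POP_PHYSICAL
 * op, the vm_sanitize_wrap_addr() wrapping of the raw offset, and the
 * example_* function are assumptions made for illustration only.
 */
#if 0
static kern_return_t
example_lookup_phys_page(ipc_port_t entry_port, ppnum_t *phys_out)
{
	int flags = 0;

	return mach_memory_entry_page_op(entry_port,
	           vm_sanitize_wrap_addr(0),    /* offset 0 into the entry */
	           UPL_POP_PHYSICAL,
	           phys_out,
	           &flags);
}
#endif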
2083 
2084 /*
2085  * mach_memory_entry_range_op offers performance enhancement over
2086  * mach_memory_entry_page_op for page_op functions which do not require page
2087  * level state to be returned from the call.  Page_op was created to provide
2088  * a low-cost alternative to page manipulation via UPLs when only a single
2089  * page was involved.  The range_op call establishes the ability in the _op
2090  * family of functions to work on multiple pages where the lack of page level
2091  * state handling allows the caller to avoid the overhead of the upl structures.
2092  */
2093 
2094 kern_return_t
2095 mach_memory_entry_range_op(
2096 	ipc_port_t              entry_port,
2097 	vm_object_offset_ut     offset_beg_u,
2098 	vm_object_offset_ut     offset_end_u,
2099 	int                     ops,
2100 	int                     *range)
2101 {
2102 	vm_named_entry_t        mem_entry;
2103 	vm_object_t             object;
2104 	kern_return_t           kr;
2105 	vm_object_offset_t      offset_range;
2106 	/*
2107 	 * Unwrap offset beginning and end as no mathematical operations are
2108 	 * performed on these quantities.
2109 	 */
2110 	vm_object_offset_t      offset_beg = VM_SANITIZE_UNSAFE_UNWRAP(offset_beg_u);
2111 	vm_object_offset_t      offset_end = VM_SANITIZE_UNSAFE_UNWRAP(offset_end_u);
2112 
2113 	mem_entry = mach_memory_entry_from_port(entry_port);
2114 	if (mem_entry == NULL) {
2115 		return KERN_INVALID_ARGUMENT;
2116 	}
2117 
2118 	named_entry_lock(mem_entry);
2119 
2120 	if (__improbable(os_sub_overflow(offset_end, offset_beg, &offset_range) ||
2121 	    (offset_range > (uint32_t) -1))) {
2122 		/* range is too big and would overflow "*range" */
2123 		named_entry_unlock(mem_entry);
2124 		return KERN_INVALID_ARGUMENT;
2125 	}
2126 
2127 	if (mem_entry->is_sub_map ||
2128 	    mem_entry->is_copy) {
2129 		named_entry_unlock(mem_entry);
2130 		return KERN_INVALID_ARGUMENT;
2131 	}
2132 
2133 	assert(mem_entry->is_object);
2134 	object = vm_named_entry_to_vm_object(mem_entry);
2135 	if (object == VM_OBJECT_NULL) {
2136 		named_entry_unlock(mem_entry);
2137 		return KERN_INVALID_ARGUMENT;
2138 	}
2139 
2140 	vm_object_reference(object);
2141 	named_entry_unlock(mem_entry);
2142 
2143 	kr = vm_object_range_op(object,
2144 	    offset_beg,
2145 	    offset_end,
2146 	    ops,
2147 	    (uint32_t *) range);
2148 
2149 	vm_object_deallocate(object);
2150 
2151 	return kr;
2152 }
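/*
 * Illustrative sketch (not compiled in): using the range_op interface to ask,
 * without building a UPL, how much of the first megabyte of an entry is
 * absent (roughly, the length of the leading run of non-resident pages).
 * The UPL_ROP_ABSENT op, the exact meaning of the returned byte count, the
 * vm_sanitize_wrap_addr() wrapping, and the example_* function are
 * assumptions made for illustration only.
 */
#if 0
static kern_return_t
example_count_absent_prefix(ipc_port_t entry_port, int *absent_bytes)
{
	return mach_memory_entry_range_op(entry_port,
	           vm_sanitize_wrap_addr(0),                /* start of range */
	           vm_sanitize_wrap_addr(1024 * 1024),      /* end of range */
	           UPL_ROP_ABSENT,
	           absent_bytes);
}
#endif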
2153 #endif /* XNU_PLATFORM_MacOSX */
2154 
2155 kern_return_t
2156 memory_entry_check_for_adjustment(
2157 	vm_map_t                        src_map,
2158 	ipc_port_t                      port,
2159 	vm_map_offset_t         *overmap_start,
2160 	vm_map_offset_t         *overmap_end)
2161 {
2162 	kern_return_t kr = KERN_SUCCESS;
2163 	vm_map_copy_t copy_map = VM_MAP_COPY_NULL, target_copy_map = VM_MAP_COPY_NULL;
2164 
2165 	assert(port);
2166 	assertf(ip_kotype(port) == IKOT_NAMED_ENTRY, "Port Type expected: %d...received:%d\n", IKOT_NAMED_ENTRY, ip_kotype(port));
2167 
2168 	vm_named_entry_t        named_entry;
2169 
2170 	named_entry = mach_memory_entry_from_port(port);
2171 	named_entry_lock(named_entry);
2172 	copy_map = named_entry->backing.copy;
2173 	target_copy_map = copy_map;
2174 
2175 	if (src_map && VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT) {
2176 		vm_map_offset_t trimmed_start;
2177 
2178 		trimmed_start = 0;
2179 		DEBUG4K_ADJUST("adjusting...\n");
2180 		kr = vm_map_copy_adjust_to_target(
2181 			copy_map,
2182 			vm_sanitize_wrap_addr(0), /* offset */
2183 			vm_sanitize_wrap_size(copy_map->size), /* size */
2184 			src_map,
2185 			FALSE, /* copy */
2186 			&target_copy_map,
2187 			overmap_start,
2188 			overmap_end,
2189 			&trimmed_start);
2190 		assert(trimmed_start == 0);
2191 	}
2192 	named_entry_unlock(named_entry);
2193 
2194 	return kr;
2195 }
2196