xref: /xnu-11215.41.3/osfmk/vm/vm32_user.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm32_user.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	User-exported virtual memory functions.
63  */
64 
65 #include <debug.h>
66 
67 #include <mach/boolean.h>
68 #include <mach/kern_return.h>
69 #include <mach/mach_types.h>    /* to get vm_address_t */
70 #include <mach/memory_object.h>
71 #include <mach/std_types.h>     /* to get pointer_t */
72 #include <mach/vm_attributes.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_statistics.h>
75 #include <mach/mach_syscalls.h>
76 
77 #include <mach/host_priv_server.h>
78 #include <mach/mach_vm_server.h>
79 #include <mach/vm32_map_server.h>
80 
81 #include <kern/host.h>
82 #include <kern/task.h>
83 #include <kern/misc_protos.h>
84 #include <vm/vm_fault.h>
85 #include <vm/vm_map_internal.h>
86 #include <vm/vm_object_xnu.h>
87 #include <vm/vm_page.h>
88 #include <vm/memory_object.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_protos.h>
91 #include <vm/vm_iokit.h>
92 #include <vm/vm_sanitize_internal.h>
93 #include <vm/vm_map_internal.h>
94 
95 #ifdef VM32_SUPPORT
96 
97 /*
98  * See vm_user.c for the real implementation of all of these functions.
99  * We call through to the mach_ "wide" versions of the routines, and trust
100  * that the VM system verifies the arguments and only returns address that
101  * are appropriate for the task's address space size.
102  *
103  * New VM call implementations should not be added here, because they would
104  * be available only to 32-bit userspace clients. Add them to vm_user.c
105  * and the corresponding prototype to mach_vm.defs (subsystem 4800).
106  */
107 
108 kern_return_t
vm32_allocate(vm_map_t map,vm32_address_ut * addr32,vm32_size_ut size32,int flags)109 vm32_allocate(
110 	vm_map_t                map,
111 	vm32_address_ut        *addr32,
112 	vm32_size_ut            size32,
113 	int                     flags)
114 {
115 	mach_vm_address_ut      addr;
116 	mach_vm_size_ut         size;
117 	kern_return_t           kr;
118 
119 	addr    = vm_sanitize_expand_addr_to_64(*addr32);
120 	size    = vm_sanitize_expand_size_to_64(size32);
121 	kr      = mach_vm_allocate_external(map, &addr, size, flags);
122 	*addr32 = vm_sanitize_trunc_addr_to_32(addr);
123 
124 	return kr;
125 }
126 
127 kern_return_t
vm32_deallocate(vm_map_t map,vm32_offset_ut start32,vm32_size_ut size32)128 vm32_deallocate(
129 	vm_map_t                map,
130 	vm32_offset_ut          start32,
131 	vm32_size_ut            size32)
132 {
133 	mach_vm_offset_ut start;
134 	mach_vm_size_ut   size;
135 	kern_return_t     kr;
136 
137 	kr = vm_sanitize_expand_addr_size_to_64(start32, size32, &start, &size);
138 
139 	if (kr != KERN_SUCCESS) {
140 		return kr;
141 	}
142 
143 	return mach_vm_deallocate(map, start, size);
144 }
145 
146 kern_return_t
vm32_inherit(vm_map_t map,vm32_offset_t start,vm32_size_t size,vm_inherit_t new_inheritance)147 vm32_inherit(
148 	vm_map_t        map,
149 	vm32_offset_t           start,
150 	vm32_size_t             size,
151 	vm_inherit_t            new_inheritance)
152 {
153 	if ((map == VM_MAP_NULL) || (start + size < start)) {
154 		return KERN_INVALID_ARGUMENT;
155 	}
156 
157 	return mach_vm_inherit(map, start, size, new_inheritance);
158 }
159 
160 kern_return_t
vm32_protect(vm_map_t map,vm32_offset_t start,vm32_size_t size,boolean_t set_maximum,vm_prot_t new_protection)161 vm32_protect(
162 	vm_map_t                map,
163 	vm32_offset_t           start,
164 	vm32_size_t             size,
165 	boolean_t               set_maximum,
166 	vm_prot_t               new_protection)
167 {
168 	if ((map == VM_MAP_NULL) || (start + size < start)) {
169 		return KERN_INVALID_ARGUMENT;
170 	}
171 
172 	return mach_vm_protect(map, start, size, set_maximum, new_protection);
173 }
174 
175 kern_return_t
vm32_machine_attribute(vm_map_t map,vm32_address_t addr,vm32_size_t size,vm_machine_attribute_t attribute,vm_machine_attribute_val_t * value)176 vm32_machine_attribute(
177 	vm_map_t        map,
178 	vm32_address_t  addr,
179 	vm32_size_t     size,
180 	vm_machine_attribute_t  attribute,
181 	vm_machine_attribute_val_t* value)              /* IN/OUT */
182 {
183 	if ((map == VM_MAP_NULL) || (addr + size < addr)) {
184 		return KERN_INVALID_ARGUMENT;
185 	}
186 
187 	return mach_vm_machine_attribute(map, addr, size, attribute, value);
188 }
189 
190 kern_return_t
vm32_read(vm_map_t map,vm32_address_ut addr32,vm32_size_ut size32,pointer_t * data,mach_msg_type_number_t * data_size)191 vm32_read(
192 	vm_map_t                map,
193 	vm32_address_ut         addr32,
194 	vm32_size_ut            size32,
195 	pointer_t              *data,
196 	mach_msg_type_number_t *data_size)
197 {
198 	mach_vm_offset_ut addr;
199 	mach_vm_size_ut   size;
200 
201 	addr = vm_sanitize_expand_addr_to_64(addr32);
202 	size = vm_sanitize_expand_size_to_64(size32);
203 
204 	return mach_vm_read(map, addr, size, data, data_size);
205 }
206 
/*
 * vm32_read_list: read several discontiguous ranges from 'map' in one
 * call, widening the caller's 32-bit (address, size) entries for the
 * 64-bit implementation and narrowing the results back afterwards.
 */
kern_return_t
vm32_read_list(
	vm_map_t                map,
	vm32_read_entry_t       data_list,
	natural_t               count)
{
	mach_vm_read_entry_t    mdata_list;
	mach_msg_type_number_t  i;
	kern_return_t                   result;

	/*
	 * Both entry list types are indexed up to VM_MAP_ENTRY_MAX, so
	 * every slot is converted regardless of 'count'; 'count' itself
	 * is presumably range-checked inside mach_vm_read_list — verify
	 * there before relying on it.
	 */
	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		mdata_list[i].address = data_list[i].address;
		mdata_list[i].size = data_list[i].size;
	}

	result = mach_vm_read_list(map, mdata_list, count);

	/* Narrow the (possibly updated) wide entries back to 32 bits. */
	for (i = 0; i < VM_MAP_ENTRY_MAX; i++) {
		data_list[i].address = CAST_DOWN_EXPLICIT(vm32_address_t, mdata_list[i].address);
		data_list[i].size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_list[i].size);
	}

	return result;
}
231 
232 kern_return_t
vm32_read_overwrite(vm_map_t map,vm32_address_ut addr32,vm32_size_ut size32,vm32_address_ut data32,vm32_size_ut * data_size32)233 vm32_read_overwrite(
234 	vm_map_t                map,
235 	vm32_address_ut         addr32,
236 	vm32_size_ut            size32,
237 	vm32_address_ut         data32,
238 	vm32_size_ut           *data_size32)
239 {
240 	mach_vm_offset_ut addr, data;
241 	mach_vm_size_ut   size, data_size;
242 	kern_return_t     result;
243 
244 	addr = vm_sanitize_expand_addr_to_64(addr32);
245 	size = vm_sanitize_expand_size_to_64(size32);
246 	data = vm_sanitize_expand_addr_to_64(data32);
247 	data_size = vm_sanitize_expand_size_to_64(*data_size32);
248 
249 	result = mach_vm_read_overwrite(map, addr, size, data, &data_size);
250 	*data_size32 = vm_sanitize_trunc_size_to_32(data_size);
251 
252 	return result;
253 }
254 
255 kern_return_t
vm32_write(vm_map_t map,vm32_address_ut addr32,pointer_t data,mach_msg_type_number_t size)256 vm32_write(
257 	vm_map_t                map,
258 	vm32_address_ut         addr32,
259 	pointer_t               data,
260 	mach_msg_type_number_t  size)
261 {
262 	mach_vm_offset_ut addr;
263 
264 	addr = vm_sanitize_expand_addr_to_64(addr32);
265 	return mach_vm_write(map, addr, data, size);
266 }
267 
268 kern_return_t
vm32_copy(vm_map_t map,vm32_address_ut src_addr32,vm32_size_ut size32,vm32_address_ut dst_addr32)269 vm32_copy(
270 	vm_map_t                map,
271 	vm32_address_ut         src_addr32,
272 	vm32_size_ut            size32,
273 	vm32_address_ut         dst_addr32)
274 {
275 	mach_vm_offset_ut src_addr, dst_addr;
276 	mach_vm_size_ut   size;
277 
278 	src_addr = vm_sanitize_expand_addr_to_64(src_addr32);
279 	size     = vm_sanitize_expand_size_to_64(size32);
280 	dst_addr = vm_sanitize_expand_addr_to_64(dst_addr32);
281 
282 	return mach_vm_copy(map, src_addr, size, dst_addr);
283 }
284 
285 kern_return_t
vm32_map_64(vm_map_t target_map,vm32_offset_ut * addr32,vm32_size_ut size32,vm32_offset_ut mask32,int flags,ipc_port_t port,memory_object_offset_ut offset,boolean_t copy,vm_prot_ut cur_protection,vm_prot_ut max_protection,vm_inherit_ut inheritance)286 vm32_map_64(
287 	vm_map_t                target_map,
288 	vm32_offset_ut         *addr32,
289 	vm32_size_ut            size32,
290 	vm32_offset_ut          mask32,
291 	int                     flags,
292 	ipc_port_t              port,
293 	memory_object_offset_ut offset,
294 	boolean_t               copy,
295 	vm_prot_ut              cur_protection,
296 	vm_prot_ut              max_protection,
297 	vm_inherit_ut           inheritance)
298 {
299 	mach_vm_offset_ut addr, mask;
300 	mach_vm_size_ut   size;
301 	kern_return_t     result;
302 
303 	addr = vm_sanitize_expand_addr_to_64(*addr32);
304 	size = vm_sanitize_expand_size_to_64(size32);
305 	mask = vm_sanitize_expand_addr_to_64(mask32);
306 
307 	result  = mach_vm_map_external(target_map, &addr, size, mask,
308 	    flags, port, offset, copy,
309 	    cur_protection, max_protection, inheritance);
310 	*addr32 = vm_sanitize_trunc_addr_to_32(addr);
311 
312 	return result;
313 }
314 
315 kern_return_t
vm32_map(vm_map_t target_map,vm32_offset_ut * address,vm32_size_ut size,vm32_offset_ut mask,int flags,ipc_port_t port,vm32_offset_ut offset32,boolean_t copy,vm_prot_ut cur_protection,vm_prot_ut max_protection,vm_inherit_ut inheritance)316 vm32_map(
317 	vm_map_t                target_map,
318 	vm32_offset_ut         *address,
319 	vm32_size_ut            size,
320 	vm32_offset_ut          mask,
321 	int                     flags,
322 	ipc_port_t              port,
323 	vm32_offset_ut          offset32,
324 	boolean_t               copy,
325 	vm_prot_ut              cur_protection,
326 	vm_prot_ut              max_protection,
327 	vm_inherit_ut           inheritance)
328 {
329 	memory_object_offset_ut offset;
330 
331 	offset = vm_sanitize_expand_addr_to_64(offset32);
332 	return vm32_map_64(target_map, address, size, mask,
333 	           flags, port, offset, copy,
334 	           cur_protection, max_protection, inheritance);
335 }
336 
337 kern_return_t
vm32_remap(vm_map_t target_map,vm32_offset_ut * addr32,vm32_size_ut size32,vm32_offset_ut mask32,boolean_t anywhere,vm_map_t src_map,vm32_offset_ut src_addr32,boolean_t copy,vm_prot_ut * cur_protection,vm_prot_ut * max_protection,vm_inherit_ut inheritance)338 vm32_remap(
339 	vm_map_t                target_map,
340 	vm32_offset_ut         *addr32,
341 	vm32_size_ut            size32,
342 	vm32_offset_ut          mask32,
343 	boolean_t               anywhere,
344 	vm_map_t                src_map,
345 	vm32_offset_ut          src_addr32,
346 	boolean_t               copy,
347 	vm_prot_ut             *cur_protection,
348 	vm_prot_ut             *max_protection,
349 	vm_inherit_ut           inheritance)
350 {
351 	mach_vm_offset_ut addr, mask, src_addr;
352 	mach_vm_size_ut   size;
353 	kern_return_t     result;
354 
355 	addr = vm_sanitize_expand_addr_to_64(*addr32);
356 	size = vm_sanitize_expand_size_to_64(size32);
357 	mask = vm_sanitize_expand_addr_to_64(mask32);
358 	src_addr = vm_sanitize_expand_addr_to_64(src_addr32);
359 
360 	result  = mach_vm_remap_external(target_map, &addr, size, mask,
361 	    anywhere, src_map, src_addr, copy,
362 	    cur_protection, max_protection, inheritance);
363 	*addr32 = vm_sanitize_trunc_addr_to_32(addr);
364 
365 
366 	return result;
367 }
368 
/*
 * vm32_msync: synchronize the given 32-bit range with its backing
 * store.  Thin pass-through; the 32-bit address and size widen
 * implicitly, and argument validation is presumably handled inside
 * mach_vm_msync — verify there.
 */
kern_return_t
vm32_msync(
	vm_map_t        map,
	vm32_address_t  address,
	vm32_size_t     size,
	vm_sync_t       sync_flags)
{
	return mach_vm_msync(map, address, size, sync_flags);
}
378 
379 kern_return_t
vm32_behavior_set(vm_map_t map,vm32_offset_t start,vm32_size_t size,vm_behavior_t new_behavior)380 vm32_behavior_set(
381 	vm_map_t                map,
382 	vm32_offset_t           start,
383 	vm32_size_t             size,
384 	vm_behavior_t           new_behavior)
385 {
386 	if ((map == VM_MAP_NULL) || (start + size < start)) {
387 		return KERN_INVALID_ARGUMENT;
388 	}
389 
390 	return mach_vm_behavior_set(map, start, size, new_behavior);
391 }
392 
393 kern_return_t
vm32_region_64(vm_map_t map,vm32_offset_t * address,vm32_size_t * size,vm_region_flavor_t flavor,vm_region_info_t info,mach_msg_type_number_t * count,mach_port_t * object_name)394 vm32_region_64(
395 	vm_map_t                 map,
396 	vm32_offset_t           *address,               /* IN/OUT */
397 	vm32_size_t             *size,                  /* OUT */
398 	vm_region_flavor_t       flavor,                /* IN */
399 	vm_region_info_t         info,                  /* OUT */
400 	mach_msg_type_number_t  *count,                 /* IN/OUT */
401 	mach_port_t             *object_name)           /* OUT */
402 {
403 	mach_vm_offset_t        maddress;
404 	mach_vm_size_t          msize;
405 	kern_return_t           result;
406 
407 	maddress = *address;
408 	msize = *size;
409 	result = mach_vm_region(map, &maddress, &msize, flavor, info, count, object_name);
410 	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
411 	*address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress);
412 
413 	return result;
414 }
415 
/*
 * vm32_region: legacy region query for 32-bit callers.  Runs the
 * wide vm_map_region lookup, narrows the results, and fails if the
 * resulting region cannot be represented in a 32-bit address space.
 */
kern_return_t
vm32_region(
	vm_map_t                        map,
	vm32_address_t                  *address,       /* IN/OUT */
	vm32_size_t                     *size,          /* OUT */
	vm_region_flavor_t              flavor, /* IN */
	vm_region_info_t                info,           /* OUT */
	mach_msg_type_number_t  *count, /* IN/OUT */
	mach_port_t                     *object_name)   /* OUT */
{
	vm_map_address_t        map_addr;
	vm_map_size_t           map_size;
	kern_return_t           kr;

	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
	    &map_addr, &map_size,
	    flavor, info, count,
	    object_name);

	/*
	 * Outputs are narrowed and written back before the range check
	 * below, so the caller sees the (truncated) values even when the
	 * region extends past VM32_MAX_ADDRESS.
	 */
	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);

	/* A region ending above the 32-bit space is unrepresentable here. */
	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}
450 
451 kern_return_t
vm32_region_recurse_64(vm_map_t map,vm32_address_t * address,vm32_size_t * size,uint32_t * depth,vm_region_recurse_info_64_t info,mach_msg_type_number_t * infoCnt)452 vm32_region_recurse_64(
453 	vm_map_t                        map,
454 	vm32_address_t                  *address,
455 	vm32_size_t                     *size,
456 	uint32_t                        *depth,
457 	vm_region_recurse_info_64_t     info,
458 	mach_msg_type_number_t  *infoCnt)
459 {
460 	mach_vm_address_t       maddress;
461 	mach_vm_size_t          msize;
462 	kern_return_t           result;
463 
464 	maddress = *address;
465 	msize = *size;
466 	result = mach_vm_region_recurse(map, &maddress, &msize, depth, info, infoCnt);
467 	*address = CAST_DOWN_EXPLICIT(vm32_address_t, maddress);
468 	*size = CAST_DOWN_EXPLICIT(vm32_size_t, msize);
469 
470 	return result;
471 }
472 
/*
 * vm32_region_recurse: legacy (narrow-count) flavor of the submap
 * region query.  Calls the 64-bit vm_map_region_recurse_64 and copies
 * each field of the wide info structure into the caller's narrower
 * vm_region_submap_info layout.
 */
kern_return_t
vm32_region_recurse(
	vm_map_t                        map,
	vm32_offset_t           *address,       /* IN/OUT */
	vm32_size_t                     *size,          /* OUT */
	natural_t                       *depth, /* IN/OUT */
	vm_region_recurse_info_t        info32, /* IN/OUT */
	mach_msg_type_number_t  *infoCnt)       /* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	vm_map_address_t        map_addr;
	vm_map_size_t           map_size;
	kern_return_t           kr;

	/* The caller must supply at least the legacy info count. */
	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}


	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;
	info = (vm_region_submap_info_t)info32;
	/* Ask for the full 64-bit info count on the wide call. */
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
	    depth, &info64, infoCnt);

	/*
	 * Field-by-field narrowing copy into the caller's structure.
	 * Note this copy happens unconditionally, even when kr is not
	 * KERN_SUCCESS.
	 */
	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker: 64-bit object offset truncated to 32 bits */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr);
	*size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size);
	/* Restore the legacy info count for the 32-bit caller. */
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	/* A region ending above the 32-bit space is unrepresentable here. */
	if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}
528 
/*
 * vm32_purgable_control: query or change the purgeability state
 * (via 'control'/'state') for the region containing 'address'.
 */
kern_return_t
vm32_purgable_control(
	vm_map_t                map,
	vm32_offset_t           address,
	vm_purgable_t           control,
	int                     *state)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Truncate the address to a page boundary before the lookup. */
	return vm_map_purgable_control(map,
	           vm_map_trunc_page(address, PAGE_MASK),
	           control,
	           state);
}
545 
/*
 * vm32_map_page_query: report disposition and reference count for the
 * page containing 'offset' in 'map'.
 */
kern_return_t
vm32_map_page_query(
	vm_map_t                map,
	vm32_offset_t           offset,
	int                     *disposition,
	int                     *ref_count)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Truncate to a page boundary; the query is per-page. */
	return vm_map_page_query_internal(
		map,
		vm_map_trunc_page(offset, PAGE_MASK),
		disposition,
		ref_count);
}
563 
/*
 * vm32_make_memory_entry_64: create a memory-entry port for a range of
 * 'target_map'.  Arguments are already 64-bit wide, so this forwards
 * directly to the shared _mach_make_memory_entry implementation.
 */
kern_return_t
vm32_make_memory_entry_64(
	vm_map_t                target_map,
	memory_object_size_ut  *size,
	memory_object_offset_ut offset,
	vm_prot_ut              permission,
	ipc_port_t              *object_handle,
	ipc_port_t              parent_handle)
{
	// use the existing entrypoint
	return _mach_make_memory_entry(target_map, size, offset, permission, object_handle, parent_handle);
}
576 
577 kern_return_t
vm32_make_memory_entry(vm_map_t target_map,vm32_size_ut * size,vm32_offset_ut offset,vm_prot_ut permission,ipc_port_t * object_handle,ipc_port_t parent_entry)578 vm32_make_memory_entry(
579 	vm_map_t                target_map,
580 	vm32_size_ut           *size,
581 	vm32_offset_ut          offset,
582 	vm_prot_ut              permission,
583 	ipc_port_t              *object_handle,
584 	ipc_port_t              parent_entry)
585 {
586 	memory_object_size_ut   mo_size = vm_sanitize_expand_size_to_64(*size);
587 	memory_object_offset_ut mo_offset = vm_sanitize_expand_addr_to_64(offset);
588 	kern_return_t           kr;
589 
590 	kr = _mach_make_memory_entry(target_map, &mo_size,
591 	    mo_offset, permission, object_handle, parent_entry);
592 	*size = vm_sanitize_trunc_size_to_32(mo_size);
593 	return kr;
594 }
595 
/*
 * vm32__task_wire: stub.  'must_wire' is unused; for any valid map the
 * operation always reports KERN_NOT_SUPPORTED.
 */
kern_return_t
vm32__task_wire(
	vm_map_t        map,
	boolean_t       must_wire __unused)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_NOT_SUPPORTED;
}
607 
/*
 * vm32__map_exec_lockdown: permanently set the map's
 * map_disallow_new_exec flag (no interface here clears it), which by
 * its name forbids new executable mappings — confirm semantics in
 * vm_map.
 */
kern_return_t
vm32__map_exec_lockdown(
	vm_map_t        map)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Take the map lock exclusively while flipping the flag. */
	vm_map_lock(map);
	map->map_disallow_new_exec = TRUE;
	vm_map_unlock(map);

	return KERN_SUCCESS;
}
622 
623 
624 #endif /* VM32_SUPPORT */
625