xref: /xnu-11417.140.69/osfmk/mach/vm_map.defs (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1/*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  [email protected]
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 *	File:	mach/vm_map.defs
60 *
61 *	Exported (native-sized) kernel VM calls.
62 */
63
/*
 * Subsystem declaration: vm_map, base message ID 3800.
 * NOTE(review): routine order below is ABI-significant -- every "routine"
 * and every "skip" consumes one message ID in sequence, so declarations
 * must never be reordered, inserted, or removed.
 */
64subsystem
65#if	KERNEL_SERVER || KOBJECT_SERVER
66	KernelServer
67#endif	/* KERNEL_SERVER || KOBJECT_SERVER */
68	  vm_map 3800;
69
70/* Visible to the type/availability conditionals in the included .defs files */
70#if KERNEL_SERVER
71#define VM_KERNEL_SERVER 1
72#endif
73
74#include <mach/std_types.defs>
75#include <mach/mach_types.defs>
76#include <mach_debug/mach_debug_types.defs>
77
/*
 * Stub-naming helpers:
 *  - PREFIX:  plain user-space builds (not Libsyscall, not the kernel, not
 *    the VM test harness) generate "_kernelrpc_"-prefixed stubs; Libsyscall
 *    wraps those under the public names.
 *  - KERNEL_SERVER_SUFFIX: kernel-server builds append "_external" so the
 *    MIG server demux binds to a distinct in-kernel entry point.
 */
78#define CONCAT(a,b) a ## b
79#if !KERNEL && !LIBSYSCALL_INTERFACE && !KERNEL_VM_TEST
80#define PREFIX(NAME) CONCAT(_kernelrpc_, NAME)
81#else
82#define PREFIX(NAME) NAME
83#endif
84
85#if	KERNEL_SERVER
86#define KERNEL_SERVER_SUFFIX(NAME) CONCAT(NAME, _external)
87#else
88#define KERNEL_SERVER_SUFFIX(NAME) NAME
89#endif
90
91/* If building for Sandbox, keep NAME unchanged */
92#if SANDBOX_COMPILER
93#define KERNEL_SERVER_SUFFIX_SANDBOX(NAME) NAME
94#else
95#define KERNEL_SERVER_SUFFIX_SANDBOX(NAME) KERNEL_SERVER_SUFFIX(NAME)
96#endif
97
98/*
99 *      Returns information about the contents of the virtual
100 *      address space of the target task at the specified
101 *      address.  The returned protection, inheritance, sharing
102 *      and memory object values apply to the entire range described
103 *      by the address range returned; the memory object offset
104 *      corresponds to the beginning of the address range.
105 *      [If the specified address is not allocated, the next
106 *      highest address range is described.  If no addresses beyond
107 *      the one specified are allocated, the call returns KERN_NO_SPACE.]
108 */
/* info is variable-length: the caller passes its maximum element count in
 * and the kernel returns the count actually filled (CountInOut). */
109routine vm_region(
110                target_task     : vm_map_read_t;
111	inout	address		: vm_address_t;
112        out     size            : vm_size_t;
113		flavor		: vm_region_flavor_t;
114	out	info		: vm_region_info_t, CountInOut;
115        out     object_name     : memory_object_name_t =
116                                        MACH_MSG_TYPE_MOVE_SEND
117                                        ctype: mach_port_t);
118
119/*
120 *	Allocate zero-filled memory in the address space
121 *	of the target task, either at the specified address,
122 *	or wherever space can be found (if anywhere is TRUE),
123 *	of the specified size.  The address at which the
124 *	allocation actually took place is returned.
125 */
126
/* Plain user-space builds do not generate this stub (Libsyscall provides
 * the public wrapper); "skip" keeps the subsequent message IDs aligned. */
127#if !KERNEL && !LIBSYSCALL_INTERFACE && !KERNEL_VM_TEST
128skip;
129#else
130routine PREFIX(KERNEL_SERVER_SUFFIX(vm_allocate))(
131		target_task	: vm_task_entry_t;
132	inout	address		: vm_address_t;
133		size		: vm_size_t;
134		flags		: int);
135
136#endif
137
138/*
139 *	Deallocate the specified range from the virtual
140 *	address space of the target task.
141 */
142
/* Skipped in plain user-space builds; "skip" keeps message IDs aligned. */
143#if !KERNEL && !LIBSYSCALL_INTERFACE && !KERNEL_VM_TEST
144skip;
145#else
146routine PREFIX(vm_deallocate)(
147		target_task	: vm_task_entry_t;
148		address		: vm_address_t;
149		size		: vm_size_t);
150
151#endif
152
153/*
154 *	Set the current or maximum protection attribute
155 *	for the specified range of the virtual address
156 *	space of the target task.  The current protection
157 *	limits the memory access rights of threads within
158 *	the task; the maximum protection limits the accesses
159 *	that may be given in the current protection.
160 *	Protections are specified as a set of {read, write, execute}
161 *	*permissions*.
162 */
163
/* Skipped in plain user-space builds; "skip" keeps message IDs aligned. */
164#if !KERNEL && !LIBSYSCALL_INTERFACE && !KERNEL_VM_TEST
165skip;
166#else
167routine PREFIX(vm_protect)(
168		target_task	: vm_task_entry_t;
169		address		: vm_address_t;
170		size		: vm_size_t;
171		set_maximum	: boolean_t;
172		new_protection	: vm_prot_t);
173#endif
174
175/*
176 *	Set the inheritance attribute for the specified range
177 *	of the virtual address space of the target task.
178 *	The inheritance value is one of {none, copy, share}, and
179 *	specifies how the child address space should acquire
180 *	this memory at the time of a task_create call.
181 */
182routine vm_inherit(
183		target_task	: vm_task_entry_t;
184		address		: vm_address_t;
185		size		: vm_size_t;
186		new_inheritance	: vm_inherit_t);
187
188/*
189 *	Returns the contents of the specified range of the
190 *	virtual address space of the target task.  [The
191 *	range must be aligned on a virtual page boundary,
192 *	and must be a multiple of pages in extent.  The
193 *	protection on the specified range must permit reading.]
194 */
/* data is returned out-of-line (pointer_t); its byte count rides with it. */
195routine PREFIX(vm_read) (
196		target_task	: vm_map_read_t;
197		address		: vm_address_t;
198		size		: vm_size_t;
199	out	data		: pointer_t);
200
201/*
202 * List corollary to vm_read, returns mapped contents of specified
203 * ranges within target address space.
204 */
205routine vm_read_list(
206		target_task 	: vm_map_read_t;
207	inout	data_list   	: vm_read_entry_t;
208		count		: natural_t);
209
210/*
211 *	Writes the contents of the specified range of the
212 *	virtual address space of the target task.  [The
213 *	range must be aligned on a virtual page boundary,
214 *	and must be a multiple of pages in extent.  The
215 *	protection on the specified range must permit writing.]
216 */
/* No explicit size parameter: the out-of-line data (pointer_t) carries
 * its own byte count. */
217routine vm_write(
218		target_task	: vm_map_t;
219		address		: vm_address_t;
220		data		: pointer_t);
221
222/*
223 *	Copy the contents of the source range of the virtual
224 *	address space of the target task to the destination
225 *	range in that same address space.  [Both of the
226 *	ranges must be aligned on a virtual page boundary,
227 *	and must be multiples of pages in extent.  The
228 *	protection on the source range must permit reading,
229 *	and the protection on the destination range must
230 *	permit writing.]
231 */
232routine vm_copy(
233		target_task	: vm_map_t;
234		source_address	: vm_address_t;
235		size		: vm_size_t;
236		dest_address	: vm_address_t);
237
238/*
239 *	Returns the contents of the specified range of the
240 *	virtual address space of the target task.  [There
241 *	are no alignment restrictions, and the results will
242 *      overwrite the area pointed to by data - which must
243 *      already exist. The protection on the specified range
244 *	must permit reading.]
245 */
/* data is an address in the CALLER's address space; outsize reports the
 * number of bytes actually copied there. */
246routine vm_read_overwrite(
247		target_task	: vm_map_read_t;
248		address		: vm_address_t;
249		size		: vm_size_t;
250		data		: vm_address_t;
251	out	outsize		: vm_size_t);
252
253
/*
 *	Synchronize the specified range of the target task's address
 *	space with its backing store, according to sync_flags
 *	(vm_sync_t).  Analogous to msync(2) operating on another task.
 */
254routine vm_msync(
255		target_task	: vm_map_t;
256		address		: vm_address_t;
257		size		: vm_size_t;
258		sync_flags	: vm_sync_t);
259
260/*
261 *	Set the paging behavior attribute for the specified range
262 *	of the virtual address space of the target task.
263 *	The behavior value is one of {default, random, forward
264 *	sequential, reverse sequential} and indicates the expected
265 *	page reference pattern for the specified range.
266 */
267routine vm_behavior_set(
268		target_task	: vm_map_t;
269		address		: vm_address_t;
270		size		: vm_size_t;
271		new_behavior	: vm_behavior_t);
272
273
274/*
275 *	Map a user-defined memory object into the virtual address
276 *	space of the target task.  If desired (anywhere is TRUE),
277 *	the kernel will find a suitable address range of the
278 *	specified size; else, the specific address will be allocated.
279 *
280 *	The beginning address of the range will be aligned on a virtual
281 *	page boundary, be at or beyond the address specified, and
282 *	meet the mask requirements (bits turned on in the mask must not
283 *	be turned on in the result); the size of the range, in bytes,
284 *	will be rounded	up to an integral number of virtual pages.
285 *
286 *	The memory in the resulting range will be associated with the
287 *	specified memory object, with the beginning of the memory range
288 *	referring to the specified offset into the memory object.
289 *
290 *	The mapping will take the current and maximum protections and
291 *	the inheritance attributes specified; see the vm_protect and
292 *	vm_inherit calls for a description of these attributes.
293 *
294 *	If desired (copy is TRUE), the memory range will be filled
295 *	with a copy of the data from the memory object; this copy will
296 *	be private to this mapping in this target task.  Otherwise,
297 *	the memory in this mapping will be shared with other mappings
298 *	of the same memory object at the same offset (in this task or
299 *	in other tasks).  [The Mach kernel only enforces shared memory
300 *	consistency among mappings on one host with similar page alignments.
301 *	The user-defined memory manager for this object is responsible
302 *	for further consistency.]
303 */
/* offset here is vm_offset_t (native size); see vm_map_64 below for the
 * 64-bit memory_object_offset_t variant. */
304routine PREFIX(KERNEL_SERVER_SUFFIX(vm_map)) (
305		target_task	: vm_task_entry_t;
306	inout	address		: vm_address_t;
307		size		: vm_size_t;
308		mask		: vm_address_t;
309		flags		: int;
310		object		: mem_entry_name_port_t;
311		offset		: vm_offset_t;
312		copy		: boolean_t;
313		cur_protection	: vm_prot_t;
314		max_protection	: vm_prot_t;
315		inheritance	: vm_inherit_t);
316
317/*
318 *	Set/Get special properties of memory associated
319 *	to some virtual address range, such as cacheability,
320 *	migrability, replicability.  Machine-dependent.
321 */
/* value is inout: it carries the new setting in and the result back. */
322routine vm_machine_attribute(
323		target_task	: vm_map_t;
324		address		: vm_address_t;
325		size		: vm_size_t;
326		attribute	: vm_machine_attribute_t;
327	inout	value		: vm_machine_attribute_val_t);
328
329/*
330 *      Map portion of a task's address space.
331 */
/* cur/max_protection are out-only: the kernel reports the protections
 * actually granted to the new mapping.  Compare vm_remap_new below,
 * where they are inout. */
332routine PREFIX(KERNEL_SERVER_SUFFIX(vm_remap)) (
333		target_task	: vm_map_t;
334	inout	target_address	: vm_address_t;
335		size		: vm_size_t;
336		mask		: vm_address_t;
337		flags		: int;
338		src_task	: vm_map_t;
339		src_address	: vm_address_t;
340		copy		: boolean_t;
341	out	cur_protection	: vm_prot_t;
342	out	max_protection	: vm_prot_t;
343		inheritance	: vm_inherit_t);
344
345/*
346 *	Require that all future virtual memory allocation
347 *	allocates wired memory.  Setting must_wire to FALSE
348 *	disables the wired future feature.
349 */
350routine task_wire(
351		target_task	: vm_map_t;
352		must_wire	: boolean_t);
353
354
355/*
356 *	Allow application level processes to create named entries which
357 *	correspond to mapped portions of their address space.  These named
358 *	entries can then be manipulated, shared with other processes in
359 *	other address spaces and ultimately mapped in other address spaces.
360 */
361
/* size is inout: the kernel rounds it and returns the size covered by
 * the new entry.  object_handle is a moved send right to the entry port. */
362routine mach_make_memory_entry(
363		target_task	:vm_map_t;
364	inout	size		:vm_size_t;
365		offset		:vm_offset_t;
366		permission	:vm_prot_t;
367	out	object_handle	:mem_entry_name_port_move_send_t;
368		parent_entry	:mem_entry_name_port_t);
369
370/*
371 *      Give the caller information on the given location in a virtual
372 *      address space.  If a page is mapped return ref and dirty info.
373 */
374routine vm_map_page_query(
375                target_map      :vm_map_read_t;
376                offset          :vm_offset_t;
377        out     disposition     :integer_t;
378        out     ref_count       :integer_t);
379
380/*
381 *	Returns information about a region of memory.
382 *	Includes info about the chain of objects rooted at that region.
383 *      Only available in MACH_VM_DEBUG compiled kernels,
384 *      otherwise returns KERN_FAILURE.
385 */
386routine mach_vm_region_info(
387		task		: vm_map_read_t;
388		address		: vm_address_t;
389	out	region		: vm_info_region_t;
390	out	objects		: vm_info_object_array_t);
391
/* Declaration retained only to hold its message ID in the sequence. */
392routine	vm_mapped_pages_info(  /* OBSOLETE */
393		task		: vm_map_read_t;
394	out	pages		: page_address_array_t);
395
396skip; /* was vm_region_object_create */
397
398/*
399 *	A recursive form of vm_region which probes submaps within the
400 *	address space.
401 */
402routine vm_region_recurse(
403                target_task     : vm_map_read_t;
404	inout	address		: vm_address_t;
405        out     size            : vm_size_t;
406	inout	nesting_depth	: natural_t;
407	out	info		: vm_region_recurse_info_t,CountInOut);
408
409
410/*
411 *	The routines below are temporary, meant for transitional use
412 *	as their counterparts are moved from 32 to 64 bit data path
413 */
414
415
/* Same signature as vm_region_recurse above; kept as a distinct message
 * ID for the 64-bit data-path transition. */
416routine vm_region_recurse_64(
417                target_task     : vm_map_read_t;
418	inout	address		: vm_address_t;
419        out     size            : vm_size_t;
420	inout	nesting_depth	: natural_t;
421	out	info		: vm_region_recurse_info_t,CountInOut);
422
/* 64-bit region-info variant; declaration retained to hold its message ID. */
423routine mach_vm_region_info_64( /* OBSOLETE */
424		task		: vm_map_read_t;
425		address		: vm_address_t;
426	out	region		: vm_info_region_64_t;
427	out	objects		: vm_info_object_array_t);
428
/* 64-bit counterpart of vm_region (see the comment above vm_region for
 * the semantics); info is CountInOut as there. */
429routine vm_region_64(
430                target_task     : vm_map_read_t;
431	inout	address		: vm_address_t;
432        out     size            : vm_size_t;
433		flavor		: vm_region_flavor_t;
434	out	info		: vm_region_info_t, CountInOut;
435        out     object_name     : memory_object_name_t =
436                                        MACH_MSG_TYPE_MOVE_SEND
437                                        ctype: mach_port_t);
438
/* 64-bit counterpart of mach_make_memory_entry: size and offset use the
 * 64-bit memory_object_size_t/memory_object_offset_t types. */
439routine mach_make_memory_entry_64(
440		target_task	:vm_map_t;
441	inout	size		:memory_object_size_t;
442		offset		:memory_object_offset_t;
443		permission	:vm_prot_t;
444	out	object_handle	:mach_port_move_send_t;
445		parent_entry	:mem_entry_name_port_t);
446
447
448
/* 64-bit counterpart of vm_map (see the long comment above vm_map):
 * identical except offset is the 64-bit memory_object_offset_t. */
449routine KERNEL_SERVER_SUFFIX(vm_map_64)(
450		target_task	: vm_task_entry_t;
451	inout	address		: vm_address_t;
452		size		: vm_size_t;
453		mask		: vm_address_t;
454		flags		: int;
455		object		: mem_entry_name_port_t;
456		offset		: memory_object_offset_t;
457		copy		: boolean_t;
458		cur_protection	: vm_prot_t;
459		max_protection	: vm_prot_t;
460		inheritance	: vm_inherit_t);
461
/* Retired routines: "skip" keeps the subsequent message IDs stable. */
462skip; /* was vm_map_get_upl */
463skip; /* was vm_upl_map */
464skip; /* was vm_upl_unmap */
465
466/*
467 *	Control behavior and investigate state of a "purgable" object in
468 *	the virtual address space of the target task.  A purgable object is
469 *	created via a call to vm_allocate() with VM_FLAGS_PURGABLE
470 *	specified.  See the routine implementation for a complete
471 *	definition of the routine.
472 */
/* Kernel-server side receives the raw task port (mach_port_t) so the
 * implementation can perform its own translation/validation of the port. */
473routine PREFIX(KERNEL_SERVER_SUFFIX_SANDBOX(vm_purgable_control)) (
474#if KERNEL_SERVER
475		target_tport: mach_port_t;
476#else
477		target_task	: vm_map_t;
478#endif
479		address		: vm_address_t;
480		control		: vm_purgable_t;
481	inout	state		: int);
482
483
/*
 *	Lock down the target task's address space with respect to
 *	executable mappings; takes only the target map.  See the kernel
 *	implementation for the precise policy enforced.
 */
484routine vm_map_exec_lockdown(
485		target_task 	: vm_map_t);
486
/*
 *	Newer variant of vm_remap: the source map arrives as a raw port on
 *	the kernel-server side (for in-kernel translation/validation), and
 *	cur/max_protection are inout -- the caller proposes protections and
 *	the kernel returns what was actually granted.
 */
487routine PREFIX(KERNEL_SERVER_SUFFIX(vm_remap_new)) (
488		target_task	: vm_map_t;
489	inout	target_address	: vm_address_t;
490		size		: vm_size_t;
491		mask		: vm_address_t;
492		flags		: int;
493#ifdef KERNEL_SERVER
494		src_tport	: mach_port_t;
495#else
496		src_task	: vm_map_read_t;
497#endif
498		src_address	: vm_address_t;
499		copy		: boolean_t;
500	inout	cur_protection	: vm_prot_t;
501	inout	max_protection	: vm_prot_t;
502		inheritance	: vm_inherit_t);
503
504/* vim: set ft=c : */
505