xref: /xnu-12377.41.6/osfmk/mach/memory_object_types.h (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	memory_object.h
60  *	Author:	Michael Wayne Young
61  *
62  *	External memory management interface definition.
63  */
64 
65 #ifndef _MACH_MEMORY_OBJECT_TYPES_H_
66 #define _MACH_MEMORY_OBJECT_TYPES_H_
67 
68 /*
69  *	User-visible types used in the external memory
70  *	management interface:
71  */
72 
73 #include <mach/port.h>
74 #include <mach/message.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_sync.h>
77 #include <mach/vm_types.h>
78 #include <mach/machine/vm_types.h>
79 
80 #include <sys/cdefs.h>
81 
82 #if XNU_KERNEL_PRIVATE
83 #include <os/refcnt.h>
84 #if __LP64__
85 #define MEMORY_OBJECT_HAS_REFCOUNT 1
86 #else
87 #define MEMORY_OBJECT_HAS_REFCOUNT 0
88 #endif
89 #endif /* XNU_KERNEL_PRIVATE */
90 
91 #define VM_64_BIT_DATA_OBJECTS
92 
93 typedef unsigned long long      memory_object_offset_t;
94 typedef unsigned long long      memory_object_size_t;
95 typedef natural_t               memory_object_cluster_size_t;
96 typedef natural_t *             memory_object_fault_info_t;
97 
98 typedef unsigned long long      vm_object_id_t;
99 
100 
101 /*
102  * Temporary until real EMMI version gets re-implemented
103  */
104 
105 #ifdef  KERNEL_PRIVATE
106 
107 /* IMPORTANT: this type must match "ipc_object_bits_t" from ipc/ipc_port.h */
108 typedef natural_t mo_ipc_object_bits_t;
109 
110 struct memory_object_pager_ops; /* forward declaration */
111 
112 typedef struct vm_object       *memory_object_control_t;
113 /*
114  * "memory_object" used to be a Mach port in user space and could be passed
115  * as such to some kernel APIs.
116  *
117  * Its first field must match the "io_bits" field of a
118  * "struct ipc_object" to identify them as a "IKOT_MEMORY_OBJECT".
119  */
typedef struct memory_object {
	mo_ipc_object_bits_t                    mo_ikot; /* DO NOT CHANGE */
#if __LP64__
#if XNU_KERNEL_PRIVATE
	/*
	 * On LP64 there's a 4 byte hole that is perfect for a refcount.
	 * Expose it so that all pagers can take advantage of it.
	 */
	os_ref_atomic_t                         mo_ref;
#else
	unsigned int                            __mo_padding;
#endif /* XNU_KERNEL_PRIVATE */
#endif /* __LP64__ */
	/* operations vector for this pager type; see memory_object_pager_ops */
	const struct memory_object_pager_ops    *mo_pager_ops;
	/* control handle for the VM object backed by this memory object */
	memory_object_control_t                 mo_control;
	/* NOTE(review): presumably the compact thread id of the last
	 * "last_unmap" caller - not established by this header, confirm
	 * against the pager implementations */
	uint32_t                                mo_last_unmap_ctid;
} *memory_object_t;
137 
/*
 * Operations vector implemented by each pager type (vnode pager,
 * device pager, compressor pager, ...).  The VM system dispatches
 * EMMI requests for a memory object through this table.
 * The slot order is ABI: obsolete entry points are kept as padding
 * (kernel-private builds) so the struct layout never changes.
 */
typedef const struct memory_object_pager_ops {
	/* take an additional reference on the memory object */
	void (*memory_object_reference)(
		memory_object_t mem_obj);
	/* release a reference; the last release may destroy the object */
	void (*memory_object_deallocate)(
		memory_object_t mem_obj);
	/* attach the pager to its VM object via the given control handle */
	kern_return_t (*memory_object_init)(
		memory_object_t mem_obj,
		memory_object_control_t mem_control,
		memory_object_cluster_size_t size);
	/* detach the pager from its VM object */
	kern_return_t (*memory_object_terminate)(
		memory_object_t mem_obj);
	/* page-in request: supply data for [offset, offset+length) */
	kern_return_t (*memory_object_data_request)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t length,
		vm_prot_t desired_access,
		memory_object_fault_info_t fault_info);
	/* page-out: accept dirty/precious data being returned to the pager */
	kern_return_t (*memory_object_data_return)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size,
		memory_object_offset_t *resid_offset,
		int *io_error,
		boolean_t dirty,
		boolean_t kernel_copy,
		int upl_flags);
	kern_return_t (*memory_object_data_initialize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size);
#if XNU_KERNEL_PRIVATE
	/* retired entry points; slots preserved to keep the layout stable */
	void *__obsolete_memory_object_data_unlock;
	void *__obsolete_memory_object_synchronize;
#else
	kern_return_t (*memory_object_data_unlock)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_prot_t desired_access); /* obsolete */
	kern_return_t (*memory_object_synchronize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_sync_t sync_flags); /* obsolete */
#endif /* !XNU_KERNEL_PRIVATE */
	/* notification that the object is being mapped with protection "prot" */
	kern_return_t (*memory_object_map)(
		memory_object_t mem_obj,
		vm_prot_t prot);
	/* notification that the last mapping of the object has gone away */
	kern_return_t (*memory_object_last_unmap)(
		memory_object_t mem_obj);
#if XNU_KERNEL_PRIVATE
	/* retired entry point; slot preserved to keep the layout stable */
	void *__obsolete_memory_object_data_reclaim;
#else
	kern_return_t (*memory_object_data_reclaim)(
		memory_object_t mem_obj,
		boolean_t reclaim_backing_store); /* obsolete */
#endif /* !XNU_KERNEL_PRIVATE */
	/* report the VM object (if any) backing this memory object */
	boolean_t (*memory_object_backing_object)(
		memory_object_t mem_obj,
		memory_object_offset_t mem_obj_offset,
		vm_object_t *backing_object,
		vm_object_offset_t *backing_offset);
	/* human-readable pager name, for debugging/diagnostics */
	const char *memory_object_pager_name;
} * memory_object_pager_ops_t;
202 
203 #else   /* KERNEL_PRIVATE */
204 
205 typedef mach_port_t     memory_object_t;
206 /*
207  * vestigial, maintained for source compatibility,
208  * no MIG interface will accept or return non NULL
209  * objects for those.
210  */
211 typedef mach_port_t     memory_object_control_t;
212 
213 #endif  /* KERNEL_PRIVATE */
214 
215 typedef memory_object_t *memory_object_array_t;
216 /* A memory object ... */
217 /*  Used by the kernel to retrieve */
218 /*  or store data */
219 
220 typedef mach_port_t     memory_object_name_t;
221 /* Used to describe the memory ... */
222 /*  object in vm_regions() calls */
223 
224 typedef mach_port_t     memory_object_default_t;
225 /* Registered with the host ... */
226 /*  for creating new internal objects */
227 
228 #define MEMORY_OBJECT_NULL              ((memory_object_t) 0)
229 #define MEMORY_OBJECT_CONTROL_NULL      ((memory_object_control_t) 0)
230 #define MEMORY_OBJECT_NAME_NULL         ((memory_object_name_t) 0)
231 #define MEMORY_OBJECT_DEFAULT_NULL      ((memory_object_default_t) 0)
232 
233 
234 typedef int             memory_object_copy_strategy_t;
235 /* How memory manager handles copy: */
236 #define         MEMORY_OBJECT_COPY_NONE         0
237 /* ... No special support */
238 #define         MEMORY_OBJECT_COPY_CALL         1
239 /* ... Make call on memory manager */
240 #define         MEMORY_OBJECT_COPY_DELAY        2
241 /* ... Memory manager doesn't
242  *     change data externally.
243  */
244 #define         MEMORY_OBJECT_COPY_TEMPORARY    3
245 /* ... Memory manager doesn't
246  *     change data externally, and
247  *     doesn't need to see changes.
248  */
249 #define         MEMORY_OBJECT_COPY_SYMMETRIC    4
250 /* ... Memory manager doesn't
251  *     change data externally,
252  *     doesn't need to see changes,
253  *     and object will not be
254  *     multiply mapped.
255  *
256  *     XXX
257  *     Not yet safe for non-kernel use.
258  */
259 
260 #define         MEMORY_OBJECT_COPY_INVALID      5
261 /* ...	An invalid copy strategy,
262  *	for external objects which
263  *	have not been initialized.
264  *	Allows copy_strategy to be
265  *	examined without also
266  *	examining pager_ready and
267  *	internal.
268  */
269 
270 #define         MEMORY_OBJECT_COPY_DELAY_FORK   6
271 /*
272  * ...  Like MEMORY_OBJECT_COPY_DELAY for vm_map_fork() but like
273  *      MEMORY_OBJECT_COPY_NONE otherwise.
274  */
275 
276 typedef int             memory_object_return_t;
277 /* Which pages to return to manager
278  *  this time (lock_request) */
279 #define         MEMORY_OBJECT_RETURN_NONE       0
280 /* ... don't return any. */
281 #define         MEMORY_OBJECT_RETURN_DIRTY      1
282 /* ... only dirty pages. */
283 #define         MEMORY_OBJECT_RETURN_ALL        2
284 /* ... dirty and precious pages. */
285 #define         MEMORY_OBJECT_RETURN_ANYTHING   3
286 /* ... any resident page. */
287 
288 /*
289  *	Data lock request flags
290  */
291 
292 #define         MEMORY_OBJECT_DATA_FLUSH        0x1
293 #define         MEMORY_OBJECT_DATA_NO_CHANGE    0x2
294 #define         MEMORY_OBJECT_DATA_PURGE        0x4
295 #define         MEMORY_OBJECT_COPY_SYNC         0x8
296 #define         MEMORY_OBJECT_DATA_SYNC         0x10
297 #define         MEMORY_OBJECT_IO_SYNC           0x20
298 #define         MEMORY_OBJECT_DATA_FLUSH_ALL    0x40
299 
300 /*
301  *	Types for the memory object flavor interfaces
302  */
303 
304 #define MEMORY_OBJECT_INFO_MAX      (1024)
305 typedef int     *memory_object_info_t;
306 typedef int      memory_object_flavor_t;
307 typedef int      memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX];
308 
309 
310 #define MEMORY_OBJECT_PERFORMANCE_INFO  11
311 #define MEMORY_OBJECT_ATTRIBUTE_INFO    14
312 #define MEMORY_OBJECT_BEHAVIOR_INFO     15
313 
314 #ifdef  PRIVATE
315 
316 #define OLD_MEMORY_OBJECT_BEHAVIOR_INFO         10
317 #define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO        12
318 
/* legacy behavior attributes (OLD_MEMORY_OBJECT_BEHAVIOR_INFO flavor) */
struct old_memory_object_behave_info {
	memory_object_copy_strategy_t   copy_strategy;  /* MEMORY_OBJECT_COPY_* */
	boolean_t                       temporary;
	boolean_t                       invalidate;
};
324 
/* legacy attributes (OLD_MEMORY_OBJECT_ATTRIBUTE_INFO flavor) */
struct old_memory_object_attr_info {                    /* old attr list */
	boolean_t                       object_ready;
	boolean_t                       may_cache;
	memory_object_copy_strategy_t   copy_strategy;  /* MEMORY_OBJECT_COPY_* */
};
330 
331 typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t;
332 typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t;
333 typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t;
334 typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t;
335 
336 #define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT     ((mach_msg_type_number_t) \
337 	        (sizeof(old_memory_object_behave_info_data_t)/sizeof(int)))
338 #define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT       ((mach_msg_type_number_t) \
339 	        (sizeof(old_memory_object_attr_info_data_t)/sizeof(int)))
340 
341 #ifdef KERNEL
342 
343 __BEGIN_DECLS
344 extern void memory_object_reference(memory_object_t object);
345 extern void memory_object_deallocate(memory_object_t object);
346 extern boolean_t memory_object_backing_object(
347 	memory_object_t mem_obj,
348 	memory_object_offset_t offset,
349 	vm_object_t *backing_object,
350 	vm_object_offset_t *backing_offset);
351 
352 extern void memory_object_control_reference(memory_object_control_t control);
353 extern void memory_object_control_deallocate(memory_object_control_t control);
354 extern int  memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int);
355 __END_DECLS
356 
357 #endif  /* KERNEL */
358 
359 #endif  /* PRIVATE */
360 
/* performance attributes (MEMORY_OBJECT_PERFORMANCE_INFO flavor) */
struct memory_object_perf_info {
	memory_object_cluster_size_t    cluster_size;   /* preferred I/O cluster size, in bytes */
	boolean_t                       may_cache;      /* object may be retained in the cache */
};
365 
/* general attributes (MEMORY_OBJECT_ATTRIBUTE_INFO flavor) */
struct memory_object_attr_info {
	memory_object_copy_strategy_t   copy_strategy;  /* MEMORY_OBJECT_COPY_* */
	memory_object_cluster_size_t    cluster_size;   /* preferred I/O cluster size, in bytes */
	boolean_t                       may_cache_object;
	boolean_t                       temporary;
};
372 
/* behavior attributes (MEMORY_OBJECT_BEHAVIOR_INFO flavor) */
struct memory_object_behave_info {
	memory_object_copy_strategy_t   copy_strategy;  /* MEMORY_OBJECT_COPY_* */
	boolean_t                       temporary;
	boolean_t                       invalidate;
	boolean_t                       silent_overwrite;
	boolean_t                       advisory_pageout;
};
380 
381 
382 typedef struct memory_object_behave_info *memory_object_behave_info_t;
383 typedef struct memory_object_behave_info memory_object_behave_info_data_t;
384 
385 typedef struct memory_object_perf_info  *memory_object_perf_info_t;
386 typedef struct memory_object_perf_info  memory_object_perf_info_data_t;
387 
388 typedef struct memory_object_attr_info  *memory_object_attr_info_t;
389 typedef struct memory_object_attr_info  memory_object_attr_info_data_t;
390 
391 #define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t)       \
392 	        (sizeof(memory_object_behave_info_data_t)/sizeof(int)))
393 #define MEMORY_OBJECT_PERF_INFO_COUNT   ((mach_msg_type_number_t)       \
394 	        (sizeof(memory_object_perf_info_data_t)/sizeof(int)))
395 #define MEMORY_OBJECT_ATTR_INFO_COUNT   ((mach_msg_type_number_t)       \
396 	        (sizeof(memory_object_attr_info_data_t)/sizeof(int)))
397 
398 #define invalid_memory_object_flavor(f)                                 \
399 	(f != MEMORY_OBJECT_ATTRIBUTE_INFO &&                           \
400 	 f != MEMORY_OBJECT_PERFORMANCE_INFO &&                         \
401 	 f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO &&                        \
402 	 f != MEMORY_OBJECT_BEHAVIOR_INFO &&                            \
403 	 f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO)
404 
405 
406 /*
407  * Used to support options on memory_object_release_name call
408  */
409 #define MEMORY_OBJECT_TERMINATE_IDLE    0x1
410 #define MEMORY_OBJECT_RESPECT_CACHE     0x2
411 #define MEMORY_OBJECT_RELEASE_NO_OP     0x4
412 
413 
414 /* named entry processor mapping options */
415 /* enumerated */
416 #define MAP_MEM_NOOP                      0
417 #define MAP_MEM_COPYBACK                  1
418 #define MAP_MEM_IO                        2
419 #define MAP_MEM_WTHRU                     3
420 #define MAP_MEM_WCOMB                     4       /* Write combining mode */
421                                                   /* aka store gather     */
422 #define MAP_MEM_INNERWBACK                5
423 #define MAP_MEM_POSTED                    6
424 #define MAP_MEM_RT                        7
425 #define MAP_MEM_POSTED_REORDERED          8
426 #define MAP_MEM_POSTED_COMBINED_REORDERED 9
427 
428 #define GET_MAP_MEM(flags)      \
429 	((((unsigned int)(flags)) >> 24) & 0xFF)
430 
431 #define SET_MAP_MEM(caching, flags)     \
432 	((flags) = ((((unsigned int)(caching)) << 24) \
433 	                & 0xFF000000) | ((flags) & 0xFFFFFF));
434 
435 /* leave room for vm_prot bits (0xFF ?) */
436 #define MAP_MEM_PROT_MASK            0xFF
437 #define MAP_MEM_LEDGER_TAGGED        0x002000 /* object owned by a specific task and ledger */
438 #define MAP_MEM_PURGABLE_KERNEL_ONLY 0x004000 /* volatility controlled by kernel */
439 #define MAP_MEM_GRAB_SECLUDED   0x008000 /* can grab secluded pages */
440 #define MAP_MEM_ONLY            0x010000 /* change processor caching  */
441 #define MAP_MEM_NAMED_CREATE    0x020000 /* create extant object      */
442 #define MAP_MEM_PURGABLE        0x040000 /* create a purgable VM object */
443 #define MAP_MEM_NAMED_REUSE     0x080000 /* reuse provided entry if identical */
444 #define MAP_MEM_USE_DATA_ADDR   0x100000 /* preserve address of data, rather than base of page */
445 #define MAP_MEM_VM_COPY         0x200000 /* make a copy of a VM range */
446 #define MAP_MEM_VM_SHARE        0x400000 /* extract a VM range for remap */
447 #define MAP_MEM_4K_DATA_ADDR    0x800000 /* preserve 4K aligned address of data */
448 
449 #define MAP_MEM_FLAGS_MASK 0x00FFFF00
450 #define MAP_MEM_FLAGS_USER (                               \
451 	MAP_MEM_PURGABLE_KERNEL_ONLY |                     \
452 	MAP_MEM_GRAB_SECLUDED |                            \
453 	MAP_MEM_ONLY |                                     \
454 	MAP_MEM_NAMED_CREATE |                             \
455 	MAP_MEM_PURGABLE |                                 \
456 	MAP_MEM_NAMED_REUSE |                              \
457 	MAP_MEM_USE_DATA_ADDR |                            \
458 	MAP_MEM_VM_COPY |                                  \
459 	MAP_MEM_VM_SHARE |                                 \
460 	MAP_MEM_LEDGER_TAGGED |                            \
461 	MAP_MEM_4K_DATA_ADDR)
462 #define MAP_MEM_FLAGS_ALL (                     \
463 	MAP_MEM_FLAGS_USER)
464 
465 #ifdef KERNEL
466 
467 /*
468  *  Universal Page List data structures
469  *
470  *  A UPL describes a bounded set of physical pages
471  *  associated with some range of an object or map
472  *  and a snapshot of the attributes associated with
473  *  each of those pages.
474  */
475 #ifdef PRIVATE
476 #define MAX_UPL_TRANSFER_BYTES  (1024 * 1024)
477 #define MAX_UPL_SIZE_BYTES      (1024 * 1024 * 64)
478 
479 #define MAX_UPL_SIZE            (MAX_UPL_SIZE_BYTES / PAGE_SIZE)
480 #define MAX_UPL_TRANSFER        (MAX_UPL_TRANSFER_BYTES / PAGE_SIZE)
481 
/*
 * Per-page snapshot within a UPL.  The layout is fixed at 8 bytes
 * (enforced by the _Static_assert below).  The flag word is only
 * interpreted inside xnu; all other clients must treat it as opaque
 * and go through the upl_page_xxx() accessor functions.
 */
struct upl_page_info {
	ppnum_t         phys_addr;      /* physical page index number */
	unsigned int
#ifdef  XNU_KERNEL_PRIVATE
	    free_when_done:1,    /* page is to be freed on commit */
	    absent:1,           /* No valid data in this page */
	    dirty:1,            /* Page must be cleaned (O) */
	    precious:1,         /* must be cleaned, we have only copy */
	    device:1,           /* no page data, mapped dev memory */
	    speculative:1,      /* page is valid, but not yet accessed */
#define VMP_CS_BITS 4
#define VMP_CS_ALL_FALSE 0x0
#define VMP_CS_ALL_TRUE 0xF
	cs_validated:VMP_CS_BITS,     /* CODE SIGNING: page was validated */
	    cs_tainted:VMP_CS_BITS,   /* CODE SIGNING: page is tainted */
	    cs_nx:VMP_CS_BITS,        /* CODE SIGNING: page is NX */

	    needed:1,           /* page should be left in cache on abort */
	    mark:1,             /* a mark flag for the creator to use as they wish */
	    reserved: 12,       /* unused; pads the flag word */
	:0;                     /* force to long boundary */
#else
	opaque;                 /* use upl_page_xxx() accessor funcs */
#endif /* XNU_KERNEL_PRIVATE */
};
_Static_assert(sizeof(struct upl_page_info) == 8, "sizeof(struct upl_page_info) doesn't match expectation");
508 
509 #else
510 
/* opaque 8-byte per-page entry; must match the PRIVATE layout in size */
struct upl_page_info {
	unsigned int    opaque[2];      /* use upl_page_xxx() accessor funcs */
};
514 
515 #endif /* PRIVATE */
516 
517 typedef struct upl_page_info    upl_page_info_t;
518 typedef upl_page_info_t         *upl_page_info_array_t;
519 typedef upl_page_info_array_t   upl_page_list_ptr_t;
520 
521 typedef uint32_t        upl_offset_t;   /* page-aligned byte offset */
522 typedef uint32_t        upl_size_t;     /* page-aligned byte size */
523 #define UPL_SIZE_MAX    (UINT32_MAX & ~PAGE_MASK)
524 
525 /* upl invocation flags */
526 /* top nibble is used by super upl */
527 
528 typedef uint64_t upl_control_flags_t;
529 
530 #define UPL_FLAGS_NONE          0x00000000ULL
531 #define UPL_COPYOUT_FROM        0x00000001ULL
532 #define UPL_PRECIOUS            0x00000002ULL
533 #define UPL_NO_SYNC             0x00000004ULL
534 #define UPL_CLEAN_IN_PLACE      0x00000008ULL
535 #define UPL_NOBLOCK             0x00000010ULL
536 #define UPL_RET_ONLY_DIRTY      0x00000020ULL
537 #define UPL_SET_INTERNAL        0x00000040ULL
538 #define UPL_QUERY_OBJECT_TYPE   0x00000080ULL
539 #define UPL_RET_ONLY_ABSENT     0x00000100ULL /* used only for COPY_FROM = FALSE */
540 #define UPL_FILE_IO             0x00000200ULL
541 #define UPL_SET_LITE            0x00000400ULL
542 #define UPL_SET_INTERRUPTIBLE   0x00000800ULL
543 #define UPL_SET_IO_WIRE         0x00001000ULL
544 #define UPL_FOR_PAGEOUT         0x00002000ULL
545 #define UPL_WILL_BE_DUMPED      0x00004000ULL
546 #define UPL_FORCE_DATA_SYNC     0x00008000ULL
547 /* continued after the ticket bits... */
548 
549 #define UPL_PAGE_TICKET_MASK    0x000F0000ULL
550 #define UPL_PAGE_TICKET_SHIFT   16
551 
552 /* ... flags resume here */
553 #define UPL_BLOCK_ACCESS        0x00100000ULL
554 #define UPL_ENCRYPT             0x00200000ULL
555 #define UPL_NOZEROFILL          0x00400000ULL
556 #define UPL_WILL_MODIFY         0x00800000ULL /* caller will modify the pages */
557 
558 #define UPL_NEED_32BIT_ADDR     0x01000000ULL
559 #define UPL_UBC_MSYNC           0x02000000ULL
560 #define UPL_UBC_PAGEOUT         0x04000000ULL
561 #define UPL_UBC_PAGEIN          0x08000000ULL
562 #define UPL_REQUEST_SET_DIRTY   0x10000000ULL
563 #define UPL_REQUEST_NO_FAULT    0x20000000ULL /* fail if pages not all resident */
564 #define UPL_NOZEROFILLIO        0x40000000ULL /* allow non zerofill pages present */
565 #define UPL_REQUEST_FORCE_COHERENCY     0x80000000ULL
566 
567 
568 
569 #define UPL_CARRY_VA_TAG        0x10000000000ULL
570 /* UPL flags known by this kernel */
571 #define UPL_VALID_FLAGS         0x1FFFFFFFFFFULL
572 
573 
574 /* upl abort error flags */
575 #define UPL_ABORT_RESTART               0x1
576 #define UPL_ABORT_UNAVAILABLE   0x2
577 #define UPL_ABORT_ERROR         0x4
578 #define UPL_ABORT_FREE_ON_EMPTY 0x8  /* only implemented in wrappers */
579 #define UPL_ABORT_DUMP_PAGES    0x10
580 #define UPL_ABORT_NOTIFY_EMPTY  0x20
581 /* deprecated: #define UPL_ABORT_ALLOW_ACCESS	0x40 */
582 #define UPL_ABORT_REFERENCE     0x80
583 
584 /* upl pages check flags */
585 #define UPL_CHECK_DIRTY         0x1
586 
587 
588 /*
589  *  upl pagein/pageout  flags
590  *
591  *
592  * when I/O is issued from this UPL it should be done synchronously
593  */
594 #define UPL_IOSYNC      0x1
595 
596 /*
597  * the passed in UPL should not have either a commit or abort
598  * applied to it by the underlying layers... the site that
599  * created the UPL is responsible for cleaning it up.
600  */
601 #define UPL_NOCOMMIT    0x2
602 
603 /*
604  * turn off any speculative read-ahead applied at the I/O layer
605  */
606 #define UPL_NORDAHEAD   0x4
607 
608 /*
609  * pageout request is targeting a real file
610  * as opposed to a swap file.
611  */
612 
613 #define UPL_VNODE_PAGER 0x8
614 /*
615  * this pageout is being originated as part of an explicit
616  * memory synchronization operation... no speculative clustering
617  * should be applied, only the range specified should be pushed.
618  */
619 #define UPL_MSYNC               0x10
620 
621 /*
622  *
623  */
624 #define UPL_PAGING_ENCRYPTED    0x20
625 
/*
 * this pageout is being originated as part of an explicit
 * memory synchronization operation that is checking for I/O
 * errors and taking its own action... if an error occurs,
 * just abort the pages back into the cache unchanged
 */
632 #define UPL_KEEPCACHED          0x40
633 
634 /*
635  * this pageout originated from within cluster_io to deal
636  * with a dirty page that hasn't yet been seen by the FS
637  * that backs it... tag it so that the FS can take the
638  * appropriate action w/r to its locking model since the
639  * pageout will reenter the FS for the same file currently
640  * being handled in this context.
641  */
642 #define UPL_NESTED_PAGEOUT      0x80
643 
644 /*
645  * we've detected a sequential access pattern and
646  * we are speculatively and aggressively pulling
647  * pages in... do not count these as real PAGEINs
648  * w/r to our hard throttle maintenance
649  */
650 #define UPL_IOSTREAMING         0x100
651 
652 /*
653  * Currently, it's only used for the swap pagein path.
654  * Since the swap + compressed pager layer manage their
655  * pages, these pages are not marked "absent" i.e. these
656  * are "valid" pages. The pagein path will _not_ issue an
657  * I/O (correctly) for valid pages. So, this flag is used
658  * to override that logic in the vnode I/O path.
659  */
660 #define UPL_IGNORE_VALID_PAGE_CHECK     0x200
661 
662 
663 
664 /* upl commit flags */
665 #define UPL_COMMIT_FREE_ON_EMPTY        0x1 /* only implemented in wrappers */
666 #define UPL_COMMIT_CLEAR_DIRTY          0x2
667 #define UPL_COMMIT_SET_DIRTY            0x4
668 #define UPL_COMMIT_INACTIVATE           0x8
669 #define UPL_COMMIT_NOTIFY_EMPTY         0x10
670 /* deprecated: #define UPL_COMMIT_ALLOW_ACCESS		0x20 */
671 #define UPL_COMMIT_CS_VALIDATED         0x40
672 #define UPL_COMMIT_CLEAR_PRECIOUS       0x80
673 #define UPL_COMMIT_SPECULATE            0x100
674 #define UPL_COMMIT_FREE_ABSENT          0x200
675 #define UPL_COMMIT_WRITTEN_BY_KERNEL    0x400
676 
677 #define UPL_COMMIT_KERNEL_ONLY_FLAGS    (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT)
678 
679 /* flags for return of state from vm_map_get_upl,  vm_upl address space */
680 /* based call */
681 #define UPL_DEV_MEMORY                  0x1
682 #define UPL_PHYS_CONTIG                 0x2
683 
684 
685 /*
686  * Flags for the UPL page ops routine.  This routine is not exported
687  * out of the kernel at the moment and so the defs live here.
688  */
689 #define UPL_POP_DIRTY           0x1
690 #define UPL_POP_PAGEOUT         0x2
691 #define UPL_POP_PRECIOUS                0x4
692 #define UPL_POP_ABSENT          0x8
693 #define UPL_POP_BUSY                    0x10
694 
695 #define UPL_POP_PHYSICAL        0x10000000
696 #define UPL_POP_DUMP            0x20000000
697 #define UPL_POP_SET             0x40000000
698 #define UPL_POP_CLR             0x80000000
699 
/*
 * Flags for the UPL range op routine.  This routine is not exported
 * out of the kernel at the moment and so the defs live here.
 */
704 /*
705  * UPL_ROP_ABSENT: Returns the extent of the range presented which
706  * is absent, starting with the start address presented
707  */
708 #define UPL_ROP_ABSENT          0x01
709 /*
710  * UPL_ROP_PRESENT: Returns the extent of the range presented which
711  * is present (i.e. resident), starting with the start address presented
712  */
713 #define UPL_ROP_PRESENT         0x02
714 /*
715  * UPL_ROP_DUMP: Dump the pages which are found in the target object
716  * for the target range.
717  */
718 #define UPL_ROP_DUMP                    0x04
719 
720 #ifdef  PRIVATE
721 
722 #define UPL_REPRIO_INFO_MASK    (0xFFFFFFFF)
723 #define UPL_REPRIO_INFO_SHIFT   32
724 
725 /* access macros for upl_t */
726 
727 #define UPL_DEVICE_PAGE(upl) \
728 	(((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE)
729 
730 #define UPL_PAGE_PRESENT(upl, index) \
731 	((upl)[(index)].phys_addr != 0)
732 
733 #define UPL_PHYS_PAGE(upl, index) \
734 	((upl)[(index)].phys_addr)
735 
736 #define UPL_SPECULATIVE_PAGE(upl, index) \
737 	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE)
738 
739 #define UPL_DIRTY_PAGE(upl, index) \
740 	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE)
741 
742 #define UPL_PRECIOUS_PAGE(upl, index) \
743 	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE)
744 
745 #define UPL_VALID_PAGE(upl, index) \
746 	(((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE)
747 
748 #define UPL_PAGEOUT_PAGE(upl, index) \
749 	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].free_when_done) : FALSE)
750 
751 #define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \
752 	(((upl)[(index)].phys_addr != 0) ?            \
753 	 ((upl)[(index)].free_when_done = TRUE) : FALSE)
754 
755 #define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \
756 	(((upl)[(index)].phys_addr != 0) ?       \
757 	 ((upl)[(index)].free_when_done = FALSE) : FALSE)
758 
759 #define UPL_REPRIO_INFO_BLKNO(upl, index) \
760 	(((upl)->upl_reprio_info[(index)]) & UPL_REPRIO_INFO_MASK)
761 
762 #define UPL_REPRIO_INFO_LEN(upl, index) \
763 	((((upl)->upl_reprio_info[(index)]) >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK)
764 
765 /* modifier macros for upl_t */
766 
767 #define UPL_SET_CS_VALIDATED(upl, index, value) \
768 	((upl)[(index)].cs_validated = (value))
769 
770 #define UPL_SET_CS_TAINTED(upl, index, value) \
771 	((upl)[(index)].cs_tainted = (value))
772 
773 #define UPL_SET_CS_NX(upl, index, value) \
774 	((upl)[(index)].cs_nx = (value))
775 
776 #define UPL_SET_REPRIO_INFO(upl, index, blkno, len) \
777 	((upl)->upl_reprio_info[(index)]) = (((uint64_t)(blkno) & UPL_REPRIO_INFO_MASK) | \
778 	(((uint64_t)(len) & UPL_REPRIO_INFO_MASK) << UPL_REPRIO_INFO_SHIFT))
779 
780 /* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */
781 /* list request was made with the UPL_INTERNAL flag */
782 
783 #define UPL_GET_INTERNAL_PAGE_LIST(upl) upl_get_internal_page_list(upl)
784 
785 __BEGIN_DECLS
786 
787 extern void            *upl_get_internal_vectorupl(upl_t);
788 extern upl_page_info_t *upl_get_internal_vectorupl_pagelist(upl_t);
789 extern upl_page_info_t *upl_get_internal_page_list(upl_t upl);
790 extern ppnum_t          upl_phys_page(upl_page_info_t *upl, int index);
791 extern boolean_t        upl_device_page(upl_page_info_t *upl);
792 extern boolean_t        upl_speculative_page(upl_page_info_t *upl, int index);
793 extern void     upl_clear_dirty(upl_t upl, boolean_t value);
794 extern void     upl_set_referenced(upl_t upl, boolean_t value);
795 extern void     upl_range_needed(upl_t upl, int index, int count);
796 #if CONFIG_IOSCHED
797 extern int64_t upl_blkno(upl_page_info_t *upl, int index);
798 extern void     upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int size, int64_t blkno);
799 #endif
800 
801 __END_DECLS
802 
803 #endif /* PRIVATE */
804 
805 __BEGIN_DECLS
806 
807 extern boolean_t        upl_page_present(upl_page_info_t *upl, int index);
808 extern boolean_t        upl_dirty_page(upl_page_info_t *upl, int index);
809 extern boolean_t        upl_valid_page(upl_page_info_t *upl, int index);
810 extern void             upl_deallocate(upl_t upl);
811 extern void             upl_mark_decmp(upl_t upl);
812 extern void             upl_unmark_decmp(upl_t upl);
813 extern boolean_t        upl_has_wired_pages(upl_t upl);
814 
815 #ifdef KERNEL_PRIVATE
816 
817 void upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v);
818 boolean_t upl_page_get_mark(upl_page_info_t *upl, int index);
819 boolean_t upl_page_is_needed(upl_page_info_t *upl, int index);
820 
821 #endif // KERNEL_PRIVATE
822 
823 __END_DECLS
824 
825 #endif  /* KERNEL */
826 
827 #endif  /* _MACH_MEMORY_OBJECT_TYPES_H_ */
828