xref: /xnu-11215.41.3/osfmk/vm/vm_protos.h (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifdef  XNU_KERNEL_PRIVATE
30 
31 #ifndef _VM_VM_PROTOS_H_
32 #define _VM_VM_PROTOS_H_
33 
34 #include <mach/mach_types.h>
35 #include <kern/kern_types.h>
36 #include <vm/vm_options.h>
37 
38 #ifdef __cplusplus
39 extern "C" {
40 #endif
41 
42 /*
43  * This file contains various type definitions and routine prototypes
44  * that are needed to avoid compilation warnings for VM code (in osfmk,
45  * default_pager and bsd).
46  * Most of these should eventually go into more appropriate header files.
47  *
48  * Include it after all other header files since it doesn't include any
49  * type definitions and it works around some conflicts with other header
50  * files.
51  */
52 
53 
54 /*
55  * osfmk
56  */
57 #ifndef _IPC_IPC_PORT_H_
58 extern mach_port_name_t ipc_port_copyout_send(
59 	ipc_port_t      sright,
60 	ipc_space_t     space);
61 extern mach_port_name_t ipc_port_copyout_send_pinned(
62 	ipc_port_t      sright,
63 	ipc_space_t     space);
64 extern kern_return_t mach_port_deallocate_kernel(
65 	ipc_space_t             space,
66 	mach_port_name_t        name,
67 	natural_t               kotype);
68 #endif /* _IPC_IPC_PORT_H_ */
69 
70 #ifndef _KERN_IPC_TT_H_
71 
72 #define port_name_to_task(name) port_name_to_task_kernel(name)
73 
74 extern task_t port_name_to_task_kernel(
75 	mach_port_name_t name);
76 extern task_t port_name_to_task_read(
77 	mach_port_name_t name);
78 extern task_t port_name_to_task_name(
79 	mach_port_name_t name);
80 extern void ipc_port_release_send(
81 	ipc_port_t      port);
82 #endif /* _KERN_IPC_TT_H_ */
83 
84 extern ipc_space_t  get_task_ipcspace(
85 	task_t t);
86 
87 #if CONFIG_MEMORYSTATUS
88 extern int max_task_footprint_mb;       /* Per-task limit on physical memory consumption in megabytes */
89 #endif /* CONFIG_MEMORYSTATUS */
90 
91 /* Some loose-ends VM stuff */
92 
93 extern const vm_size_t msg_ool_size_small;
94 
95 extern kern_return_t vm_tests(void);
96 extern void consider_machine_adjust(void);
97 extern vm_map_offset_t get_map_min(vm_map_t);
98 extern vm_map_offset_t get_map_max(vm_map_t);
99 extern vm_map_size_t get_vmmap_size(vm_map_t);
100 extern int get_task_page_size(task_t);
101 #if CONFIG_COREDUMP
102 extern int get_vmmap_entries(vm_map_t);
103 #endif
104 extern int get_map_nentries(vm_map_t);
105 
106 extern vm_map_offset_t vm_map_page_mask(vm_map_t);
107 
108 
109 #if MACH_ASSERT
110 extern void vm_map_pmap_set_process(
111 	vm_map_t        map,
112 	int             pid,
113 	char            *procname);
114 extern void vm_map_pmap_check_ledgers(
115 	pmap_t          pmap,
116 	ledger_t        ledger,
117 	int             pid,
118 	char            *procname);
119 #endif /* MACH_ASSERT */
120 
121 #if CONFIG_COREDUMP
122 extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
123 #endif
124 
125 #if XNU_PLATFORM_MacOSX
126 /*
127  * VM routines that used to be published to
128  * user space, and are now restricted to the kernel.
129  *
130  * They should eventually go away entirely -
131  * to be replaced with standard vm_map() and
132  * vm_deallocate() calls.
133  */
134 extern kern_return_t vm_region_object_create
135 (
136 	vm_map_t target_task,
137 	vm_size_t size,
138 	ipc_port_t *object_handle
139 );
140 #endif /* XNU_PLATFORM_MacOSX */
141 
142 #if CONFIG_CODE_DECRYPTION
143 #define VM_MAP_DEBUG_APPLE_PROTECT      MACH_ASSERT
144 #if VM_MAP_DEBUG_APPLE_PROTECT
145 extern int vm_map_debug_apple_protect;
146 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
147 struct pager_crypt_info;
148 extern kern_return_t vm_map_apple_protected(
149 	vm_map_t                map,
150 	vm_map_offset_t         start,
151 	vm_map_offset_t         end,
152 	vm_object_offset_t      crypto_backing_offset,
153 	struct pager_crypt_info *crypt_info,
154 	uint32_t                cryptid);
155 #endif  /* CONFIG_CODE_DECRYPTION */
156 
157 struct vm_shared_region_slide_info;
158 
159 #if __has_feature(ptrauth_calls)
160 extern void shared_region_key_alloc(
161 	char *shared_region_id,
162 	bool inherit,
163 	uint64_t inherited_key);
164 extern void shared_region_key_dealloc(
165 	char *shared_region_id);
166 extern uint64_t generate_jop_key(void);
167 #endif /* __has_feature(ptrauth_calls) */
168 extern bool vm_shared_region_is_reslide(struct task *task);
169 
170 struct vnode;
171 extern memory_object_t swapfile_pager_setup(struct vnode *vp);
172 extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
173 
174 #if __arm64__ || (__ARM_ARCH_7K__ >= 2)
175 #define SIXTEENK_PAGE_SIZE      0x4000
176 #define SIXTEENK_PAGE_MASK      0x3FFF
177 #define SIXTEENK_PAGE_SHIFT     14
178 #endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */
179 
180 #define FOURK_PAGE_SIZE         0x1000
181 #define FOURK_PAGE_MASK         0xFFF
182 #define FOURK_PAGE_SHIFT        12
183 
184 #if __arm64__
185 extern unsigned int page_shift_user32;
186 #endif /* __arm64__ */
187 
188 /*
189  * bsd
190  */
191 struct vnode;
192 
193 extern void vnode_setswapmount(struct vnode *);
194 extern int64_t vnode_getswappin_avail(struct vnode *);
195 
196 #if CHECK_CS_VALIDATION_BITMAP
197 /* used by the vnode_pager_cs_validation_bitmap routine*/
198 #define CS_BITMAP_SET   1
199 #define CS_BITMAP_CLEAR 2
200 #define CS_BITMAP_CHECK 3
201 
202 #endif /* CHECK_CS_VALIDATION_BITMAP */
203 
204 extern kern_return_t vnode_pager_init(
205 	memory_object_t,
206 	memory_object_control_t,
207 	memory_object_cluster_size_t);
208 
209 #if CONFIG_IOSCHED
210 extern kern_return_t vnode_pager_get_object_devvp(
211 	memory_object_t,
212 	uintptr_t *);
213 #endif
214 
215 /*
216  * Functions defined in ubc_subr.c used by the vm code
217  */
218 extern  kern_return_t ubc_cs_check_validation_bitmap(
219 	struct vnode *vp,
220 	memory_object_offset_t offset,
221 	int optype);
222 extern int  ubc_map(
223 	struct vnode *vp,
224 	int flags);
225 extern void ubc_unmap(
226 	struct vnode *vp);
227 
228 
229 extern void   device_pager_reference(memory_object_t);
230 extern void   device_pager_deallocate(memory_object_t);
231 extern kern_return_t   device_pager_init(memory_object_t,
232     memory_object_control_t,
233     memory_object_cluster_size_t);
234 extern  kern_return_t device_pager_terminate(memory_object_t);
235 extern  kern_return_t   device_pager_data_request(memory_object_t,
236     memory_object_offset_t,
237     memory_object_cluster_size_t,
238     vm_prot_t,
239     memory_object_fault_info_t);
240 extern kern_return_t device_pager_data_return(memory_object_t,
241     memory_object_offset_t,
242     memory_object_cluster_size_t,
243     memory_object_offset_t *,
244     int *,
245     boolean_t,
246     boolean_t,
247     int);
248 extern kern_return_t device_pager_data_initialize(memory_object_t,
249     memory_object_offset_t,
250     memory_object_cluster_size_t);
251 extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
252 extern kern_return_t device_pager_last_unmap(memory_object_t);
253 
254 extern kern_return_t pager_map_to_phys_contiguous(
255 	memory_object_control_t object,
256 	memory_object_offset_t  offset,
257 	addr64_t                base_vaddr,
258 	vm_size_t               size);
259 
260 struct macx_triggers_args;
261 
262 extern int macx_swapinfo(
263 	memory_object_size_t    *total_p,
264 	memory_object_size_t    *avail_p,
265 	vm_size_t               *pagesize_p,
266 	boolean_t               *encrypted_p);
267 
268 
269 struct proc;
270 struct proc *current_proc(void);
271 extern int cs_allow_invalid(struct proc *p);
272 extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);
273 
274 #define CS_VALIDATE_TAINTED     0x00000001
275 #define CS_VALIDATE_NX          0x00000002
276 extern boolean_t cs_validate_range(struct vnode *vp,
277     memory_object_t pager,
278     memory_object_offset_t offset,
279     const void *data,
280     vm_size_t size,
281     unsigned *result);
282 extern void cs_validate_page(
283 	struct vnode *vp,
284 	memory_object_t pager,
285 	memory_object_offset_t offset,
286 	const void *data,
287 	int *validated_p,
288 	int *tainted_p,
289 	int *nx_p);
290 
291 
292 extern kern_return_t mach_memory_entry_purgable_control(
293 	ipc_port_t      entry_port,
294 	vm_purgable_t   control,
295 	int             *state);
296 
297 extern int no_paging_space_action(void);
298 
299 extern unsigned int vmtc_total;        /* total # of text page corruptions detected */
300 
301 extern kern_return_t revalidate_text_page(task_t, vm_map_offset_t);
302 
303 #define VM_TOGGLE_CLEAR         0
304 #define VM_TOGGLE_SET           1
305 #define VM_TOGGLE_GETVALUE      999
306 int vm_toggle_entry_reuse(int, int*);
307 
308 #define SWAP_WRITE              0x00000000      /* Write buffer (pseudo flag). */
309 #define SWAP_READ               0x00000001      /* Read buffer. */
310 #define SWAP_ASYNC              0x00000002      /* Start I/O, do not wait. */
311 
312 extern boolean_t vm_compressor_low_on_space(void);
313 extern bool vm_compressor_compressed_pages_nearing_limit(void);
314 extern boolean_t vm_compressor_out_of_space(void);
315 
316 extern bool      vm_swap_low_on_space(void);
317 extern int       vm_swap_out_of_space(void);
318 void             do_fastwake_warmup_all(void);
319 
320 #if CONFIG_JETSAM
321 extern int proc_get_memstat_priority(struct proc*, boolean_t);
322 #endif /* CONFIG_JETSAM */
323 
324 /* the object purger. purges the next eligible object from memory. */
325 /* returns TRUE if an object was purged, otherwise FALSE. */
326 boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
327 void vm_owned_objects_disown(task_t task);
328 void vm_object_wired_page_update_ledgers(
329 	vm_object_t object,
330 	int64_t wired_delta);
331 
/*
 * Node in a singly-linked list of byte ranges to be trimmed (discarded).
 * NOTE(review): presumably describes unused extents of a swapfile handed
 * to the filesystem/device for TRIM — confirm against the vm_swapfile code.
 */
332 struct trim_list {
333 	uint64_t        tl_offset;      /* start of the range, in bytes */
334 	uint64_t        tl_length;      /* length of the range, in bytes */
335 	struct trim_list *tl_next;      /* next range, or NULL at end of list */
336 };
337 
338 #define MAX_SWAPFILENAME_LEN    1024
339 #define SWAPFILENAME_INDEX_LEN  2       /* Doesn't include the terminating NULL character */
340 
341 extern char     swapfilename[MAX_SWAPFILENAME_LEN + 1];
342 
/*
 * Global diagnostic counters for rare/interesting VM events, grouped by
 * the code path that bumps them: object collapse via the compressor,
 * copy-on-write anomalies, and UPL creation fallbacks.
 */
343 struct vm_counters {
344 	unsigned int    do_collapse_compressor;         /* object collapses that went through the compressor */
345 	unsigned int    do_collapse_compressor_pages;   /* pages moved during those collapses */
346 	unsigned int    do_collapse_terminate;          /* collapses resolved by terminating the backing object */
347 	unsigned int    do_collapse_terminate_failure;  /* terminate-path collapses that failed */
348 	unsigned int    should_cow_but_wired;           /* COW skipped because pages were wired */
349 	unsigned int    create_upl_extra_cow;           /* UPL creation forced an extra COW pass */
350 	unsigned int    create_upl_extra_cow_pages;     /* pages copied in those extra COW passes */
351 	unsigned int    create_upl_lookup_failure_write;/* UPL write lookups that failed */
352 	unsigned int    create_upl_lookup_failure_copy; /* UPL copy lookups that failed */
353 };
/* Single global instance; defined in the VM implementation. */
354 extern struct vm_counters vm_counters;
355 
356 #if CONFIG_SECLUDED_MEMORY
/*
 * Statistics for the secluded page pool (CONFIG_SECLUDED_MEMORY):
 * how many pages were eligible, and the outcome of each attempt to
 * grab a secluded page, broken down by failure cause.
 */
357 struct vm_page_secluded_data {
358 	int     eligible_for_secluded;  /* pages deemed eligible for the secluded pool */
359 	int     grab_success_free;      /* grabs satisfied from a free secluded page */
360 	int     grab_success_other;     /* grabs satisfied by reclaiming an in-use page */
361 	int     grab_failure_locked;    /* failed: page was locked/busy */
362 	int     grab_failure_state;     /* failed: page in an ineligible state */
363 	int     grab_failure_realtime;  /* failed: page belongs to a realtime mapping */
364 	int     grab_failure_dirty;     /* failed: page was dirty */
365 	int     grab_for_iokit;         /* grab attempts made on behalf of IOKit */
366 	int     grab_for_iokit_success; /* IOKit grab attempts that succeeded */
367 };
/* Single global instance; defined in the VM implementation. */
368 extern struct vm_page_secluded_data vm_page_secluded;
371 
372 /* boot-args */
373 
374 __enum_decl(secluded_filecache_mode_t, uint8_t, {
375 	/*
376 	 * SECLUDED_FILECACHE_NONE:
377 	 * + no file contents in secluded pool
378 	 */
379 	SECLUDED_FILECACHE_NONE = 0,
380 	/*
381 	 * SECLUDED_FILECACHE_APPS
382 	 * + no files from /
383 	 * + files from /Applications/ are OK
384 	 * + files from /Applications/Camera are not OK
385 	 * + no files that are open for write
386 	 */
387 	SECLUDED_FILECACHE_APPS = 1,
388 	/*
389 	 * SECLUDED_FILECACHE_RDONLY
390 	 * + all read-only files OK, except:
391 	 *      + dyld_shared_cache_arm64*
392 	 *      + Camera
393 	 *      + mediaserverd
394 	 *      + cameracaptured
395 	 */
396 	SECLUDED_FILECACHE_RDONLY = 2,
397 });
398 
399 extern secluded_filecache_mode_t secluded_for_filecache;
400 extern bool secluded_for_apps;
401 extern bool secluded_for_iokit;
402 
403 extern uint64_t vm_page_secluded_drain(void);
404 extern void             memory_object_mark_eligible_for_secluded(
405 	memory_object_control_t         control,
406 	boolean_t                       eligible_for_secluded);
407 
408 #endif /* CONFIG_SECLUDED_MEMORY */
409 
410 extern void             memory_object_mark_for_realtime(
411 	memory_object_control_t         control,
412 	bool                            for_realtime);
413 
414 #define MAX_PAGE_RANGE_QUERY    (1ULL * 1024 * 1024 * 1024) /* 1 GB */
415 
416 extern uint64_t vm_purge_filebacked_pagers(void);
417 
/*
 * Round `x` up to the next multiple of `y` (returns `x` unchanged when it
 * is already a multiple).
 *
 * CAUTION: this is a function-like macro that evaluates BOTH arguments
 * more than once — never pass expressions with side effects (e.g. `i++`).
 * `y` must be non-zero or the `%` is undefined behavior.
 */
418 #define roundup(x, y)   ((((x) % (y)) == 0) ? \
419 	                (x) : ((x) + ((y) - ((x) % (y)))))
420 
421 #ifdef __cplusplus
422 }
423 #endif
424 
425 /*
426  * Flags for the VM swapper/reclaimer.
427  * Used by vm_swap_consider_defragment()
428  * to force defrag/reclaim by the swap
429  * GC thread.
430  */
431 #define VM_SWAP_FLAGS_NONE             0
432 #define VM_SWAP_FLAGS_FORCE_DEFRAG     1
433 #define VM_SWAP_FLAGS_FORCE_RECLAIM    2
434 
435 #if __arm64__
436 /*
437  * Flags to control the behavior of
438  * the legacy footprint entitlement.
439  */
440 #define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE             (1)
441 #define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT         (2)
442 #define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE     (3)
443 
444 #endif /* __arm64__ */
445 
446 #if DEVELOPMENT || DEBUG
447 struct proc;
448 extern struct proc *current_proc(void);
449 extern int proc_pid(struct proc *);
450 extern const char *proc_best_name(struct proc *);
451 struct thread;
452 extern uint64_t thread_tid(struct thread *);
453 extern int debug4k_filter;
454 extern int debug4k_proc_filter;
455 extern char debug4k_proc_name[];
456 extern const char *debug4k_category_name[];
457 
/*
 * __DEBUG4K(category, fmt, ...): conditional kernel printf for 4K-page
 * debugging (DEVELOPMENT || DEBUG only).
 *
 * The message is emitted when either:
 *   - bit `category` is set in the global `debug4k_filter`, or
 *   - bit `category` is set in `debug4k_proc_filter` AND the current
 *     process's best name matches `debug4k_proc_name` exactly (strcmp).
 *
 * When it fires, the line is prefixed with the category name, the current
 * process's pid and name, the current thread pointer and tid, and the
 * calling function/line. `__p`/`__pname` are resolved lazily: only when
 * the per-process filter is consulted or the message is actually logged.
 * Wrapped in MACRO_BEGIN/MACRO_END so it behaves as a single statement.
 */
458 #define __DEBUG4K(category, fmt, ...)                                   \
459 	MACRO_BEGIN                                                     \
460 	int __category = (category);                                    \
461 	struct thread *__t = NULL;                                      \
462 	struct proc *__p = NULL;                                        \
463 	const char *__pname = "?";                                      \
464 	boolean_t __do_log = FALSE;                                     \
465                                                                         \
466 	if ((1 << __category) & debug4k_filter) {                       \
467 	        __do_log = TRUE;                                        \
468 	} else if (((1 << __category) & debug4k_proc_filter) &&         \
469 	           debug4k_proc_name[0] != '\0') {                      \
470 	        __p = current_proc();                                   \
471 	        if (__p != NULL) {                                      \
472 	                __pname = proc_best_name(__p);                  \
473 	        }                                                       \
474 	        if (!strcmp(debug4k_proc_name, __pname)) {              \
475 	                __do_log = TRUE;                                \
476 	        }                                                       \
477 	}                                                               \
478 	if (__do_log) {                                                 \
479 	        if (__p == NULL) {                                      \
480 	                __p = current_proc();                           \
481 	                if (__p != NULL) {                              \
482 	                        __pname = proc_best_name(__p);          \
483 	                }                                               \
484 	        }                                                       \
485 	        __t = current_thread();                                 \
486 	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt,     \
487 	               debug4k_category_name[__category],               \
488 	               __p ? proc_pid(__p) : 0,                         \
489 	               __pname,                                         \
490 	               __t,                                             \
491 	               thread_tid(__t),                                 \
492 	               __FUNCTION__,                                    \
493 	               __LINE__,                                        \
494 	               ##__VA_ARGS__);                                  \
495 	}                                                               \
496 	MACRO_END
497 
498 #define __DEBUG4K_ERROR         0
499 #define __DEBUG4K_LIFE          1
500 #define __DEBUG4K_LOAD          2
501 #define __DEBUG4K_FAULT         3
502 #define __DEBUG4K_COPY          4
503 #define __DEBUG4K_SHARE         5
504 #define __DEBUG4K_ADJUST        6
505 #define __DEBUG4K_PMAP          7
506 #define __DEBUG4K_MEMENTRY      8
507 #define __DEBUG4K_IOKIT         9
508 #define __DEBUG4K_UPL           10
509 #define __DEBUG4K_EXC           11
510 #define __DEBUG4K_VFS           12
511 
512 #define DEBUG4K_ERROR(...)      __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
513 #define DEBUG4K_LIFE(...)       __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
514 #define DEBUG4K_LOAD(...)       __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
515 #define DEBUG4K_FAULT(...)      __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
516 #define DEBUG4K_COPY(...)       __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
517 #define DEBUG4K_SHARE(...)      __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
518 #define DEBUG4K_ADJUST(...)     __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
519 #define DEBUG4K_PMAP(...)       __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
520 #define DEBUG4K_MEMENTRY(...)   __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
521 #define DEBUG4K_IOKIT(...)      __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
522 #define DEBUG4K_UPL(...)        __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
523 #define DEBUG4K_EXC(...)        __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
524 #define DEBUG4K_VFS(...)        __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)
525 
526 #else /* DEVELOPMENT || DEBUG */
527 
528 #define DEBUG4K_ERROR(...)
529 #define DEBUG4K_LIFE(...)
530 #define DEBUG4K_LOAD(...)
531 #define DEBUG4K_FAULT(...)
532 #define DEBUG4K_COPY(...)
533 #define DEBUG4K_SHARE(...)
534 #define DEBUG4K_ADJUST(...)
535 #define DEBUG4K_PMAP(...)
536 #define DEBUG4K_MEMENTRY(...)
537 #define DEBUG4K_IOKIT(...)
538 #define DEBUG4K_UPL(...)
539 #define DEBUG4K_EXC(...)
540 #define DEBUG4K_VFS(...)
541 
542 #endif /* DEVELOPMENT || DEBUG */
543 
544 
/*
 * Why a VM object was destroyed / lost its pager. The value is stored in
 * a 3-bit field (`no_pager_reason`), hence the _Static_assert below that
 * the maximum value fits in 3 bits.
 */
545 __enum_decl(vm_object_destroy_reason_t, uint8_t, {
546 	VM_OBJECT_DESTROY_UNKNOWN_REASON = 0,   /* reason not recorded */
547 	VM_OBJECT_DESTROY_RECLAIM = 1,          /* destroyed by memory reclaim */
548 	VM_OBJECT_DESTROY_UNMOUNT = 2,          /* backing filesystem unmounted */
549 	VM_OBJECT_DESTROY_FORCED_UNMOUNT = 3,   /* forced unmount */
550 	VM_OBJECT_DESTROY_UNGRAFT = 4,          /* ungrafted from its backing store */
551 	VM_OBJECT_DESTROY_PAGER = 5,            /* pager-initiated destruction */
552 	VM_OBJECT_DESTROY_MAX = 5,              /* keep equal to the highest reason above */
553 });
554 _Static_assert(VM_OBJECT_DESTROY_MAX < 8, "Need to fit in `no_pager_reason`'s number of bits");
555 
556 /* From vm_resident.c */
557 void vm_update_darkwake_mode(boolean_t);
558 
559 #if FBDP_DEBUG_OBJECT_NO_PAGER
560 extern kern_return_t memory_object_mark_as_tracked(
561 	memory_object_control_t         control,
562 	bool                            new_value,
563 	bool                            *old_value);
564 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
565 
566 #endif  /* _VM_VM_PROTOS_H_ */
567 
568 #endif  /* XNU_KERNEL_PRIVATE */
569