xref: /xnu-8796.101.5/osfmk/vm/vm_protos.h (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifdef  XNU_KERNEL_PRIVATE
30 
31 #ifndef _VM_VM_PROTOS_H_
32 #define _VM_VM_PROTOS_H_
33 
34 #include <mach/mach_types.h>
35 #include <kern/kern_types.h>
36 
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40 
41 /*
42  * This file contains various type definitions and routine prototypes
43  * that are needed to avoid compilation warnings for VM code (in osfmk,
44  * default_pager and bsd).
45  * Most of these should eventually go into more appropriate header files.
46  *
47  * Include it after all other header files since it doesn't include any
48  * type definitions and it works around some conflicts with other header
49  * files.
50  */
51 
52 /*
53  * iokit
54  */
55 extern kern_return_t device_data_action(
56 	uintptr_t               device_handle,
57 	ipc_port_t              device_pager,
58 	vm_prot_t               protection,
59 	vm_object_offset_t      offset,
60 	vm_size_t               size);
61 
62 extern kern_return_t device_close(
63 	uintptr_t     device_handle);
64 
65 extern boolean_t vm_swap_files_pinned(void);
66 
67 /*
68  * osfmk
69  */
70 #ifndef _IPC_IPC_PORT_H_
71 extern mach_port_name_t ipc_port_copyout_send(
72 	ipc_port_t      sright,
73 	ipc_space_t     space);
74 extern mach_port_name_t ipc_port_copyout_send_pinned(
75 	ipc_port_t      sright,
76 	ipc_space_t     space);
77 #endif /* _IPC_IPC_PORT_H_ */
78 
79 #ifndef _KERN_IPC_TT_H_
80 
81 #define port_name_to_task(name) port_name_to_task_kernel(name)
82 
83 extern task_t port_name_to_task_kernel(
84 	mach_port_name_t name);
85 extern task_t port_name_to_task_read(
86 	mach_port_name_t name);
87 extern task_t port_name_to_task_name(
88 	mach_port_name_t name);
89 extern void ipc_port_release_send(
90 	ipc_port_t      port);
91 #endif /* _KERN_IPC_TT_H_ */
92 
93 extern ipc_space_t  get_task_ipcspace(
94 	task_t t);
95 
96 #if CONFIG_MEMORYSTATUS
97 extern int max_task_footprint_mb;       /* Per-task limit on physical memory consumption in megabytes */
98 #endif /* CONFIG_MEMORYSTATUS */
99 
100 /* Some loose-ends VM stuff */
101 
102 extern const vm_size_t msg_ool_size_small;
103 
104 extern kern_return_t vm_tests(void);
105 extern void consider_machine_adjust(void);
106 extern vm_map_offset_t get_map_min(vm_map_t);
107 extern vm_map_offset_t get_map_max(vm_map_t);
108 extern vm_map_size_t get_vmmap_size(vm_map_t);
109 extern int get_task_page_size(task_t);
110 #if CONFIG_COREDUMP
111 extern int get_vmmap_entries(vm_map_t);
112 #endif
113 extern int get_map_nentries(vm_map_t);
114 
115 extern vm_map_offset_t vm_map_page_mask(vm_map_t);
116 
117 extern kern_return_t vm_map_purgable_control(
118 	vm_map_t                map,
119 	vm_map_offset_t         address,
120 	vm_purgable_t           control,
121 	int                     *state);
122 
123 #if MACH_ASSERT
124 extern void vm_map_pmap_set_process(
125 	vm_map_t        map,
126 	int             pid,
127 	char            *procname);
128 extern void vm_map_pmap_check_ledgers(
129 	pmap_t          pmap,
130 	ledger_t        ledger,
131 	int             pid,
132 	char            *procname);
133 #endif /* MACH_ASSERT */
134 
135 extern kern_return_t
136 vnode_pager_get_object_vnode(
137 	memory_object_t mem_obj,
138 	uintptr_t * vnodeaddr,
139 	uint32_t * vid);
140 
141 #if CONFIG_COREDUMP
142 extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
143 #endif
144 
145 /*
146  * VM routines that used to be published to
147  * user space, and are now restricted to the kernel.
148  *
149  * They should eventually go away entirely -
150  * to be replaced with standard vm_map() and
151  * vm_deallocate() calls.
152  */
153 
154 extern kern_return_t vm_upl_map
155 (
156 	vm_map_t target_task,
157 	upl_t upl,
158 	vm_address_t *address
159 );
160 
161 extern kern_return_t vm_upl_unmap
162 (
163 	vm_map_t target_task,
164 	upl_t upl
165 );
166 
167 extern kern_return_t vm_upl_map_range
168 (
169 	vm_map_t target_task,
170 	upl_t upl,
171 	vm_offset_t offset,
172 	vm_size_t size,
173 	vm_prot_t prot,
174 	vm_address_t *address
175 );
176 
177 extern kern_return_t vm_upl_unmap_range
178 (
179 	vm_map_t target_task,
180 	upl_t upl,
181 	vm_offset_t offset,
182 	vm_size_t size
183 );
184 
185 extern kern_return_t vm_region_object_create
186 (
187 	vm_map_t target_task,
188 	vm_size_t size,
189 	ipc_port_t *object_handle
190 );
191 
192 #if CONFIG_CODE_DECRYPTION
193 #define VM_MAP_DEBUG_APPLE_PROTECT      MACH_ASSERT
194 #if VM_MAP_DEBUG_APPLE_PROTECT
195 extern int vm_map_debug_apple_protect;
196 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
197 struct pager_crypt_info;
198 extern kern_return_t vm_map_apple_protected(
199 	vm_map_t                map,
200 	vm_map_offset_t         start,
201 	vm_map_offset_t         end,
202 	vm_object_offset_t      crypto_backing_offset,
203 	struct pager_crypt_info *crypt_info,
204 	uint32_t                cryptid);
205 extern memory_object_t apple_protect_pager_setup(
206 	vm_object_t             backing_object,
207 	vm_object_offset_t      backing_offset,
208 	vm_object_offset_t      crypto_backing_offset,
209 	struct pager_crypt_info *crypt_info,
210 	vm_object_offset_t      crypto_start,
211 	vm_object_offset_t      crypto_end,
212 	boolean_t               cache_pager);
213 #endif  /* CONFIG_CODE_DECRYPTION */
214 
215 struct vm_shared_region_slide_info;
216 extern kern_return_t vm_map_shared_region(
217 	vm_map_t                map,
218 	vm_map_offset_t         start,
219 	vm_map_offset_t         end,
220 	vm_object_offset_t      backing_offset,
221 	struct vm_shared_region_slide_info *slide_info);
222 
223 extern memory_object_t shared_region_pager_setup(
224 	vm_object_t             backing_object,
225 	vm_object_offset_t      backing_offset,
226 	struct vm_shared_region_slide_info *slide_info,
227 	uint64_t                jop_key);
228 #if __has_feature(ptrauth_calls)
229 extern memory_object_t shared_region_pager_match(
230 	vm_object_t             backing_object,
231 	vm_object_offset_t      backing_offset,
232 	struct vm_shared_region_slide_info *slide_info,
233 	uint64_t                jop_key);
234 extern void shared_region_key_alloc(
235 	char *shared_region_id,
236 	bool inherit,
237 	uint64_t inherited_key);
238 extern void shared_region_key_dealloc(
239 	char *shared_region_id);
240 extern uint64_t generate_jop_key(void);
241 extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
242 #endif /* __has_feature(ptrauth_calls) */
243 extern bool vm_shared_region_is_reslide(struct task *task);
244 
245 struct vnode;
246 extern memory_object_t swapfile_pager_setup(struct vnode *vp);
247 extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
248 
249 #if __arm64__ || (__ARM_ARCH_7K__ >= 2)
250 #define SIXTEENK_PAGE_SIZE      0x4000
251 #define SIXTEENK_PAGE_MASK      0x3FFF
252 #define SIXTEENK_PAGE_SHIFT     14
253 #endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */
254 
255 #define FOURK_PAGE_SIZE         0x1000
256 #define FOURK_PAGE_MASK         0xFFF
257 #define FOURK_PAGE_SHIFT        12
258 
259 #if __arm64__
260 
261 extern unsigned int page_shift_user32;
262 
263 #define VM_MAP_DEBUG_FOURK      MACH_ASSERT
264 #if VM_MAP_DEBUG_FOURK
265 extern int vm_map_debug_fourk;
266 #endif /* VM_MAP_DEBUG_FOURK */
267 extern memory_object_t fourk_pager_create(void);
268 extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
269 extern kern_return_t fourk_pager_populate(
270 	memory_object_t mem_obj,
271 	boolean_t overwrite,
272 	int index,
273 	vm_object_t new_backing_object,
274 	vm_object_offset_t new_backing_offset,
275 	vm_object_t *old_backing_object,
276 	vm_object_offset_t *old_backing_offset);
277 #endif /* __arm64__ */
278 
279 /*
280  * bsd
281  */
282 struct vnode;
283 
284 extern void vnode_setswapmount(struct vnode *);
285 extern int64_t vnode_getswappin_avail(struct vnode *);
286 
287 extern void vnode_pager_was_dirtied(
288 	struct vnode *,
289 	vm_object_offset_t,
290 	vm_object_offset_t);
291 
292 typedef int pager_return_t;
293 extern pager_return_t   vnode_pagein(
294 	struct vnode *, upl_t,
295 	upl_offset_t, vm_object_offset_t,
296 	upl_size_t, int, int *);
297 extern pager_return_t   vnode_pageout(
298 	struct vnode *, upl_t,
299 	upl_offset_t, vm_object_offset_t,
300 	upl_size_t, int, int *);
301 extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
302 extern memory_object_t vnode_pager_setup(
303 	struct vnode *, memory_object_t);
304 extern vm_object_offset_t vnode_pager_get_filesize(
305 	struct vnode *);
306 extern uint32_t vnode_pager_isinuse(
307 	struct vnode *);
308 extern boolean_t vnode_pager_isSSD(
309 	struct vnode *);
310 extern void vnode_pager_throttle(
311 	void);
312 extern uint32_t vnode_pager_return_throttle_io_limit(
313 	struct vnode *,
314 	uint32_t     *);
315 extern kern_return_t vnode_pager_get_name(
316 	struct vnode    *vp,
317 	char            *pathname,
318 	vm_size_t       pathname_len,
319 	char            *filename,
320 	vm_size_t       filename_len,
321 	boolean_t       *truncated_path_p);
322 struct timespec;
323 extern kern_return_t vnode_pager_get_mtime(
324 	struct vnode    *vp,
325 	struct timespec *mtime,
326 	struct timespec *cs_mtime);
327 extern kern_return_t vnode_pager_get_cs_blobs(
328 	struct vnode    *vp,
329 	void            **blobs);
330 
331 #if CONFIG_IOSCHED
332 void vnode_pager_issue_reprioritize_io(
333 	struct vnode    *devvp,
334 	uint64_t        blkno,
335 	uint32_t        len,
336 	int             priority);
337 #endif
338 
339 #if CHECK_CS_VALIDATION_BITMAP
340 /* used by the vnode_pager_cs_check_validation_bitmap routine */
341 #define CS_BITMAP_SET   1
342 #define CS_BITMAP_CLEAR 2
343 #define CS_BITMAP_CHECK 3
344 
345 #endif /* CHECK_CS_VALIDATION_BITMAP */
346 
347 extern kern_return_t
348 vnode_pager_data_unlock(
349 	memory_object_t         mem_obj,
350 	memory_object_offset_t  offset,
351 	memory_object_size_t            size,
352 	vm_prot_t               desired_access);
353 extern kern_return_t vnode_pager_init(
354 	memory_object_t,
355 	memory_object_control_t,
356 	memory_object_cluster_size_t);
357 extern kern_return_t vnode_pager_get_object_size(
358 	memory_object_t,
359 	memory_object_offset_t *);
360 
361 #if CONFIG_IOSCHED
362 extern kern_return_t vnode_pager_get_object_devvp(
363 	memory_object_t,
364 	uintptr_t *);
365 #endif
366 
367 extern void vnode_pager_dirtied(
368 	memory_object_t,
369 	vm_object_offset_t,
370 	vm_object_offset_t);
371 extern kern_return_t vnode_pager_get_isinuse(
372 	memory_object_t,
373 	uint32_t *);
374 extern kern_return_t vnode_pager_get_isSSD(
375 	memory_object_t,
376 	boolean_t *);
377 extern kern_return_t vnode_pager_get_throttle_io_limit(
378 	memory_object_t,
379 	uint32_t *);
380 extern kern_return_t vnode_pager_get_object_name(
381 	memory_object_t mem_obj,
382 	char            *pathname,
383 	vm_size_t       pathname_len,
384 	char            *filename,
385 	vm_size_t       filename_len,
386 	boolean_t       *truncated_path_p);
387 extern kern_return_t vnode_pager_get_object_mtime(
388 	memory_object_t mem_obj,
389 	struct timespec *mtime,
390 	struct timespec *cs_mtime);
391 
392 #if CHECK_CS_VALIDATION_BITMAP
393 extern kern_return_t vnode_pager_cs_check_validation_bitmap(
394 	memory_object_t mem_obj,
395 	memory_object_offset_t  offset,
396 	int             optype);
397 #endif /*CHECK_CS_VALIDATION_BITMAP*/
398 
399 extern  kern_return_t ubc_cs_check_validation_bitmap(
400 	struct vnode *vp,
401 	memory_object_offset_t offset,
402 	int optype);
403 
404 extern kern_return_t vnode_pager_data_request(
405 	memory_object_t,
406 	memory_object_offset_t,
407 	memory_object_cluster_size_t,
408 	vm_prot_t,
409 	memory_object_fault_info_t);
410 extern kern_return_t vnode_pager_data_return(
411 	memory_object_t,
412 	memory_object_offset_t,
413 	memory_object_cluster_size_t,
414 	memory_object_offset_t *,
415 	int *,
416 	boolean_t,
417 	boolean_t,
418 	int);
419 extern kern_return_t vnode_pager_data_initialize(
420 	memory_object_t,
421 	memory_object_offset_t,
422 	memory_object_cluster_size_t);
423 extern void vnode_pager_reference(
424 	memory_object_t         mem_obj);
425 extern kern_return_t vnode_pager_map(
426 	memory_object_t         mem_obj,
427 	vm_prot_t               prot);
428 extern kern_return_t vnode_pager_last_unmap(
429 	memory_object_t         mem_obj);
430 extern void vnode_pager_deallocate(
431 	memory_object_t);
432 extern kern_return_t vnode_pager_terminate(
433 	memory_object_t);
434 extern void vnode_pager_vrele(
435 	struct vnode *vp);
436 extern struct vnode *vnode_pager_lookup_vnode(
437 	memory_object_t);
438 
439 extern int  ubc_map(
440 	struct vnode *vp,
441 	int flags);
442 extern void ubc_unmap(
443 	struct vnode *vp);
444 
445 struct vm_map_entry;
446 extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
447 
448 extern void   device_pager_reference(memory_object_t);
449 extern void   device_pager_deallocate(memory_object_t);
450 extern kern_return_t   device_pager_init(memory_object_t,
451     memory_object_control_t,
452     memory_object_cluster_size_t);
453 extern  kern_return_t device_pager_terminate(memory_object_t);
454 extern  kern_return_t   device_pager_data_request(memory_object_t,
455     memory_object_offset_t,
456     memory_object_cluster_size_t,
457     vm_prot_t,
458     memory_object_fault_info_t);
459 extern kern_return_t device_pager_data_return(memory_object_t,
460     memory_object_offset_t,
461     memory_object_cluster_size_t,
462     memory_object_offset_t *,
463     int *,
464     boolean_t,
465     boolean_t,
466     int);
467 extern kern_return_t device_pager_data_initialize(memory_object_t,
468     memory_object_offset_t,
469     memory_object_cluster_size_t);
470 extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
471 extern kern_return_t device_pager_last_unmap(memory_object_t);
472 extern kern_return_t device_pager_populate_object(
473 	memory_object_t         device,
474 	memory_object_offset_t  offset,
475 	ppnum_t                 page_num,
476 	vm_size_t               size);
477 extern memory_object_t device_pager_setup(
478 	memory_object_t,
479 	uintptr_t,
480 	vm_size_t,
481 	int);
482 
483 extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);
484 
485 extern kern_return_t pager_map_to_phys_contiguous(
486 	memory_object_control_t object,
487 	memory_object_offset_t  offset,
488 	addr64_t                base_vaddr,
489 	vm_size_t               size);
490 
491 extern kern_return_t memory_object_create_named(
492 	memory_object_t pager,
493 	memory_object_offset_t  size,
494 	memory_object_control_t         *control);
495 
496 struct macx_triggers_args;
497 extern int mach_macx_triggers(
498 	struct macx_triggers_args       *args);
499 
500 extern int macx_swapinfo(
501 	memory_object_size_t    *total_p,
502 	memory_object_size_t    *avail_p,
503 	vm_size_t               *pagesize_p,
504 	boolean_t               *encrypted_p);
505 
506 extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
507 extern void log_unnest_badness(
508 	vm_map_t map,
509 	vm_map_offset_t start_unnest,
510 	vm_map_offset_t end_unnest,
511 	boolean_t is_nested_map,
512 	vm_map_offset_t lowest_unnestable_addr);
513 
514 struct proc;
515 struct proc *current_proc(void);
516 extern int cs_allow_invalid(struct proc *p);
517 extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);
518 
519 #define CS_VALIDATE_TAINTED     0x00000001
520 #define CS_VALIDATE_NX          0x00000002
521 extern boolean_t cs_validate_range(struct vnode *vp,
522     memory_object_t pager,
523     memory_object_offset_t offset,
524     const void *data,
525     vm_size_t size,
526     unsigned *result);
527 extern void cs_validate_page(
528 	struct vnode *vp,
529 	memory_object_t pager,
530 	memory_object_offset_t offset,
531 	const void *data,
532 	int *validated_p,
533 	int *tainted_p,
534 	int *nx_p);
535 
536 extern kern_return_t memory_entry_purgeable_control_internal(
537 	ipc_port_t      entry_port,
538 	vm_purgable_t   control,
539 	int             *state);
540 
541 extern kern_return_t memory_entry_access_tracking_internal(
542 	ipc_port_t      entry_port,
543 	int             *access_tracking,
544 	uint32_t        *access_tracking_reads,
545 	uint32_t        *access_tracking_writes);
546 
547 extern kern_return_t mach_memory_object_memory_entry_64(
548 	host_t                  host,
549 	boolean_t               internal,
550 	vm_object_offset_t      size,
551 	vm_prot_t               permission,
552 	memory_object_t         pager,
553 	ipc_port_t              *entry_handle);
554 
555 extern kern_return_t mach_memory_entry_purgable_control(
556 	ipc_port_t      entry_port,
557 	vm_purgable_t   control,
558 	int             *state);
559 
560 extern kern_return_t mach_memory_entry_get_page_counts(
561 	ipc_port_t      entry_port,
562 	unsigned int    *resident_page_count,
563 	unsigned int    *dirty_page_count);
564 
565 extern kern_return_t mach_memory_entry_phys_page_offset(
566 	ipc_port_t              entry_port,
567 	vm_object_offset_t      *offset_p);
568 
569 extern kern_return_t mach_memory_entry_map_size(
570 	ipc_port_t             entry_port,
571 	vm_map_t               map,
572 	memory_object_offset_t offset,
573 	memory_object_offset_t size,
574 	mach_vm_size_t         *map_size);
575 
576 extern kern_return_t vm_map_range_physical_size(
577 	vm_map_t         map,
578 	vm_map_address_t start,
579 	mach_vm_size_t   size,
580 	mach_vm_size_t * phys_size);
581 
582 extern kern_return_t mach_memory_entry_page_op(
583 	ipc_port_t              entry_port,
584 	vm_object_offset_t      offset,
585 	int                     ops,
586 	ppnum_t                 *phys_entry,
587 	int                     *flags);
588 
589 extern kern_return_t mach_memory_entry_range_op(
590 	ipc_port_t              entry_port,
591 	vm_object_offset_t      offset_beg,
592 	vm_object_offset_t      offset_end,
593 	int                     ops,
594 	int                     *range);
595 
596 extern void mach_memory_entry_port_release(ipc_port_t port);
597 extern vm_named_entry_t mach_memory_entry_from_port(ipc_port_t port);
598 extern struct vm_named_entry *mach_memory_entry_allocate(ipc_port_t *user_handle_p);
599 extern vm_object_t vm_named_entry_to_vm_object(
600 	vm_named_entry_t        named_entry);
601 extern void vm_named_entry_associate_vm_object(
602 	vm_named_entry_t        named_entry,
603 	vm_object_t             object,
604 	vm_object_offset_t      offset,
605 	vm_object_size_t        size,
606 	vm_prot_t               prot);
607 
608 extern int macx_backing_store_compaction(int flags);
609 extern unsigned int mach_vm_ctl_page_free_wanted(void);
610 
611 extern int no_paging_space_action(void);
612 
613 extern unsigned int vmtc_total;        /* total # of text page corruptions detected */
614 
615 extern kern_return_t revalidate_text_page(task_t, vm_map_offset_t);
616 
617 #define VM_TOGGLE_CLEAR         0
618 #define VM_TOGGLE_SET           1
619 #define VM_TOGGLE_GETVALUE      999
620 int vm_toggle_entry_reuse(int, int*);
621 
622 #define SWAP_WRITE              0x00000000      /* Write buffer (pseudo flag). */
623 #define SWAP_READ               0x00000001      /* Read buffer. */
624 #define SWAP_ASYNC              0x00000002      /* Start I/O, do not wait. */
625 
626 extern kern_return_t compressor_memory_object_create(
627 	memory_object_size_t,
628 	memory_object_t *);
629 
630 extern boolean_t vm_compressor_low_on_space(void);
631 extern bool vm_compressor_compressed_pages_nearing_limit(void);
632 extern boolean_t vm_compressor_out_of_space(void);
633 extern int       vm_swap_low_on_space(void);
634 extern int       vm_swap_out_of_space(void);
635 void             do_fastwake_warmup_all(void);
636 
637 #if defined(__arm64__)
638 extern void vm_panic_hibernate_write_image_failed(int err);
639 #endif /* __arm64__ */
640 
641 #if CONFIG_JETSAM
642 extern int proc_get_memstat_priority(struct proc*, boolean_t);
643 #endif /* CONFIG_JETSAM */
644 
645 /* the object purger. purges the next eligible object from memory. */
646 /* returns TRUE if an object was purged, otherwise FALSE. */
647 boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
648 void vm_purgeable_nonvolatile_owner_update(task_t       owner,
649     int          delta);
650 void vm_purgeable_volatile_owner_update(task_t          owner,
651     int             delta);
652 void vm_owned_objects_disown(task_t task);
653 
654 
/*
 * trim_list: one node of a singly-linked list of extents
 * ([tl_offset, tl_offset + tl_length)) to be trimmed from a vnode;
 * consumed by vnode_trim_list() below.
 * NOTE(review): offsets/lengths are presumably in bytes — confirm
 * against the vnode_trim_list() implementation.
 */
655 struct trim_list {
656 	uint64_t        tl_offset;      /* start of extent */
657 	uint64_t        tl_length;      /* length of extent */
658 	struct trim_list *tl_next;      /* next extent, or NULL at end of list */
659 };
660 
661 u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
662 
663 #define MAX_SWAPFILENAME_LEN    1024
664 #define SWAPFILENAME_INDEX_LEN  2       /* Doesn't include the terminating NULL character */
665 
666 extern char     swapfilename[MAX_SWAPFILENAME_LEN + 1];
667 
/*
 * vm_counters: global event counters for rare/interesting VM object
 * operations (compressor-assisted object collapse and UPL-creation
 * edge cases).  Single instance "vm_counters" is declared just below;
 * the fields are incremented elsewhere in the VM code (not visible in
 * this header).
 */
668 struct vm_counters {
669 	unsigned int    do_collapse_compressor;
670 	unsigned int    do_collapse_compressor_pages;
671 	unsigned int    do_collapse_terminate;
672 	unsigned int    do_collapse_terminate_failure;
673 	unsigned int    should_cow_but_wired;
674 	unsigned int    create_upl_extra_cow;
675 	unsigned int    create_upl_extra_cow_pages;
676 	unsigned int    create_upl_lookup_failure_write;
677 	unsigned int    create_upl_lookup_failure_copy;
678 };
679 extern struct vm_counters vm_counters;
680 
681 #if CONFIG_SECLUDED_MEMORY
/*
 * vm_page_secluded_data: statistics for the secluded-memory page pool
 * (CONFIG_SECLUDED_MEMORY only).  Tracks page-grab successes and the
 * various reasons a grab from the secluded pool can fail, plus grabs
 * performed on behalf of IOKit.  Single instance "vm_page_secluded"
 * is declared just below; counters are maintained by the page
 * allocator (not visible in this header).
 */
682 struct vm_page_secluded_data {
683 	int     eligible_for_secluded;
684 	int     grab_success_free;
685 	int     grab_success_other;
686 	int     grab_failure_locked;
687 	int     grab_failure_state;
688 	int     grab_failure_realtime;
689 	int     grab_failure_dirty;
690 	int     grab_for_iokit;
691 	int     grab_for_iokit_success;
692 };
693 extern struct vm_page_secluded_data vm_page_secluded;
694 
695 extern int num_tasks_can_use_secluded_mem;
696 
697 /* boot-args */
698 
/*
 * secluded_filecache_mode_t: policy controlling which file-backed
 * pages may be placed in the secluded pool.  Current setting is
 * exposed via "secluded_for_filecache" below; per the comment above,
 * this is configurable via boot-args.
 */
699 __enum_decl(secluded_filecache_mode_t, uint8_t, {
700 	/*
701 	 * SECLUDED_FILECACHE_NONE:
702 	 * + no file contents in secluded pool
703 	 */
704 	SECLUDED_FILECACHE_NONE = 0,
705 	/*
706 	 * SECLUDED_FILECACHE_APPS
707 	 * + no files from /
708 	 * + files from /Applications/ are OK
709 	 * + files from /Applications/Camera are not OK
710 	 * + no files that are open for write
711 	 */
712 	SECLUDED_FILECACHE_APPS = 1,
713 	/*
714 	 * SECLUDED_FILECACHE_RDONLY
715 	 * + all read-only files OK, except:
716 	 *      + dyld_shared_cache_arm64*
717 	 *      + Camera
718 	 *	+ mediaserverd
719 	 */
720 	SECLUDED_FILECACHE_RDONLY = 2,
});
722 
723 extern secluded_filecache_mode_t secluded_for_filecache;
724 extern bool secluded_for_apps;
725 extern bool secluded_for_iokit;
726 
727 extern uint64_t vm_page_secluded_drain(void);
728 extern void             memory_object_mark_eligible_for_secluded(
729 	memory_object_control_t         control,
730 	boolean_t                       eligible_for_secluded);
731 
732 #endif /* CONFIG_SECLUDED_MEMORY */
733 
734 extern void             memory_object_mark_for_realtime(
735 	memory_object_control_t         control,
736 	bool                            for_realtime);
737 
738 #if MACH_ASSERT
739 extern void             memory_object_mark_for_fbdp(
740 	memory_object_control_t         control);
741 #endif /* MACH_ASSERT */
742 
743 #define MAX_PAGE_RANGE_QUERY    (1ULL * 1024 * 1024 * 1024) /* 1 GB */
744 
745 extern kern_return_t mach_make_memory_entry_internal(
746 	vm_map_t                target_map,
747 	memory_object_size_t    *size,
748 	memory_object_offset_t offset,
749 	vm_prot_t               permission,
750 	vm_named_entry_kernel_flags_t vmne_kflags,
751 	ipc_port_t              *object_handle,
752 	ipc_port_t              parent_handle);
753 
754 extern kern_return_t
755 memory_entry_check_for_adjustment(
756 	vm_map_t                        src_map,
757 	ipc_port_t                      port,
758 	vm_map_offset_t         *overmap_start,
759 	vm_map_offset_t         *overmap_end);
760 
/*
 * roundup(x, y): round "x" up to the next multiple of "y" (returns "x"
 * unchanged if it is already a multiple).
 * NOTE(review): unsafe function-like macro — evaluates "x" and "y" up
 * to three times each, so arguments must be free of side effects, and
 * "y" must be nonzero (division by zero otherwise).  This definition
 * may also collide with the roundup() from BSD <sys/param.h> if both
 * are visible in the same translation unit — confirm include ordering
 * before adding new users.
 */
761 #define roundup(x, y)   ((((x) % (y)) == 0) ? \
762 	                (x) : ((x) + ((y) - ((x) % (y)))))
763 
764 #ifdef __cplusplus
765 }
766 #endif
767 
768 /*
769  * Flags for the VM swapper/reclaimer.
770  * Used by vm_swap_consider_defragment()
771  * to force defrag/reclaim by the swap
772  * GC thread.
773  */
774 #define VM_SWAP_FLAGS_NONE             0
775 #define VM_SWAP_FLAGS_FORCE_DEFRAG     1
776 #define VM_SWAP_FLAGS_FORCE_RECLAIM    2
777 
778 #if __arm64__
779 /*
780  * Flags to control the behavior of
781  * the legacy footprint entitlement.
782  */
783 #define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE             (1)
784 #define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT         (2)
785 #define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE     (3)
786 
787 #endif /* __arm64__ */
788 
789 #if MACH_ASSERT
790 struct proc;
791 extern struct proc *current_proc(void);
792 extern int proc_pid(struct proc *);
793 extern char *proc_best_name(struct proc *);
794 struct thread;
795 extern uint64_t thread_tid(struct thread *);
796 extern int debug4k_filter;
797 extern int debug4k_proc_filter;
798 extern char debug4k_proc_name[];
799 extern const char *debug4k_category_name[];
800 
/*
 * __DEBUG4K(category, fmt, ...):
 * Conditionally printf() a "DEBUG4K" diagnostic for one of the
 * __DEBUG4K_* categories defined below (MACH_ASSERT builds only; the
 * #else branch below compiles all DEBUG4K_*() uses away).
 *
 * The message is emitted when either:
 *   - the category's bit is set in the global "debug4k_filter", or
 *   - the category's bit is set in "debug4k_proc_filter" AND the
 *     current process's best name matches "debug4k_proc_name".
 * Each log line is prefixed with the category name, pid and process
 * name, current thread pointer and tid, and the call site
 * (__FUNCTION__:__LINE__).
 *
 * NOTE(review): "fmt" must be a string literal (it is pasted onto the
 * prefix format string) and the macro evaluates current_proc() /
 * proc_best_name() at most twice on the slow path — safe, but not
 * intended for hot paths.
 */
801 #define __DEBUG4K(category, fmt, ...)                                   \
802 	MACRO_BEGIN                                                     \
803 	int __category = (category);                                    \
804 	struct thread *__t = NULL;                                      \
805 	struct proc *__p = NULL;                                        \
806 	const char *__pname = "?";                                      \
807 	boolean_t __do_log = FALSE;                                     \
808                                                                         \
809 	if ((1 << __category) & debug4k_filter) {                       \
810 	        __do_log = TRUE;                                        \
811 	} else if (((1 << __category) & debug4k_proc_filter) &&         \
812 	           debug4k_proc_name[0] != '\0') {                      \
813 	        __p = current_proc();                                   \
814 	        if (__p != NULL) {                                      \
815 	                __pname = proc_best_name(__p);                  \
816 	        }                                                       \
817 	        if (!strcmp(debug4k_proc_name, __pname)) {              \
818 	                __do_log = TRUE;                                \
819 	        }                                                       \
820 	}                                                               \
821 	if (__do_log) {                                                 \
822 	        if (__p == NULL) {                                      \
823 	                __p = current_proc();                           \
824 	                if (__p != NULL) {                              \
825 	                        __pname = proc_best_name(__p);          \
826 	                }                                               \
827 	        }                                                       \
828 	        __t = current_thread();                                 \
829 	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt,     \
830 	               debug4k_category_name[__category],               \
831 	               __p ? proc_pid(__p) : 0,                         \
832 	               __pname,                                         \
833 	               __t,                                             \
834 	               thread_tid(__t),                                 \
835 	               __FUNCTION__,                                    \
836 	               __LINE__,                                        \
837 	               ##__VA_ARGS__);                                  \
838 	}                                                               \
839 	MACRO_END
840 
841 #define __DEBUG4K_ERROR         0
842 #define __DEBUG4K_LIFE          1
843 #define __DEBUG4K_LOAD          2
844 #define __DEBUG4K_FAULT         3
845 #define __DEBUG4K_COPY          4
846 #define __DEBUG4K_SHARE         5
847 #define __DEBUG4K_ADJUST        6
848 #define __DEBUG4K_PMAP          7
849 #define __DEBUG4K_MEMENTRY      8
850 #define __DEBUG4K_IOKIT         9
851 #define __DEBUG4K_UPL           10
852 #define __DEBUG4K_EXC           11
853 #define __DEBUG4K_VFS           12
854 
855 #define DEBUG4K_ERROR(...)      __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
856 #define DEBUG4K_LIFE(...)       __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
857 #define DEBUG4K_LOAD(...)       __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
858 #define DEBUG4K_FAULT(...)      __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
859 #define DEBUG4K_COPY(...)       __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
860 #define DEBUG4K_SHARE(...)      __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
861 #define DEBUG4K_ADJUST(...)     __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
862 #define DEBUG4K_PMAP(...)       __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
863 #define DEBUG4K_MEMENTRY(...)   __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
864 #define DEBUG4K_IOKIT(...)      __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
865 #define DEBUG4K_UPL(...)        __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
866 #define DEBUG4K_EXC(...)        __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
867 #define DEBUG4K_VFS(...)        __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)
868 
869 #else /* MACH_ASSERT */
870 
871 #define DEBUG4K_ERROR(...)
872 #define DEBUG4K_LIFE(...)
873 #define DEBUG4K_LOAD(...)
874 #define DEBUG4K_FAULT(...)
875 #define DEBUG4K_COPY(...)
876 #define DEBUG4K_SHARE(...)
877 #define DEBUG4K_ADJUST(...)
878 #define DEBUG4K_PMAP(...)
879 #define DEBUG4K_MEMENTRY(...)
880 #define DEBUG4K_IOKIT(...)
881 #define DEBUG4K_UPL(...)
882 #define DEBUG4K_EXC(...)
883 #define DEBUG4K_VFS(...)
884 
885 #endif /* MACH_ASSERT */
886 
887 
888 #endif  /* _VM_VM_PROTOS_H_ */
889 
890 #endif  /* XNU_KERNEL_PRIVATE */
891