xref: /xnu-8792.81.2/osfmk/vm/vm_protos.h (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifdef  XNU_KERNEL_PRIVATE
30 
31 #ifndef _VM_VM_PROTOS_H_
32 #define _VM_VM_PROTOS_H_
33 
34 #include <mach/mach_types.h>
35 #include <kern/kern_types.h>
36 
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40 
41 /*
42  * This file contains various type definitions and routine prototypes
43  * that are needed to avoid compilation warnings for VM code (in osfmk,
44  * default_pager and bsd).
45  * Most of these should eventually go into more appropriate header files.
46  *
47  * Include it after all other header files since it doesn't include any
48  * type definitions and it works around some conflicts with other header
49  * files.
50  */
51 
52 /*
53  * iokit
54  */
55 extern kern_return_t device_data_action(
56 	uintptr_t               device_handle,
57 	ipc_port_t              device_pager,
58 	vm_prot_t               protection,
59 	vm_object_offset_t      offset,
60 	vm_size_t               size);
61 
62 extern kern_return_t device_close(
63 	uintptr_t     device_handle);
64 
65 extern boolean_t vm_swap_files_pinned(void);
66 
67 /*
68  * osfmk
69  */
70 #ifndef _IPC_IPC_PORT_H_
71 extern mach_port_name_t ipc_port_copyout_send(
72 	ipc_port_t      sright,
73 	ipc_space_t     space);
74 extern mach_port_name_t ipc_port_copyout_send_pinned(
75 	ipc_port_t      sright,
76 	ipc_space_t     space);
77 #endif /* _IPC_IPC_PORT_H_ */
78 
79 #ifndef _KERN_IPC_TT_H_
80 
81 #define port_name_to_task(name) port_name_to_task_kernel(name)
82 
83 extern task_t port_name_to_task_kernel(
84 	mach_port_name_t name);
85 extern task_t port_name_to_task_read(
86 	mach_port_name_t name);
87 extern task_t port_name_to_task_name(
88 	mach_port_name_t name);
89 extern void ipc_port_release_send(
90 	ipc_port_t      port);
91 #endif /* _KERN_IPC_TT_H_ */
92 
93 extern ipc_space_t  get_task_ipcspace(
94 	task_t t);
95 
96 #if CONFIG_MEMORYSTATUS
97 extern int max_task_footprint_mb;       /* Per-task limit on physical memory consumption in megabytes */
98 #endif /* CONFIG_MEMORYSTATUS */
99 
100 /* Some loose-ends VM stuff */
101 
102 extern const vm_size_t msg_ool_size_small;
103 
104 extern kern_return_t vm_tests(void);
105 extern void consider_machine_adjust(void);
106 extern vm_map_offset_t get_map_min(vm_map_t);
107 extern vm_map_offset_t get_map_max(vm_map_t);
108 extern vm_map_size_t get_vmmap_size(vm_map_t);
109 extern int get_task_page_size(task_t);
110 #if CONFIG_COREDUMP
111 extern int get_vmmap_entries(vm_map_t);
112 #endif
113 extern int get_map_nentries(vm_map_t);
114 
115 extern vm_map_offset_t vm_map_page_mask(vm_map_t);
116 
117 extern kern_return_t vm_map_purgable_control(
118 	vm_map_t                map,
119 	vm_map_offset_t         address,
120 	vm_purgable_t           control,
121 	int                     *state);
122 
123 #if MACH_ASSERT
124 extern void vm_map_pmap_set_process(
125 	vm_map_t        map,
126 	int             pid,
127 	char            *procname);
128 extern void vm_map_pmap_check_ledgers(
129 	pmap_t          pmap,
130 	ledger_t        ledger,
131 	int             pid,
132 	char            *procname);
133 #endif /* MACH_ASSERT */
134 
135 extern kern_return_t
136 vnode_pager_get_object_vnode(
137 	memory_object_t mem_obj,
138 	uintptr_t * vnodeaddr,
139 	uint32_t * vid);
140 
141 #if CONFIG_COREDUMP
142 extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
143 #endif
144 
145 /*
146  * VM routines that used to be published to
147  * user space, and are now restricted to the kernel.
148  *
149  * They should eventually go away entirely -
150  * to be replaced with standard vm_map() and
151  * vm_deallocate() calls.
152  */
153 
154 extern kern_return_t vm_upl_map
155 (
156 	vm_map_t target_task,
157 	upl_t upl,
158 	vm_address_t *address
159 );
160 
161 extern kern_return_t vm_upl_unmap
162 (
163 	vm_map_t target_task,
164 	upl_t upl
165 );
166 
167 extern kern_return_t vm_upl_map_range
168 (
169 	vm_map_t target_task,
170 	upl_t upl,
171 	vm_offset_t offset,
172 	vm_size_t size,
173 	vm_prot_t prot,
174 	vm_address_t *address
175 );
176 
177 extern kern_return_t vm_upl_unmap_range
178 (
179 	vm_map_t target_task,
180 	upl_t upl,
181 	vm_offset_t offset,
182 	vm_size_t size
183 );
184 
185 extern kern_return_t vm_region_object_create
186 (
187 	vm_map_t target_task,
188 	vm_size_t size,
189 	ipc_port_t *object_handle
190 );
191 
192 extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
193 extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
194 
195 #if CONFIG_CODE_DECRYPTION
196 #define VM_MAP_DEBUG_APPLE_PROTECT      MACH_ASSERT
197 #if VM_MAP_DEBUG_APPLE_PROTECT
198 extern int vm_map_debug_apple_protect;
199 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
200 struct pager_crypt_info;
201 extern kern_return_t vm_map_apple_protected(
202 	vm_map_t                map,
203 	vm_map_offset_t         start,
204 	vm_map_offset_t         end,
205 	vm_object_offset_t      crypto_backing_offset,
206 	struct pager_crypt_info *crypt_info,
207 	uint32_t                cryptid);
208 extern memory_object_t apple_protect_pager_setup(
209 	vm_object_t             backing_object,
210 	vm_object_offset_t      backing_offset,
211 	vm_object_offset_t      crypto_backing_offset,
212 	struct pager_crypt_info *crypt_info,
213 	vm_object_offset_t      crypto_start,
214 	vm_object_offset_t      crypto_end,
215 	boolean_t               cache_pager);
216 #endif  /* CONFIG_CODE_DECRYPTION */
217 
218 struct vm_shared_region_slide_info;
219 extern kern_return_t vm_map_shared_region(
220 	vm_map_t                map,
221 	vm_map_offset_t         start,
222 	vm_map_offset_t         end,
223 	vm_object_offset_t      backing_offset,
224 	struct vm_shared_region_slide_info *slide_info);
225 
226 extern memory_object_t shared_region_pager_setup(
227 	vm_object_t             backing_object,
228 	vm_object_offset_t      backing_offset,
229 	struct vm_shared_region_slide_info *slide_info,
230 	uint64_t                jop_key);
231 #if __has_feature(ptrauth_calls)
232 extern memory_object_t shared_region_pager_match(
233 	vm_object_t             backing_object,
234 	vm_object_offset_t      backing_offset,
235 	struct vm_shared_region_slide_info *slide_info,
236 	uint64_t                jop_key);
237 extern void shared_region_key_alloc(
238 	char *shared_region_id,
239 	bool inherit,
240 	uint64_t inherited_key);
241 extern void shared_region_key_dealloc(
242 	char *shared_region_id);
243 extern uint64_t generate_jop_key(void);
244 extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
245 #endif /* __has_feature(ptrauth_calls) */
246 extern bool vm_shared_region_is_reslide(struct task *task);
247 
248 struct vnode;
249 extern memory_object_t swapfile_pager_setup(struct vnode *vp);
250 extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
251 
/*
 * 16KB page geometry, available where the hardware/software page size
 * can be 16KB (arm64, and __ARM_ARCH_7K__ >= 2).
 */
#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE      0x4000
#define SIXTEENK_PAGE_MASK      0x3FFF
#define SIXTEENK_PAGE_SHIFT     14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */

/* 4KB page geometry (used e.g. by the fourk pager declared below). */
#define FOURK_PAGE_SIZE         0x1000
#define FOURK_PAGE_MASK         0xFFF
#define FOURK_PAGE_SHIFT        12
261 
262 #if __arm64__
263 
264 extern unsigned int page_shift_user32;
265 
266 #define VM_MAP_DEBUG_FOURK      MACH_ASSERT
267 #if VM_MAP_DEBUG_FOURK
268 extern int vm_map_debug_fourk;
269 #endif /* VM_MAP_DEBUG_FOURK */
270 extern memory_object_t fourk_pager_create(void);
271 extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
272 extern kern_return_t fourk_pager_populate(
273 	memory_object_t mem_obj,
274 	boolean_t overwrite,
275 	int index,
276 	vm_object_t new_backing_object,
277 	vm_object_offset_t new_backing_offset,
278 	vm_object_t *old_backing_object,
279 	vm_object_offset_t *old_backing_offset);
280 #endif /* __arm64__ */
281 
282 /*
283  * bsd
284  */
285 struct vnode;
286 extern void *upl_get_internal_page_list(
287 	upl_t upl);
288 
289 extern void vnode_setswapmount(struct vnode *);
290 extern int64_t vnode_getswappin_avail(struct vnode *);
291 
292 extern void vnode_pager_was_dirtied(
293 	struct vnode *,
294 	vm_object_offset_t,
295 	vm_object_offset_t);
296 
297 typedef int pager_return_t;
298 extern pager_return_t   vnode_pagein(
299 	struct vnode *, upl_t,
300 	upl_offset_t, vm_object_offset_t,
301 	upl_size_t, int, int *);
302 extern pager_return_t   vnode_pageout(
303 	struct vnode *, upl_t,
304 	upl_offset_t, vm_object_offset_t,
305 	upl_size_t, int, int *);
306 extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
307 extern memory_object_t vnode_pager_setup(
308 	struct vnode *, memory_object_t);
309 extern vm_object_offset_t vnode_pager_get_filesize(
310 	struct vnode *);
311 extern uint32_t vnode_pager_isinuse(
312 	struct vnode *);
313 extern boolean_t vnode_pager_isSSD(
314 	struct vnode *);
315 extern void vnode_pager_throttle(
316 	void);
317 extern uint32_t vnode_pager_return_throttle_io_limit(
318 	struct vnode *,
319 	uint32_t     *);
320 extern kern_return_t vnode_pager_get_name(
321 	struct vnode    *vp,
322 	char            *pathname,
323 	vm_size_t       pathname_len,
324 	char            *filename,
325 	vm_size_t       filename_len,
326 	boolean_t       *truncated_path_p);
327 struct timespec;
328 extern kern_return_t vnode_pager_get_mtime(
329 	struct vnode    *vp,
330 	struct timespec *mtime,
331 	struct timespec *cs_mtime);
332 extern kern_return_t vnode_pager_get_cs_blobs(
333 	struct vnode    *vp,
334 	void            **blobs);
335 
336 #if CONFIG_IOSCHED
337 void vnode_pager_issue_reprioritize_io(
338 	struct vnode    *devvp,
339 	uint64_t        blkno,
340 	uint32_t        len,
341 	int             priority);
342 #endif
343 
344 #if CHECK_CS_VALIDATION_BITMAP
345 /* used by the vnode_pager_cs_validation_bitmap routine*/
346 #define CS_BITMAP_SET   1
347 #define CS_BITMAP_CLEAR 2
348 #define CS_BITMAP_CHECK 3
349 
350 #endif /* CHECK_CS_VALIDATION_BITMAP */
351 
352 extern kern_return_t
353 vnode_pager_data_unlock(
354 	memory_object_t         mem_obj,
355 	memory_object_offset_t  offset,
356 	memory_object_size_t            size,
357 	vm_prot_t               desired_access);
358 extern kern_return_t vnode_pager_init(
359 	memory_object_t,
360 	memory_object_control_t,
361 	memory_object_cluster_size_t);
362 extern kern_return_t vnode_pager_get_object_size(
363 	memory_object_t,
364 	memory_object_offset_t *);
365 
366 #if CONFIG_IOSCHED
367 extern kern_return_t vnode_pager_get_object_devvp(
368 	memory_object_t,
369 	uintptr_t *);
370 #endif
371 
372 extern void vnode_pager_dirtied(
373 	memory_object_t,
374 	vm_object_offset_t,
375 	vm_object_offset_t);
376 extern kern_return_t vnode_pager_get_isinuse(
377 	memory_object_t,
378 	uint32_t *);
379 extern kern_return_t vnode_pager_get_isSSD(
380 	memory_object_t,
381 	boolean_t *);
382 extern kern_return_t vnode_pager_get_throttle_io_limit(
383 	memory_object_t,
384 	uint32_t *);
385 extern kern_return_t vnode_pager_get_object_name(
386 	memory_object_t mem_obj,
387 	char            *pathname,
388 	vm_size_t       pathname_len,
389 	char            *filename,
390 	vm_size_t       filename_len,
391 	boolean_t       *truncated_path_p);
392 extern kern_return_t vnode_pager_get_object_mtime(
393 	memory_object_t mem_obj,
394 	struct timespec *mtime,
395 	struct timespec *cs_mtime);
396 
397 #if CHECK_CS_VALIDATION_BITMAP
398 extern kern_return_t vnode_pager_cs_check_validation_bitmap(
399 	memory_object_t mem_obj,
400 	memory_object_offset_t  offset,
401 	int             optype);
402 #endif /*CHECK_CS_VALIDATION_BITMAP*/
403 
404 extern  kern_return_t ubc_cs_check_validation_bitmap(
405 	struct vnode *vp,
406 	memory_object_offset_t offset,
407 	int optype);
408 
409 extern kern_return_t vnode_pager_data_request(
410 	memory_object_t,
411 	memory_object_offset_t,
412 	memory_object_cluster_size_t,
413 	vm_prot_t,
414 	memory_object_fault_info_t);
415 extern kern_return_t vnode_pager_data_return(
416 	memory_object_t,
417 	memory_object_offset_t,
418 	memory_object_cluster_size_t,
419 	memory_object_offset_t *,
420 	int *,
421 	boolean_t,
422 	boolean_t,
423 	int);
424 extern kern_return_t vnode_pager_data_initialize(
425 	memory_object_t,
426 	memory_object_offset_t,
427 	memory_object_cluster_size_t);
428 extern void vnode_pager_reference(
429 	memory_object_t         mem_obj);
430 extern kern_return_t vnode_pager_map(
431 	memory_object_t         mem_obj,
432 	vm_prot_t               prot);
433 extern kern_return_t vnode_pager_last_unmap(
434 	memory_object_t         mem_obj);
435 extern void vnode_pager_deallocate(
436 	memory_object_t);
437 extern kern_return_t vnode_pager_terminate(
438 	memory_object_t);
439 extern void vnode_pager_vrele(
440 	struct vnode *vp);
441 extern struct vnode *vnode_pager_lookup_vnode(
442 	memory_object_t);
443 
444 extern int  ubc_map(
445 	struct vnode *vp,
446 	int flags);
447 extern void ubc_unmap(
448 	struct vnode *vp);
449 
450 struct vm_map_entry;
451 extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
452 
453 extern void   device_pager_reference(memory_object_t);
454 extern void   device_pager_deallocate(memory_object_t);
455 extern kern_return_t   device_pager_init(memory_object_t,
456     memory_object_control_t,
457     memory_object_cluster_size_t);
458 extern  kern_return_t device_pager_terminate(memory_object_t);
459 extern  kern_return_t   device_pager_data_request(memory_object_t,
460     memory_object_offset_t,
461     memory_object_cluster_size_t,
462     vm_prot_t,
463     memory_object_fault_info_t);
464 extern kern_return_t device_pager_data_return(memory_object_t,
465     memory_object_offset_t,
466     memory_object_cluster_size_t,
467     memory_object_offset_t *,
468     int *,
469     boolean_t,
470     boolean_t,
471     int);
472 extern kern_return_t device_pager_data_initialize(memory_object_t,
473     memory_object_offset_t,
474     memory_object_cluster_size_t);
475 extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
476 extern kern_return_t device_pager_last_unmap(memory_object_t);
477 extern kern_return_t device_pager_populate_object(
478 	memory_object_t         device,
479 	memory_object_offset_t  offset,
480 	ppnum_t                 page_num,
481 	vm_size_t               size);
482 extern memory_object_t device_pager_setup(
483 	memory_object_t,
484 	uintptr_t,
485 	vm_size_t,
486 	int);
487 
488 extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);
489 
490 extern kern_return_t pager_map_to_phys_contiguous(
491 	memory_object_control_t object,
492 	memory_object_offset_t  offset,
493 	addr64_t                base_vaddr,
494 	vm_size_t               size);
495 
496 extern kern_return_t memory_object_create_named(
497 	memory_object_t pager,
498 	memory_object_offset_t  size,
499 	memory_object_control_t         *control);
500 
501 struct macx_triggers_args;
502 extern int mach_macx_triggers(
503 	struct macx_triggers_args       *args);
504 
505 extern int macx_swapinfo(
506 	memory_object_size_t    *total_p,
507 	memory_object_size_t    *avail_p,
508 	vm_size_t               *pagesize_p,
509 	boolean_t               *encrypted_p);
510 
511 extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
512 extern void log_unnest_badness(
513 	vm_map_t map,
514 	vm_map_offset_t start_unnest,
515 	vm_map_offset_t end_unnest,
516 	boolean_t is_nested_map,
517 	vm_map_offset_t lowest_unnestable_addr);
518 
519 struct proc;
520 struct proc *current_proc(void);
521 extern int cs_allow_invalid(struct proc *p);
522 extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);
523 
524 #define CS_VALIDATE_TAINTED     0x00000001
525 #define CS_VALIDATE_NX          0x00000002
526 extern boolean_t cs_validate_range(struct vnode *vp,
527     memory_object_t pager,
528     memory_object_offset_t offset,
529     const void *data,
530     vm_size_t size,
531     unsigned *result);
532 extern void cs_validate_page(
533 	struct vnode *vp,
534 	memory_object_t pager,
535 	memory_object_offset_t offset,
536 	const void *data,
537 	int *validated_p,
538 	int *tainted_p,
539 	int *nx_p);
540 
541 extern kern_return_t memory_entry_purgeable_control_internal(
542 	ipc_port_t      entry_port,
543 	vm_purgable_t   control,
544 	int             *state);
545 
546 extern kern_return_t memory_entry_access_tracking_internal(
547 	ipc_port_t      entry_port,
548 	int             *access_tracking,
549 	uint32_t        *access_tracking_reads,
550 	uint32_t        *access_tracking_writes);
551 
552 extern kern_return_t mach_memory_object_memory_entry_64(
553 	host_t                  host,
554 	boolean_t               internal,
555 	vm_object_offset_t      size,
556 	vm_prot_t               permission,
557 	memory_object_t         pager,
558 	ipc_port_t              *entry_handle);
559 
560 extern kern_return_t mach_memory_entry_purgable_control(
561 	ipc_port_t      entry_port,
562 	vm_purgable_t   control,
563 	int             *state);
564 
565 extern kern_return_t mach_memory_entry_get_page_counts(
566 	ipc_port_t      entry_port,
567 	unsigned int    *resident_page_count,
568 	unsigned int    *dirty_page_count);
569 
570 extern kern_return_t mach_memory_entry_phys_page_offset(
571 	ipc_port_t              entry_port,
572 	vm_object_offset_t      *offset_p);
573 
574 extern kern_return_t mach_memory_entry_map_size(
575 	ipc_port_t             entry_port,
576 	vm_map_t               map,
577 	memory_object_offset_t offset,
578 	memory_object_offset_t size,
579 	mach_vm_size_t         *map_size);
580 
581 extern kern_return_t vm_map_range_physical_size(
582 	vm_map_t         map,
583 	vm_map_address_t start,
584 	mach_vm_size_t   size,
585 	mach_vm_size_t * phys_size);
586 
587 extern kern_return_t mach_memory_entry_page_op(
588 	ipc_port_t              entry_port,
589 	vm_object_offset_t      offset,
590 	int                     ops,
591 	ppnum_t                 *phys_entry,
592 	int                     *flags);
593 
594 extern kern_return_t mach_memory_entry_range_op(
595 	ipc_port_t              entry_port,
596 	vm_object_offset_t      offset_beg,
597 	vm_object_offset_t      offset_end,
598 	int                     ops,
599 	int                     *range);
600 
601 extern void mach_memory_entry_port_release(ipc_port_t port);
602 extern vm_named_entry_t mach_memory_entry_from_port(ipc_port_t port);
603 extern struct vm_named_entry *mach_memory_entry_allocate(ipc_port_t *user_handle_p);
604 extern vm_object_t vm_named_entry_to_vm_object(
605 	vm_named_entry_t        named_entry);
606 extern void vm_named_entry_associate_vm_object(
607 	vm_named_entry_t        named_entry,
608 	vm_object_t             object,
609 	vm_object_offset_t      offset,
610 	vm_object_size_t        size,
611 	vm_prot_t               prot);
612 
613 extern int macx_backing_store_compaction(int flags);
614 extern unsigned int mach_vm_ctl_page_free_wanted(void);
615 
616 extern int no_paging_space_action(void);
617 
618 extern unsigned int vmtc_total;        /* total # of text page corruptions detected */
619 
620 extern kern_return_t revalidate_text_page(task_t, vm_map_offset_t);
621 
/*
 * Operation codes for vm_toggle_entry_reuse(): clear or set the
 * map-entry-reuse toggle, or read its current value back through the
 * int* out-parameter without changing it.
 */
#define VM_TOGGLE_CLEAR         0
#define VM_TOGGLE_SET           1
#define VM_TOGGLE_GETVALUE      999
int vm_toggle_entry_reuse(int, int*);
626 
627 #define SWAP_WRITE              0x00000000      /* Write buffer (pseudo flag). */
628 #define SWAP_READ               0x00000001      /* Read buffer. */
629 #define SWAP_ASYNC              0x00000002      /* Start I/O, do not wait. */
630 
631 extern kern_return_t compressor_memory_object_create(
632 	memory_object_size_t,
633 	memory_object_t *);
634 
635 extern boolean_t vm_compressor_low_on_space(void);
636 extern bool vm_compressor_compressed_pages_nearing_limit(void);
637 extern boolean_t vm_compressor_out_of_space(void);
638 extern int       vm_swap_low_on_space(void);
639 extern int       vm_swap_out_of_space(void);
640 void             do_fastwake_warmup_all(void);
641 
642 #if defined(__arm64__)
643 extern void vm_panic_hibernate_write_image_failed(int err);
644 #endif /* __arm64__ */
645 
646 #if CONFIG_JETSAM
647 extern int proc_get_memstat_priority(struct proc*, boolean_t);
648 #endif /* CONFIG_JETSAM */
649 
650 /* the object purger. purges the next eligible object from memory. */
651 /* returns TRUE if an object was purged, otherwise FALSE. */
652 boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
653 void vm_purgeable_nonvolatile_owner_update(task_t       owner,
654     int          delta);
655 void vm_purgeable_volatile_owner_update(task_t          owner,
656     int             delta);
657 void vm_owned_objects_disown(task_t task);
658 
659 
/*
 * One extent in a singly-linked list of byte ranges to be trimmed;
 * consumed by vnode_trim_list() below.
 */
struct trim_list {
	uint64_t        tl_offset;      /* byte offset of the start of the extent */
	uint64_t        tl_length;      /* length of the extent in bytes */
	struct trim_list *tl_next;      /* next extent, or NULL at end of list */
};
665 
666 u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
667 
668 #define MAX_SWAPFILENAME_LEN    1024
669 #define SWAPFILENAME_INDEX_LEN  2       /* Doesn't include the terminating NULL character */
670 
671 extern char     swapfilename[MAX_SWAPFILENAME_LEN + 1];
672 
/*
 * Global event counters for VM object-collapse and UPL-creation paths.
 * A single instance (vm_counters, declared below) is incremented by the
 * VM code; the precise meaning of each counter is defined by its
 * increment sites.
 */
struct vm_counters {
	unsigned int    do_collapse_compressor;         /* object collapses involving the compressor */
	unsigned int    do_collapse_compressor_pages;   /* pages handled by such collapses */
	unsigned int    do_collapse_terminate;          /* collapses that terminated an object */
	unsigned int    do_collapse_terminate_failure;  /* terminate-path collapse failures */
	unsigned int    should_cow_but_wired;           /* COW skipped because pages were wired */
	unsigned int    create_upl_extra_cow;           /* UPL creations needing an extra COW */
	unsigned int    create_upl_extra_cow_pages;     /* pages copied for those extra COWs */
	unsigned int    create_upl_lookup_failure_write; /* UPL lookup failures (write path) */
	unsigned int    create_upl_lookup_failure_copy;  /* UPL lookup failures (copy path) */
};
extern struct vm_counters vm_counters;
685 
686 #if CONFIG_SECLUDED_MEMORY
/*
 * Statistics for the secluded page pool (CONFIG_SECLUDED_MEMORY only).
 * A single global instance (vm_page_secluded, declared below) is
 * updated by the page-grab paths; counters record grab successes and
 * failures broken down by cause (see the increment sites for exact
 * semantics).
 */
struct vm_page_secluded_data {
	int     eligible_for_secluded;  /* pages deemed eligible for the secluded pool */
	int     grab_success_free;      /* grabs satisfied from the free queue */
	int     grab_success_other;     /* grabs satisfied from elsewhere */
	int     grab_failure_locked;    /* grab failed: page locked */
	int     grab_failure_state;     /* grab failed: page state ineligible */
	int     grab_failure_realtime;  /* grab failed: realtime constraint */
	int     grab_failure_dirty;     /* grab failed: page dirty */
	int     grab_for_iokit;         /* grabs on behalf of IOKit */
	int     grab_for_iokit_success; /* ...of which succeeded */
};
extern struct vm_page_secluded_data vm_page_secluded;
699 
700 extern int num_tasks_can_use_secluded_mem;
701 
702 /* boot-args */
703 extern int secluded_for_apps;
704 extern int secluded_for_iokit;
705 extern int secluded_for_filecache;
706 #if 11
707 extern int secluded_for_fbdp;
708 #endif
709 
710 extern uint64_t vm_page_secluded_drain(void);
711 extern void             memory_object_mark_eligible_for_secluded(
712 	memory_object_control_t         control,
713 	boolean_t                       eligible_for_secluded);
714 
715 #endif /* CONFIG_SECLUDED_MEMORY */
716 
717 extern void             memory_object_mark_for_realtime(
718 	memory_object_control_t         control,
719 	bool                            for_realtime);
720 
721 #if MACH_ASSERT
722 extern void             memory_object_mark_for_fbdp(
723 	memory_object_control_t         control);
724 #endif /* MACH_ASSERT */
725 
726 #define MAX_PAGE_RANGE_QUERY    (1ULL * 1024 * 1024 * 1024) /* 1 GB */
727 
728 extern kern_return_t mach_make_memory_entry_internal(
729 	vm_map_t                target_map,
730 	memory_object_size_t    *size,
731 	memory_object_offset_t offset,
732 	vm_prot_t               permission,
733 	vm_named_entry_kernel_flags_t vmne_kflags,
734 	ipc_port_t              *object_handle,
735 	ipc_port_t              parent_handle);
736 
737 extern kern_return_t
738 memory_entry_check_for_adjustment(
739 	vm_map_t                        src_map,
740 	ipc_port_t                      port,
741 	vm_map_offset_t         *overmap_start,
742 	vm_map_offset_t         *overmap_end);
743 
/*
 * Round x up to the next multiple of y (identity when x is already a
 * multiple of y).
 *
 * NOTE(review): function-like macro — evaluates both arguments more
 * than once, so do not pass expressions with side effects.  This
 * formulation (unlike ((x)+(y)-1)/(y)*(y)) does not overflow when x
 * is already a multiple of y near the type's maximum.
 */
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
	                (x) : ((x) + ((y) - ((x) % (y)))))
746 
747 #ifdef __cplusplus
748 }
749 #endif
750 
751 /*
752  * Flags for the VM swapper/reclaimer.
753  * Used by vm_swap_consider_defragment()
754  * to force defrag/reclaim by the swap
755  * GC thread.
756  */
757 #define VM_SWAP_FLAGS_NONE             0
758 #define VM_SWAP_FLAGS_FORCE_DEFRAG     1
759 #define VM_SWAP_FLAGS_FORCE_RECLAIM    2
760 
761 #if __arm64__
762 /*
763  * Flags to control the behavior of
764  * the legacy footprint entitlement.
765  */
766 #define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE             (1)
767 #define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT         (2)
768 #define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE     (3)
769 
770 #endif /* __arm64__ */
771 
772 #if MACH_ASSERT
773 struct proc;
774 extern struct proc *current_proc(void);
775 extern int proc_pid(struct proc *);
776 extern char *proc_best_name(struct proc *);
777 struct thread;
778 extern uint64_t thread_tid(struct thread *);
779 extern int debug4k_filter;
780 extern int debug4k_proc_filter;
781 extern char debug4k_proc_name[];
782 extern const char *debug4k_category_name[];
783 
/*
 * DEBUG4K logging backend (MACH_ASSERT builds only — see the no-op
 * stubs in the #else branch below).
 *
 * Emits a printf() tagged with the category name, the current
 * process's pid and best name, the current thread pointer/tid, and
 * the call site (__FUNCTION__:__LINE__), when either:
 *   - the category's bit is set in debug4k_filter, or
 *   - the category's bit is set in debug4k_proc_filter AND the
 *     current process's name matches debug4k_proc_name.
 * Otherwise expands to no output.  `category' is one of the
 * __DEBUG4K_* indices below (a bit position, and an index into
 * debug4k_category_name[]).
 */
#define __DEBUG4K(category, fmt, ...)                                   \
	MACRO_BEGIN                                                     \
	int __category = (category);                                    \
	struct thread *__t = NULL;                                      \
	struct proc *__p = NULL;                                        \
	const char *__pname = "?";                                      \
	boolean_t __do_log = FALSE;                                     \
                                                                        \
	if ((1 << __category) & debug4k_filter) {                       \
	        __do_log = TRUE;                                        \
	} else if (((1 << __category) & debug4k_proc_filter) &&         \
	           debug4k_proc_name[0] != '\0') {                      \
	        __p = current_proc();                                   \
	        if (__p != NULL) {                                      \
	                __pname = proc_best_name(__p);                  \
	        }                                                       \
	        if (!strcmp(debug4k_proc_name, __pname)) {              \
	                __do_log = TRUE;                                \
	        }                                                       \
	}                                                               \
	if (__do_log) {                                                 \
	        if (__p == NULL) {                                      \
	                __p = current_proc();                           \
	                if (__p != NULL) {                              \
	                        __pname = proc_best_name(__p);          \
	                }                                               \
	        }                                                       \
	        __t = current_thread();                                 \
	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt,     \
	               debug4k_category_name[__category],               \
	               __p ? proc_pid(__p) : 0,                         \
	               __pname,                                         \
	               __t,                                             \
	               thread_tid(__t),                                 \
	               __FUNCTION__,                                    \
	               __LINE__,                                        \
	               ##__VA_ARGS__);                                  \
	}                                                               \
	MACRO_END
823 
/*
 * DEBUG4K log categories: each value is both a bit position in
 * debug4k_filter / debug4k_proc_filter and an index into
 * debug4k_category_name[].
 */
#define __DEBUG4K_ERROR         0
#define __DEBUG4K_LIFE          1
#define __DEBUG4K_LOAD          2
#define __DEBUG4K_FAULT         3
#define __DEBUG4K_COPY          4
#define __DEBUG4K_SHARE         5
#define __DEBUG4K_ADJUST        6
#define __DEBUG4K_PMAP          7
#define __DEBUG4K_MEMENTRY      8
#define __DEBUG4K_IOKIT         9
#define __DEBUG4K_UPL           10
#define __DEBUG4K_EXC           11
#define __DEBUG4K_VFS           12

/* Per-category convenience wrappers around __DEBUG4K(). */
#define DEBUG4K_ERROR(...)      __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
#define DEBUG4K_LIFE(...)       __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
#define DEBUG4K_LOAD(...)       __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
#define DEBUG4K_FAULT(...)      __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
#define DEBUG4K_COPY(...)       __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
#define DEBUG4K_SHARE(...)      __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
#define DEBUG4K_ADJUST(...)     __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
#define DEBUG4K_PMAP(...)       __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
#define DEBUG4K_MEMENTRY(...)   __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
#define DEBUG4K_IOKIT(...)      __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
#define DEBUG4K_UPL(...)        __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
#define DEBUG4K_EXC(...)        __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
#define DEBUG4K_VFS(...)        __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)
851 
852 #else /* MACH_ASSERT */
853 
/* MACH_ASSERT disabled: all DEBUG4K logging compiles away to nothing. */
#define DEBUG4K_ERROR(...)
#define DEBUG4K_LIFE(...)
#define DEBUG4K_LOAD(...)
#define DEBUG4K_FAULT(...)
#define DEBUG4K_COPY(...)
#define DEBUG4K_SHARE(...)
#define DEBUG4K_ADJUST(...)
#define DEBUG4K_PMAP(...)
#define DEBUG4K_MEMENTRY(...)
#define DEBUG4K_IOKIT(...)
#define DEBUG4K_UPL(...)
#define DEBUG4K_EXC(...)
#define DEBUG4K_VFS(...)
867 
868 #endif /* MACH_ASSERT */
869 
870 
871 #endif  /* _VM_VM_PROTOS_H_ */
872 
873 #endif  /* XNU_KERNEL_PRIVATE */
874