xref: /xnu-12377.81.4/osfmk/vm/vm_compressor_xnu.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _VM_VM_COMPRESSOR_XNU_H_
29 #define _VM_VM_COMPRESSOR_XNU_H_
30 #include <stdbool.h>
31 #include <stdint.h>
32 
33 #ifdef MACH_KERNEL_PRIVATE
34 
35 #include <vm/vm_kern.h>
36 #include <vm/vm_page.h>
37 #include <vm/vm_protos.h>
38 #include <vm/WKdm_new.h>
39 #include <vm/vm_object_xnu.h>
40 #include <vm/vm_map.h>
41 #include <machine/pmap.h>
42 #include <kern/locks.h>
43 
44 #include <sys/kdebug.h>
45 
46 #if defined(__arm64__)
47 #include <arm64/proc_reg.h>
48 #endif
49 
50 #if HAS_MTE
51 #include <arm64/mte_xnu.h>
52 #endif
53 
54 #define C_SEG_OFFSET_BITS       16
55 
56 #define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE)
57 
58 #if defined(__arm64__) && (DEVELOPMENT || DEBUG)
59 
60 #if defined(XNU_PLATFORM_WatchOS)
61 #define VALIDATE_C_SEGMENTS (1)
62 #endif
63 #endif /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */
64 
65 
66 #if DEBUG || COMPRESSOR_INTEGRITY_CHECKS
67 #define ENABLE_SWAP_CHECKS 1
68 #define ENABLE_COMPRESSOR_CHECKS 1
69 #define POPCOUNT_THE_COMPRESSED_DATA (1)
70 #else
71 #define ENABLE_SWAP_CHECKS 0
72 #define ENABLE_COMPRESSOR_CHECKS 0
73 #endif
74 
75 #define CHECKSUM_THE_SWAP               ENABLE_SWAP_CHECKS              /* Debug swap data */
76 #define CHECKSUM_THE_DATA               ENABLE_COMPRESSOR_CHECKS        /* Debug compressor/decompressor data */
77 #define CHECKSUM_THE_COMPRESSED_DATA    ENABLE_COMPRESSOR_CHECKS        /* Debug compressor/decompressor compressed data */
78 
79 #ifndef VALIDATE_C_SEGMENTS
80 #define VALIDATE_C_SEGMENTS             ENABLE_COMPRESSOR_CHECKS        /* Debug compaction */
81 #endif
82 
83 #define RECORD_THE_COMPRESSED_DATA      0
84 #define TRACK_C_SEGMENT_UTILIZATION     0
85 
86 /*
87  * The c_slot structure embeds a packed pointer to a c_slot_mapping
88  * (32bits) which we ideally want to span as much VA space as possible
89  * to not limit zalloc in how it sets itself up.
90  */
91 #if !defined(__LP64__)                  /* no packing */
92 #define C_SLOT_PACKED_PTR_BITS          32
93 #define C_SLOT_PACKED_PTR_SHIFT         0
94 #define C_SLOT_PACKED_PTR_BASE          0
95 
96 #define C_SLOT_C_SIZE_BITS              12
97 #define C_SLOT_C_CODEC_BITS             1
98 #define C_SLOT_C_POPCOUNT_BITS          0
99 #define C_SLOT_C_PADDING_BITS           3
100 
101 #elif defined(__arm64__)                /* 32G from the heap start */
102 
103 #if HAS_MTE
104 #define C_MTE_SIZE                      MTE_SIZE_TO_ATAG_STORAGE(PAGE_SIZE)
105 #define C_SLOT_EXTRA_METADATA           16            /* 16 possible tags */
106 #define C_SLOT_C_MTE_SIZE_BITS          10            /* ceil(log2(C_MTE_SIZE + C_SLOT_EXTRA_METADATA))  */
107 #define C_SLOT_C_MTE_SIZE_MAX           (C_MTE_SIZE + C_SLOT_EXTRA_METADATA + 1)
108 #define C_SLOT_C_PADDING_BITS           22
109 #else /* !HAS_MTE */
110 #define C_SLOT_C_PADDING_BITS           0
111 #endif /* HAS_MTE */
112 
113 #define C_SLOT_PACKED_PTR_BITS          33
114 #define C_SLOT_PACKED_PTR_SHIFT         2
115 #define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)
116 
117 #define C_SLOT_C_SIZE_BITS              14
118 #define C_SLOT_C_CODEC_BITS             1
119 #define C_SLOT_C_POPCOUNT_BITS          0
120 
121 #elif defined(__x86_64__)               /* 256G from the heap start */
122 #define C_SLOT_PACKED_PTR_BITS          36
123 #define C_SLOT_PACKED_PTR_SHIFT         2
124 #define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)
125 
126 #define C_SLOT_C_SIZE_BITS              12
127 #define C_SLOT_C_CODEC_BITS             0 /* not used */
128 #define C_SLOT_C_POPCOUNT_BITS          0
129 #define C_SLOT_C_PADDING_BITS           0
130 
131 #else
132 #error vm_compressor parameters undefined for this architecture
133 #endif
134 
/*
 * Popcount needs to represent every value from 0 up to "all bits set".
 * A buffer of 2^C_SLOT_C_SIZE_BITS bytes has 8 * 2^C_SLOT_C_SIZE_BITS bits,
 * so that is (8 * 2^C_SLOT_C_SIZE_BITS) + 1 distinct values, which needs
 * (C_SLOT_C_SIZE_BITS + 4) bits.
 *
 * We use the remaining all-ones value, (16 << C_SLOT_C_SIZE_BITS) - 1,
 * to mean "unknown".
 */
141 #define C_SLOT_NO_POPCOUNT              ((16u << C_SLOT_C_SIZE_BITS) - 1)
142 
143 static_assert((C_SEG_OFFSET_BITS + C_SLOT_C_SIZE_BITS +
144 #if HAS_MTE
145     C_SLOT_C_MTE_SIZE_BITS +
146 #endif
147     C_SLOT_C_CODEC_BITS + C_SLOT_C_POPCOUNT_BITS +
148     C_SLOT_C_PADDING_BITS + C_SLOT_PACKED_PTR_BITS) % 32 == 0);
149 
/*
 * Per-slot metadata describing one compressed page held inside a c_segment.
 * There is one c_slot for every compressed page in the system, so it is
 * kept packed and 4-byte aligned to minimize per-page overhead.
 */
struct c_slot {
	/* position of this slot's data within the segment buffer
	 * (NOTE(review): presumably counted in 32-bit words like c_nextoffset,
	 * see C_SEG_OFFSET_TO_BYTES — confirm against users of c_offset) */
	uint64_t        c_offset:C_SEG_OFFSET_BITS __kernel_ptr_semantics;
	/* 0 means it's an empty slot
	 * 4 means it's a short-value that did not fit in the hash
	 * [5 : PAGE_SIZE-1] means it is normally compressed
	 * PAGE_SIZE means it was incompressible (see tag:WK-INCOMPRESSIBLE) */
	uint64_t        c_size:C_SLOT_C_SIZE_BITS;
#if HAS_MTE
	/* 0 means there are no MTE
	 * [1 : C_MTE_SIZE-1] means normally compressed tags
	 * C_MTE_SIZE means incompressible tags
	 * [C_MTE_SIZE + 1 : C_SLOT_C_MTE_SIZE_MAX] means single-tag and encodes the tag */
	uint64_t        c_mte_size:C_SLOT_C_MTE_SIZE_BITS;
#endif /* HAS_MTE */
#if C_SLOT_C_CODEC_BITS
	/* which codec produced this slot's data; only present on configurations
	 * where C_SLOT_C_CODEC_BITS is non-zero (see the per-arch values above) */
	uint64_t        c_codec:C_SLOT_C_CODEC_BITS;
#endif
#if C_SLOT_C_POPCOUNT_BITS
	/*
	 * This value may not agree with c_pop_cdata, as it may be the
	 * population count of the uncompressed data.
	 *
	 * This value must be C_SLOT_NO_POPCOUNT when the compression algorithm
	 * cannot provide it.
	 */
	uint32_t        c_inline_popcount:C_SLOT_C_POPCOUNT_BITS;
#endif
#if C_SLOT_C_PADDING_BITS
	/* explicit padding so the bitfield totals a multiple of 32 bits
	 * (enforced by the static_assert above) */
	uint64_t        c_padding:C_SLOT_C_PADDING_BITS;
#endif
	uint64_t        c_packed_ptr:C_SLOT_PACKED_PTR_BITS __kernel_ptr_semantics; /* points back to the c_slot_mapping_t in the pager */

	/* debugging fields, typically not present on release kernels */
#if CHECKSUM_THE_DATA
	unsigned int    c_hash_data;            /* hash of the (uncompressed) data, cf. CHECKSUM_THE_DATA */
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int    c_hash_compressed_data; /* hash of the compressed data, cf. CHECKSUM_THE_COMPRESSED_DATA */
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	unsigned int    c_pop_cdata;            /* population count of the compressed data */
#endif
} __attribute__((packed, aligned(4)));
193 
194 #define C_IS_EMPTY              0  /* segment was just allocated and is going to start filling */
195 #define C_IS_FREE               1  /* segment is unused, went to the free-list, unallocated */
196 #define C_IS_FILLING            2
197 #define C_ON_AGE_Q              3
198 #define C_ON_SWAPOUT_Q          4
199 #define C_ON_SWAPPEDOUT_Q       5
200 #define C_ON_SWAPPEDOUTSPARSE_Q 6  /* segment is swapped-out but some of its slots were freed */
201 #define C_ON_SWAPPEDIN_Q        7
202 #define C_ON_MAJORCOMPACT_Q     8  /* we just did major compaction on this segment */
203 #define C_ON_BAD_Q              9
204 #define C_ON_SWAPIO_Q          10
205 
206 
/*
 * A compressor segment: a buffer that accumulates many compressed pages,
 * together with the slot metadata and queue/state bookkeeping needed to
 * age, compact and swap the buffer as a unit.
 */
struct c_segment {
	lck_mtx_t       c_lock;      /* protects this segment; see also the c_busy protocol (C_SEG_BUSY) */
	queue_chain_t   c_age_list;  /* chain of the main queue this c_segment is in */
	queue_chain_t   c_list;      /* chain of c_minor_list_head, if c_on_minorcompact_q==1 */

#if CONFIG_FREEZE
	queue_chain_t   c_task_list_next_cseg;  /* chain of the owning task's segments */
	task_t          c_task_owner;           /* task owning this (frozen) segment, see task_disown_frozen_csegs() */
#endif /* CONFIG_FREEZE */

#define C_SEG_MAX_LIMIT         (UINT_MAX)       /* this needs to track the size of c_mysegno */
	uint32_t        c_mysegno;  /* my index in c_segments */

	uint32_t        c_creation_ts;  /* time filling the segment has finished, used for checking if segment reached ripe age */
	uint64_t        c_generation_id;  /* a unique id of a single lifetime of a segment */

	int32_t         c_bytes_used;    /* bytes of the buffer holding live compressed data */
	int32_t         c_bytes_unused;  /* bytes of the buffer no longer holding live data (see C_SEG_UNUSED_BYTES) */
	uint32_t        c_slots_used;    /* number of occupied slots */

	uint16_t        c_firstemptyslot;  /* index of lowest empty slot. used for instance in minor compaction to not have to start from 0 */
	uint16_t        c_nextslot;        /* index of the next available slot in either c_slot_fixed_array or c_slot_var_array */
	uint32_t        c_nextoffset;      /* next available position in the buffer space pointed by c_store.c_buffer */
	uint32_t        c_populated_offset; /* how much of the segment is populated from its beginning */
	/* c_nextoffset and c_populated_offset count ints, not bytes
	 * Invariants: - (c_nextoffset <= c_populated_offset) always
	 *             - c_nextoffset is rounded to WKDM alignment
	 *             - c_populated_offset is in quanta of PAGE_SIZE/sizeof(int) */

	union {
		int32_t *c_buffer;       /* in-core buffer holding the compressed data */
		uint64_t c_swap_handle;  /* this is populated if C_SEG_IS_ONDISK()  */
	} c_store;

#if     VALIDATE_C_SEGMENTS
	/* compaction history, kept only on validation builds */
	uint32_t        c_was_minor_compacted;
	uint32_t        c_was_major_compacted;
	uint32_t        c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int    cseg_hash;       /* checksum of the segment computed around swap, cf. CHECKSUM_THE_SWAP */
	unsigned int    cseg_swap_size;  /* size the checksum was computed over */
#endif /* CHECKSUM_THE_SWAP */

	thread_t        c_busy_for_thread;  /* thread that set c_busy, see C_SEG_BUSY()/C_SEG_WAKEUP_DONE() */
	uint32_t        c_agedin_ts;  /* time the seg got to age_q after being swapped in. used for stats*/
	uint32_t        c_swappedin_ts;  /* time the segment was swapped in */
	bool            c_swappedin;     /* segment has been swapped in; see the locking note just below */
#if TRACK_C_SEGMENT_UTILIZATION
	uint32_t        c_decompressions_since_swapin;
#endif /* TRACK_C_SEGMENT_UTILIZATION */
	/*
	 * Do not pull c_swappedin above into the bitfield below.
	 * We update it without always taking the segment
	 * lock and rely on the segment being busy instead.
	 * The bitfield needs the segment lock. So updating
	 * this state, if in the bitfield, without the lock
	 * will race with the updates to the other fields and
	 * result in a mess.
	 */
	uint32_t        c_busy:1,                   /* exclusively held, see C_SEG_BUSY() */
	    c_busy_swapping:1,
	    c_wanted:1,                             /* someone is waiting for c_busy to clear */
	    c_on_minorcompact_q:1,              /* can also be on the age_q, the majorcompact_q or the swappedin_q */

	    c_state:4,                          /* what state is the segment in which dictates which q to find it on */
	    c_overage_swap:1,
	    c_has_donated_pages:1,
#if CONFIG_FREEZE
	    c_has_freezer_pages:1,
	    c_reserved:21;                      /* unused bits */
#else /* CONFIG_FREEZE */
	c_reserved:22;                          /* unused bits */
#endif /* CONFIG_FREEZE */

	int             c_slot_var_array_len;  /* length of the allocated c_slot_var_array */
	struct  c_slot  *c_slot_var_array;     /* see C_SEG_SLOT_FROM_INDEX() */
	struct  c_slot  c_slot_fixed_array[0]; /* inline slots; overflow goes to c_slot_var_array */
};
286 
287 /*
288  * the pager holds a buffer of this 32 bit sized object, one for each page in the vm_object,
289  * to refer to a specific slot in a specific segment in the compressor
290  */
struct  c_slot_mapping {
#if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
	uint32_t        s_cseg:22,      /* segment number + 1 (0 would mean "no segment") */
	    s_cindx:10;                 /* index of slot in the segment, see also C_SLOT_MAX_INDEX */
	/* in the case of a single-value (sv) page, s_cseg==C_SV_CSEG_ID and s_cindx is the
	 * index into c_segment_sv_hash_table
	 */
#else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
	uint32_t        s_cseg:21,      /* segment number + 1 (one bit narrower to make room for s_uncompressed) */
	    s_cindx:10,                 /* index in the segment */
	    s_uncompressed:1;           /* This bit indicates that the page resides uncompressed in a swapfile.
	                                 * This can happen in 2 ways:-
	                                 * 1) Page used to be in the compressor, got decompressed, was not
	                                 * modified, and so was pushed uncompressed to a different swapfile on disk.
	                                 * 2) Page was in its uncompressed form in a swapfile on disk. It got swapped in
	                                 * but was not modified. As we are about to reclaim it, we notice that this bit
	                                 * is set in its current slot. And so we can safely toss this clean anonymous page
	                                 * because its copy exists on disk.
	                                 */
#endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
};
/* maximum number of slots per segment: s_cindx is a 10-bit field */
#define C_SLOT_MAX_INDEX        (1 << 10)
313 
314 typedef struct c_slot_mapping *c_slot_mapping_t;
315 
316 
317 extern  int             c_seg_fixed_array_len;
318 extern  vm_offset_t     c_buffers;
319 extern _Atomic uint64_t c_segment_compressed_bytes;
320 
/* VA of segment c_segno's buffer: segments are laid out back-to-back in the
 * compressor map, c_seg_allocsize apart.
 * (c_segno is now parenthesized so expression arguments aren't misgrouped by
 * the (uint64_t) cast / the multiplication.) */
#define C_SEG_BUFFER_ADDRESS(c_segno)   ((c_buffers + ((uint64_t)(c_segno) * (uint64_t)c_seg_allocsize)))
322 
/* slot lookup: slots [0, c_seg_fixed_array_len) live in the segment's inline
 * c_slot_fixed_array, the remainder in the separately allocated c_slot_var_array.
 * (cseg and index are now parenthesized per standard macro hygiene.) */
#define C_SEG_SLOT_FROM_INDEX(cseg, index)      ((index) < c_seg_fixed_array_len ? &((cseg)->c_slot_fixed_array[(index)]) : &((cseg)->c_slot_var_array[(index) - c_seg_fixed_array_len]))
324 
/* conversions between buffer offsets (counted in 32-bit words) and bytes */
#define C_SEG_OFFSET_TO_BYTES(off)      ((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)    ((bytes) / (int) sizeof(int32_t))

/* bytes of the segment buffer not holding live data: freed-slot holes
 * (c_bytes_unused) plus the populated-but-not-yet-filled tail */
#define C_SEG_UNUSED_BYTES(cseg)        ((cseg)->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES((cseg)->c_populated_offset - (cseg)->c_nextoffset)))

#ifndef __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_MASK     0x3ULL
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY 0x4
#else
#define C_SEG_OFFSET_ALIGNMENT_MASK     __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__
#endif

/* round an offset/size up to the next multiple of the wkdm write alignment */
#define C_SEG_ROUND_TO_ALIGNMENT(offset) \
	(((offset) + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK)

/* minor-compact once at least a quarter of the buffer is reclaimable */
#define C_SEG_SHOULD_MINORCOMPACT_NOW(cseg)     ((C_SEG_UNUSED_BYTES(cseg) >= (c_seg_bufsize / 4)) ? 1 : 0)

/*
 * the decision to force a c_seg to be major compacted is based on 2 criteria
 * 1) is the c_seg buffer almost empty (i.e. we have a chance to merge it with another c_seg)
 * 2) are there at least a minimum number of slots unoccupied so that we have a chance
 *    of combining this c_seg with another one.
 *
 * NOTE(review): the macro body used to reference "c_seg->c_nextoffset",
 * silently capturing a caller-scope variable named "c_seg" instead of using
 * the "cseg" parameter (it only compiled when the caller's variable happened
 * to have that exact name). It now consistently uses the parameter.
 */
#define C_SEG_SHOULD_MAJORCOMPACT_NOW(cseg)                                                                                     \
	(((((cseg)->c_bytes_unused + (c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((cseg)->c_nextoffset))) >= (c_seg_bufsize / 8)) && \
	  ((C_SLOT_MAX_INDEX - (cseg)->c_slots_used) > (c_seg_bufsize / PAGE_SIZE))) \
	? 1 : 0)
354 
/* an on-disk segment is "sparse" when it holds more freed bytes than live data */
#define C_SEG_ONDISK_IS_SPARSE(cseg)    ((cseg->c_bytes_used < cseg->c_bytes_unused) ? 1 : 0)
/* segment's data lives in a swapfile: c_store.c_swap_handle is valid, c_buffer is not */
#define C_SEG_IS_ONDISK(cseg)           ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q))
/* segment is on disk, or on its way there (queued for, or in the middle of, swapout I/O) */
#define C_SEG_IS_ON_DISK_OR_SOQ(cseg)   ((cseg->c_state == C_ON_SWAPPEDOUT_Q || \
	                                  cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q || \
	                                  cseg->c_state == C_ON_SWAPOUT_Q || \
	                                  cseg->c_state == C_ON_SWAPIO_Q))


/*
 * Drop the busy marker taken with C_SEG_BUSY() and wake any thread that
 * set c_wanted while waiting for this segment to become available.
 */
#define C_SEG_WAKEUP_DONE(cseg)                         \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy);                         \
	(cseg)->c_busy = 0;                             \
	assert((cseg)->c_busy_for_thread != NULL);      \
	(cseg)->c_busy_for_thread = NULL;               \
	if ((cseg)->c_wanted) {                         \
	        (cseg)->c_wanted = 0;                   \
	        thread_wakeup((event_t) (cseg));        \
	}                                               \
	MACRO_END

/*
 * Mark the segment busy for the current thread; it must not already be
 * busy. Paired with C_SEG_WAKEUP_DONE().
 */
#define C_SEG_BUSY(cseg)                                \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy == 0);                    \
	(cseg)->c_busy = 1;                             \
	assert((cseg)->c_busy_for_thread == NULL);      \
	(cseg)->c_busy_for_thread = current_thread();   \
	MACRO_END
382 
383 
384 extern vm_map_t compressor_map;
385 
#if CONFIG_CSEG_MPROTECT
/* when set, segment buffers are kept read-only except while being written */
extern bool write_protect_c_segs;
/* debug knob: after protecting, deliberately store to the buffer so the
 * write protection is exercised (the store should fault) */
extern int vm_compressor_test_seg_wp;

/* make the segment's whole buffer writable again (e.g. before filling/compacting it) */
#define C_SEG_MAKE_WRITEABLE(cseg)                      \
	MACRO_BEGIN                                     \
	if (write_protect_c_segs) {                     \
	        vm_map_protect(compressor_map,                  \
	                       (vm_map_offset_t)cseg->c_store.c_buffer,         \
	                       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)],\
	                       0, VM_PROT_READ | VM_PROT_WRITE);    \
	}                               \
	MACRO_END

/* drop the segment's buffer back to read-only; optionally test-write to it */
#define C_SEG_WRITE_PROTECT(cseg)                       \
	MACRO_BEGIN                                     \
	if (write_protect_c_segs) {                     \
	        vm_map_protect(compressor_map,                  \
	                       (vm_map_offset_t)cseg->c_store.c_buffer,         \
	                       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)],\
	                       0, VM_PROT_READ);                    \
	}                                                       \
	if (vm_compressor_test_seg_wp) {                                \
	        volatile uint32_t vmtstmp = *(volatile uint32_t *)cseg->c_store.c_buffer; \
	        *(volatile uint32_t *)cseg->c_store.c_buffer = 0xDEADABCD; \
	        (void) vmtstmp;                                         \
	}                                                               \
	MACRO_END
#else /* !CONFIG_CSEG_MPROTECT */
/* segment write protection compiled out: both operations are no-ops */
#define C_SEG_MAKE_WRITEABLE(cseg)
#define C_SEG_WRITE_PROTECT(cseg)
#endif /* CONFIG_CSEG_MPROTECT */
418 
419 typedef struct c_segment *c_segment_t;
420 typedef struct c_slot   *c_slot_t;
421 
422 void vm_decompressor_lock(void);
423 void vm_decompressor_unlock(void);
424 void vm_compressor_delay_trim(void);
425 void vm_compressor_do_warmup(void);
426 
427 extern kern_return_t    vm_swap_get(c_segment_t, uint64_t, uint64_t);
428 
429 extern _Atomic uint64_t compressor_bytes_used;
430 extern uint32_t         swapout_target_age;
431 
432 extern uint32_t vm_compressor_minorcompact_threshold_divisor;
433 extern uint32_t vm_compressor_majorcompact_threshold_divisor;
434 extern uint32_t vm_compressor_unthrottle_threshold_divisor;
435 extern uint32_t vm_compressor_catchup_threshold_divisor;
436 
437 extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
438 extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
439 extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
440 extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;
441 
/*
 * Buffers pre-allocated by vm_compressor_kdp_init() so compressor-backed
 * pages can be decompressed from kernel-debugger (KDP) and panic contexts,
 * where normal allocation is unavailable.
 */
struct vm_compressor_kdp_state {
	char           *kc_scratch_bufs;              /* scratch space for decompression */
	char           *kc_decompressed_pages;        /* destination pages for decompressed data */
	addr64_t       *kc_decompressed_pages_paddr;  /* physical addresses of the above pages */
	ppnum_t        *kc_decompressed_pages_ppnum;  /* physical page numbers of the above pages */
	char           *kc_panic_scratch_buf;         /* dedicated scratch buffer for panic context */
	char           *kc_panic_decompressed_page;   /* dedicated destination page for panic context */
	addr64_t        kc_panic_decompressed_page_paddr;  /* its physical address */
	ppnum_t         kc_panic_decompressed_page_ppnum;  /* its physical page number */
};
452 extern struct vm_compressor_kdp_state vm_compressor_kdp_state;
453 
454 extern void kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo);
455 extern kern_return_t vm_compressor_kdp_init(void);
456 extern void vm_compressor_kdp_teardown(void);
457 
458 /*
459  * TODO, there may be a minor optimisation opportunity to replace these divisions
460  * with multiplies and shifts
461  *
462  * By multiplying by 10, the divisors can have more precision w/o resorting to floating point... a divisor specified as 25 is in reality a divide by 2.5
463  * By multiplying by 9, you get a number ~11% smaller which allows us to have another limit point derived from the same base
464  * By multiplying by 11, you get a number ~10% bigger which allows us to generate a reset limit derived from the same base which is useful for hysteresis
465  */
466 
467 #define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD            (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 10))
468 #define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD               (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 10))
469 
470 #define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 10))
471 #define VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 11) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 11))
472 
473 #define VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD  (((AVAILABLE_MEMORY) * 11) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 11))
474 #define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD       (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 10))
475 #define VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD      (((AVAILABLE_MEMORY) * 9) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 9))
476 
477 #if !XNU_TARGET_OS_OSX
478 #define AVAILABLE_NON_COMPRESSED_MIN                    20000
479 #define COMPRESSOR_NEEDS_TO_SWAP()              (((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) || \
480 	                                          (AVAILABLE_NON_COMPRESSED_MEMORY < AVAILABLE_NON_COMPRESSED_MIN)) ? 1 : 0)
481 #else /* !XNU_TARGET_OS_OSX */
482 #define COMPRESSOR_NEEDS_TO_SWAP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)
483 #endif /* !XNU_TARGET_OS_OSX */
484 
485 #define HARD_THROTTLE_LIMIT_REACHED()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD) ? 1 : 0)
486 #define SWAPPER_NEEDS_TO_UNTHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
487 #define SWAPPER_NEEDS_TO_RETHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD) ? 1 : 0)
488 #define SWAPPER_NEEDS_TO_CATCHUP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0)
489 #define SWAPPER_HAS_CAUGHTUP()                  ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD) ? 1 : 0)
490 
491 
492 #if !XNU_TARGET_OS_OSX
493 #define COMPRESSOR_FREE_RESERVED_LIMIT          28
494 #else /* !XNU_TARGET_OS_OSX */
495 #define COMPRESSOR_FREE_RESERVED_LIMIT          128
496 #endif /* !XNU_TARGET_OS_OSX */
497 
498 #define COMPRESSOR_SCRATCH_BUF_SIZE vm_compressor_get_encode_scratch_size()
499 
500 extern lck_mtx_t c_list_lock_storage;
501 #define          c_list_lock (&c_list_lock_storage)
502 
503 #if DEVELOPMENT || DEBUG
504 extern uint32_t vm_ktrace_enabled;
505 
/* VM tracepoint wrapper: emit a kdebug event only when vm_ktrace_enabled is set */
#define VMKDBG(x, ...)          \
MACRO_BEGIN                     \
if (vm_ktrace_enabled) {        \
	KDBG(x, ## __VA_ARGS__);\
}                               \
MACRO_END
512 
513 extern bool compressor_running_perf_test;
514 extern uint64_t compressor_perf_test_pages_processed;
515 #endif /* DEVELOPMENT || DEBUG */
516 
517 #endif /* MACH_KERNEL_PRIVATE */
518 
519 /*
520  * @func vm_swap_low_on_space
521  *
522  * @brief Return true if the system is running low on swap space
523  *
524  * @discussion
525  * Returns true if the number of free swapfile segments is low and we aren't
526  * likely to be able to create another swapfile (e.g. because the swapfile
527  * creation thread has failed to create a new swapfile).
528  */
529 extern bool vm_swap_low_on_space(void);
530 
531 /*
532  * @func vm_swap_out_of_space
533  *
 * @brief Return true if the system has totally exhausted its swap space
535  *
536  * @discussion
537  * Returns true iff all free swapfile segments have been exhausted and we aren't
538  * able to create another swapfile (because we've reached the configured limit).
539  * Unlike @c vm_swap_low_on_space(), @c vm_swap_out_of_space() will not return
540  * true if the swapfile creation thread has failed in the recent past -- even
541  * if we've run out of swapfile segments. This is because conditions may change
542  * and allow for future creation of new swapfiles.
543  */
544 extern bool vm_swap_out_of_space(void);
545 
546 #define HIBERNATE_FLUSHING_SECS_TO_COMPLETE     120
547 
548 #if DEVELOPMENT || DEBUG
549 int do_cseg_wedge_thread(void);
550 int do_cseg_unwedge_thread(void);
551 #endif /* DEVELOPMENT || DEBUG */
552 
553 #if CONFIG_FREEZE
554 void task_disown_frozen_csegs(task_t owner_task);
555 #endif /* CONFIG_FREEZE */
556 
557 void vm_wake_compactor_swapper(void);
558 extern void             vm_swap_consider_defragmenting(int);
559 void vm_run_compactor(void);
560 void vm_thrashing_jetsam_done(void);
561 
562 uint32_t vm_compression_ratio(void);
563 uint32_t vm_compressor_pool_size(void);
564 uint32_t vm_compressor_fragmentation_level(void);
565 uint32_t vm_compressor_incore_fragmentation_wasted_pages(void);
566 bool vm_compressor_is_thrashing(void);
567 bool vm_compressor_swapout_is_ripe(void);
568 uint32_t vm_compressor_pages_compressed(void);
569 void vm_compressor_process_special_swapped_in_segments(void);
570 uint32_t vm_compressor_get_swapped_segment_count(void);
571 
572 
573 #if DEVELOPMENT || DEBUG
574 __enum_closed_decl(vm_c_serialize_add_data_t, uint32_t, {
575 	VM_C_SERIALIZE_DATA_NONE,
576 #if HAS_MTE
577 	VM_C_SERIALIZE_DATA_TAGS,
578 #endif /* HAS_MTE */
579 });
580 kern_return_t vm_compressor_serialize_segment_debug_info(int segno, char *buf, size_t *size, vm_c_serialize_add_data_t with_data);
581 #endif /* DEVELOPMENT || DEBUG */
582 
583 extern bool vm_compressor_low_on_space(void);
584 extern bool vm_compressor_compressed_pages_nearing_limit(void);
585 extern bool vm_compressor_out_of_space(void);
586 
587 #if HAS_MTE
588 // number of tagged pages ever sent to the compressor
589 SCALABLE_COUNTER_DECLARE(compressor_tagged_pages_compressed);
590 // different reasons why tagged pages were removed from the compressor
591 SCALABLE_COUNTER_DECLARE(compressor_tagged_pages_decompressed);
592 SCALABLE_COUNTER_DECLARE(compressor_tagged_pages_freed);
593 SCALABLE_COUNTER_DECLARE(compressor_tagged_pages_corrupted);
594 // current number of bytes taken by compressed tags in the compressor
595 SCALABLE_COUNTER_DECLARE(compressor_tags_overhead_bytes);
596 // current number of tagged pages that reside in the compressor
597 SCALABLE_COUNTER_DECLARE(compressor_tagged_pages);
598 // current number of tag storage pages composing the compressor pool
599 SCALABLE_COUNTER_DECLARE(compressor_tag_storage_pages_in_pool);
600 // current number of non-tag storage pages composing the compressor pool
601 SCALABLE_COUNTER_DECLARE(compressor_non_tag_storage_pages_in_pool);
602 // the following is a breakdown of tagged_pages_compressed
603 #if DEVELOPMENT || DEBUG
604 SCALABLE_COUNTER_DECLARE(compressor_tags_all_zero);
605 SCALABLE_COUNTER_DECLARE(compressor_tags_same_value);
606 SCALABLE_COUNTER_DECLARE(compressor_tags_below_align);
607 SCALABLE_COUNTER_DECLARE(compressor_tags_above_align);
608 SCALABLE_COUNTER_DECLARE(compressor_tags_incompressible);
609 #endif /* DEVELOPMENT || DEBUG */
610 #endif /* HAS_MTE */
611 
612 #endif /* _VM_VM_COMPRESSOR_XNU_H_ */
613