xref: /xnu-11215.81.4/osfmk/vm/vm_compressor_xnu.h (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _VM_VM_COMPRESSOR_XNU_H_
29 #define _VM_VM_COMPRESSOR_XNU_H_
30 
31 #ifdef MACH_KERNEL_PRIVATE
32 
33 #include <vm/vm_kern.h>
34 #include <vm/vm_page.h>
35 #include <vm/vm_protos.h>
36 #include <vm/WKdm_new.h>
37 #include <vm/vm_object_xnu.h>
38 #include <vm/vm_map.h>
39 #include <machine/pmap.h>
40 #include <kern/locks.h>
41 
42 #include <sys/kdebug.h>
43 
44 #if defined(__arm64__)
45 #include <arm64/proc_reg.h>
46 #endif
47 
/* width of a slot's c_offset field; bounds how far into a segment a slot can point */
#define C_SEG_OFFSET_BITS       16

/* largest chunk of a segment buffer populated (backed with pages) at a time */
#define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE)

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)

/* watchOS dev/debug kernels always validate segments during compaction */
#if defined(XNU_PLATFORM_WatchOS)
#define VALIDATE_C_SEGMENTS (1)
#endif
#endif /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */


/*
 * Integrity-checking knobs: enabled on DEBUG (or when explicitly requested
 * via COMPRESSOR_INTEGRITY_CHECKS), compiled out otherwise.
 * Note POPCOUNT_THE_COMPRESSED_DATA has no #else leg; it is simply
 * undefined (and thus 0 in #if context) on release kernels.
 */
#if DEBUG || COMPRESSOR_INTEGRITY_CHECKS
#define ENABLE_SWAP_CHECKS 1
#define ENABLE_COMPRESSOR_CHECKS 1
#define POPCOUNT_THE_COMPRESSED_DATA (1)
#else
#define ENABLE_SWAP_CHECKS 0
#define ENABLE_COMPRESSOR_CHECKS 0
#endif

#define CHECKSUM_THE_SWAP               ENABLE_SWAP_CHECKS              /* Debug swap data */
#define CHECKSUM_THE_DATA               ENABLE_COMPRESSOR_CHECKS        /* Debug compressor/decompressor data */
#define CHECKSUM_THE_COMPRESSED_DATA    ENABLE_COMPRESSOR_CHECKS        /* Debug compressor/decompressor compressed data */

/* may already be forced on above (watchOS dev/debug) */
#ifndef VALIDATE_C_SEGMENTS
#define VALIDATE_C_SEGMENTS             ENABLE_COMPRESSOR_CHECKS        /* Debug compaction */
#endif

#define RECORD_THE_COMPRESSED_DATA      0
#define TRACK_C_SEGMENT_UTILIZATION     0
79 
80 /*
81  * The c_slot structure embeds a packed pointer to a c_slot_mapping
82  * (32bits) which we ideally want to span as much VA space as possible
83  * to not limit zalloc in how it sets itself up.
84  */
85 #if !defined(__LP64__)                  /* no packing */
86 #define C_SLOT_PACKED_PTR_BITS          32
87 #define C_SLOT_PACKED_PTR_SHIFT         0
88 #define C_SLOT_PACKED_PTR_BASE          0
89 
90 #define C_SLOT_C_SIZE_BITS              12
91 #define C_SLOT_C_CODEC_BITS             1
92 #define C_SLOT_C_POPCOUNT_BITS          0
93 #define C_SLOT_C_PADDING_BITS           3
94 
95 #elif defined(__arm64__)                /* 32G from the heap start */
96 #define C_SLOT_PACKED_PTR_BITS          33
97 #define C_SLOT_PACKED_PTR_SHIFT         2
98 #define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)
99 
100 #define C_SLOT_C_SIZE_BITS              14
101 #define C_SLOT_C_CODEC_BITS             1
102 #define C_SLOT_C_POPCOUNT_BITS          0
103 #define C_SLOT_C_PADDING_BITS           0
104 
105 #elif defined(__x86_64__)               /* 256G from the heap start */
106 #define C_SLOT_PACKED_PTR_BITS          36
107 #define C_SLOT_PACKED_PTR_SHIFT         2
108 #define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)
109 
110 #define C_SLOT_C_SIZE_BITS              12
111 #define C_SLOT_C_CODEC_BITS             0 /* not used */
112 #define C_SLOT_C_POPCOUNT_BITS          0
113 #define C_SLOT_C_PADDING_BITS           0
114 
115 #else
116 #error vm_compressor parameters undefined for this architecture
117 #endif
118 
119 /*
120  * Popcounts needs to represent both 0 and full which requires
121  * (8 ^ C_SLOT_C_SIZE_BITS) + 1 values and (C_SLOT_C_SIZE_BITS + 4) bits.
122  *
123  * We us the (2 * (8 ^ C_SLOT_C_SIZE_BITS) - 1) value to mean "unknown".
124  */
125 #define C_SLOT_NO_POPCOUNT              ((16u << C_SLOT_C_SIZE_BITS) - 1)
126 
127 static_assert((C_SEG_OFFSET_BITS + C_SLOT_C_SIZE_BITS +
128     C_SLOT_C_CODEC_BITS + C_SLOT_C_POPCOUNT_BITS +
129     C_SLOT_C_PADDING_BITS + C_SLOT_PACKED_PTR_BITS) % 32 == 0);
130 
/*
 * Per-compressed-page metadata stored inside a c_segment.
 * Packed to 4-byte alignment; field widths come from the per-arch
 * C_SLOT_* parameters above so the bitfields tile into 32-bit words.
 */
struct c_slot {
	/* offset of this slot's compressed data within the segment buffer,
	 * in 32-bit words (see C_SEG_OFFSET_TO_BYTES) */
	uint64_t        c_offset:C_SEG_OFFSET_BITS __kernel_ptr_semantics;
	uint64_t        c_size:C_SLOT_C_SIZE_BITS; /* 0 means it's an empty slot */
#if C_SLOT_C_CODEC_BITS
	/* NOTE(review): presumably selects which codec produced the data (e.g. WKdm vs LZ4) -- confirm in the compressor codepath */
	uint64_t        c_codec:C_SLOT_C_CODEC_BITS;
#endif
#if C_SLOT_C_POPCOUNT_BITS
	/*
	 * This value may not agree with c_pop_cdata, as it may be the
	 * population count of the uncompressed data.
	 *
	 * This value must be C_SLOT_NO_POPCOUNT when the compression algorithm
	 * cannot provide it.
	 */
	uint32_t        c_inline_popcount:C_SLOT_C_POPCOUNT_BITS;
#endif
#if C_SLOT_C_PADDING_BITS
	uint64_t        c_padding:C_SLOT_C_PADDING_BITS;
#endif
	uint64_t        c_packed_ptr:C_SLOT_PACKED_PTR_BITS __kernel_ptr_semantics; /* points back to the c_slot_mapping_t in the pager */

	/* debugging fields, typically not present on release kernels */
#if CHECKSUM_THE_DATA
	unsigned int    c_hash_data;             /* checksum of the uncompressed page */
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int    c_hash_compressed_data;  /* checksum of the compressed bytes */
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	unsigned int    c_pop_cdata;             /* popcount of the compressed bytes */
#endif
} __attribute__((packed, aligned(4)));
163 
/*
 * c_state values for a c_segment (stored in the 4-bit c_state field below).
 * States without upstream comments are described from their names and queue
 * usage -- NOTE(review): verify details against the state machine in vm_compressor.c.
 */
#define C_IS_EMPTY              0  /* segment was just allocated and is going to start filling */
#define C_IS_FREE               1  /* segment is unused, went to the free-list, unallocated */
#define C_IS_FILLING            2  /* segment is currently accepting newly compressed pages */
#define C_ON_AGE_Q              3  /* done filling; aging until considered for swapout */
#define C_ON_SWAPOUT_Q          4  /* selected for swapout (see C_SEG_IS_ON_DISK_OR_SOQ) */
#define C_ON_SWAPPEDOUT_Q       5  /* segment data resides on disk (C_SEG_IS_ONDISK) */
#define C_ON_SWAPPEDOUTSPARSE_Q 6  /* segment is swapped-out but some of its slots were freed */
#define C_ON_SWAPPEDIN_Q        7  /* recently brought back in from disk */
#define C_ON_MAJORCOMPACT_Q     8  /* we just did major compaction on this segment */
#define C_ON_BAD_Q              9  /* segment taken out of service -- inferred from name */
#define C_ON_SWAPIO_Q          10  /* swap I/O in progress (see C_SEG_IS_ON_DISK_OR_SOQ) */
175 
176 
/*
 * A compressor segment: a buffer holding many compressed pages plus the
 * slot array describing them.  Protected by c_lock except where noted.
 */
struct c_segment {
	lck_mtx_t       c_lock;      /* per-segment lock; required for the bitfield below */
	queue_chain_t   c_age_list;  /* chain of the main queue this c_segment is in */
	queue_chain_t   c_list;      /* chain of c_minor_list_head, if c_on_minorcompact_q==1 */

#if CONFIG_FREEZE
	queue_chain_t   c_task_list_next_cseg;  /* chain of the owning task's frozen segments */
	task_t          c_task_owner;           /* task this frozen segment is accounted to */
#endif /* CONFIG_FREEZE */

#define C_SEG_MAX_LIMIT         (UINT_MAX)       /* this needs to track the size of c_mysegno */
	uint32_t        c_mysegno;  /* my index in c_segments */

	uint32_t        c_creation_ts;  /* time filling the segment has finished, used for checking if segment reached ripe age */
	uint64_t        c_generation_id;  /* a unique id of a single lifetime of a segment */

	int32_t         c_bytes_used;    /* bytes of live compressed data */
	int32_t         c_bytes_unused;  /* bytes belonging to freed slots (see C_SEG_UNUSED_BYTES) */
	uint32_t        c_slots_used;    /* number of occupied slots */

	uint16_t        c_firstemptyslot;  /* index of lowest empty slot. used for instance in minor compaction to not have to start from 0 */
	uint16_t        c_nextslot;        /* index of the next available slot in either c_slot_fixed_array or c_slot_var_array */
	uint32_t        c_nextoffset;      /* next available position in the buffer space pointed by c_store.c_buffer */
	uint32_t        c_populated_offset;  /* extent of the buffer backed with pages, in 32-bit words */

	union {
		int32_t *c_buffer;       /* in-core compressed data */
		uint64_t c_swap_handle;  /* this is populated if C_SEG_IS_ONDISK()  */
	} c_store;

#if     VALIDATE_C_SEGMENTS
	uint32_t        c_was_minor_compacted;
	uint32_t        c_was_major_compacted;
	uint32_t        c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int    cseg_hash;       /* checksum of segment contents at swapout */
	unsigned int    cseg_swap_size;  /* size checksummed, verified on swapin */
#endif /* CHECKSUM_THE_SWAP */

	thread_t        c_busy_for_thread;  /* thread that set c_busy (see C_SEG_BUSY) */
	uint32_t        c_agedin_ts;  /* time the seg got to age_q after being swapped in. used for stats*/
	uint32_t        c_swappedin_ts;  /* time of the most recent swapin */
	bool            c_swappedin;     /* updated while busy, not under c_lock -- see comment below */
#if TRACK_C_SEGMENT_UTILIZATION
	uint32_t        c_decompressions_since_swapin;
#endif /* TRACK_C_SEGMENT_UTILIZATION */
	/*
	 * Do not pull c_swappedin above into the bitfield below.
	 * We update it without always taking the segment
	 * lock and rely on the segment being busy instead.
	 * The bitfield needs the segment lock. So updating
	 * this state, if in the bitfield, without the lock
	 * will race with the updates to the other fields and
	 * result in a mess.
	 */
	uint32_t        c_busy:1,
	    c_busy_swapping:1,
	    c_wanted:1,
	    c_on_minorcompact_q:1,              /* can also be on the age_q, the majorcompact_q or the swappedin_q */

	    c_state:4,                          /* what state is the segment in which dictates which q to find it on */
	    c_overage_swap:1,
	    c_has_donated_pages:1,
#if CONFIG_FREEZE
	    c_has_freezer_pages:1,
	    c_reserved:21;
#else /* CONFIG_FREEZE */
	c_reserved:22;
#endif /* CONFIG_FREEZE */

	int             c_slot_var_array_len;  /* length of the allocated c_slot_var_array */
	struct  c_slot  *c_slot_var_array;     /* see C_SEG_SLOT_FROM_INDEX() */
	struct  c_slot  c_slot_fixed_array[0];  /* inline slots, allocated with the segment */
};
252 
253 /*
254  * the pager holds a buffer of this 32 bit sized object, one for each page in the vm_object,
255  * to refer to a specific slot in a specific segment in the compressor
256  */
257 struct  c_slot_mapping {
258 #if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
259 	uint32_t        s_cseg:22,      /* segment number + 1 */
260 	    s_cindx:10;                 /* index of slot in the segment, see also C_SLOT_MAX_INDEX */
261 	/* in the case of a single-value (sv) page, s_cseg==C_SV_CSEG_ID and s_cindx is the
262 	 * index into c_segment_sv_hash_table
263 	 */
264 #else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
265 	uint32_t        s_cseg:21,      /* segment number + 1 */
266 	    s_cindx:10,                 /* index in the segment */
267 	    s_uncompressed:1;           /* This bit indicates that the page resides uncompressed in a swapfile.
268 	                                 * This can happen in 2 ways:-
269 	                                 * 1) Page used to be in the compressor, got decompressed, was not
270 	                                 * modified, and so was pushed uncompressed to a different swapfile on disk.
271 	                                 * 2) Page was in its uncompressed form in a swapfile on disk. It got swapped in
272 	                                 * but was not modified. As we are about to reclaim it, we notice that this bit
273 	                                 * is set in its current slot. And so we can safely toss this clean anonymous page
274 	                                 * because its copy exists on disk.
275 	                                 */
276 #endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
277 };
278 #define C_SLOT_MAX_INDEX        (1 << 10)
279 
280 typedef struct c_slot_mapping *c_slot_mapping_t;
281 
282 
283 extern  int             c_seg_fixed_array_len;
284 extern  vm_offset_t     c_buffers;
285 extern _Atomic uint64_t c_segment_compressed_bytes;
286 
/*
 * Accessors for a segment's backing buffer and slot array.
 * All macro parameters are now fully parenthesized so the macros remain
 * correct when invoked with expressions (e.g. `segno + 1`); previously
 * `(uint64_t)c_segno` would have cast only the first term of a sum.
 */

/* kernel VA of the buffer backing segment number c_segno */
#define C_SEG_BUFFER_ADDRESS(c_segno)   ((c_buffers + ((uint64_t)(c_segno) * (uint64_t)c_seg_allocsize)))

/* slots live inline in c_slot_fixed_array up to c_seg_fixed_array_len, then spill into c_slot_var_array */
#define C_SEG_SLOT_FROM_INDEX(cseg, index)      ((index) < c_seg_fixed_array_len ? &((cseg)->c_slot_fixed_array[(index)]) : &((cseg)->c_slot_var_array[(index) - c_seg_fixed_array_len]))

/* buffer offsets (c_offset/c_nextoffset/c_populated_offset) count 32-bit words, not bytes */
#define C_SEG_OFFSET_TO_BYTES(off)      ((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)    ((bytes) / (int) sizeof(int32_t))

/* bytes freed by dead slots plus the populated-but-unwritten tail of the buffer */
#define C_SEG_UNUSED_BYTES(cseg)        ((cseg)->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES((cseg)->c_populated_offset - (cseg)->c_nextoffset)))
295 
/*
 * Alignment of compressed data within a segment buffer: use the WKdm
 * codec's platform-specified alignment when available, otherwise fall
 * back to 4-byte (one buffer word) alignment.
 */
#ifndef __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_MASK     0x3ULL
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY 0x4
#else
#define C_SEG_OFFSET_ALIGNMENT_MASK     __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__
#endif

/* minor-compact once a quarter of the buffer is reclaimable */
#define C_SEG_SHOULD_MINORCOMPACT_NOW(cseg)     ((C_SEG_UNUSED_BYTES(cseg) >= (c_seg_bufsize / 4)) ? 1 : 0)
305 
306 /*
307  * the decsion to force a c_seg to be major compacted is based on 2 criteria
308  * 1) is the c_seg buffer almost empty (i.e. we have a chance to merge it with another c_seg)
309  * 2) are there at least a minimum number of slots unoccupied so that we have a chance
310  *    of combining this c_seg with another one.
311  */
312 #define C_SEG_SHOULD_MAJORCOMPACT_NOW(cseg)                                                                                     \
313 	((((cseg->c_bytes_unused + (c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset))) >= (c_seg_bufsize / 8)) &&     \
314 	  ((C_SLOT_MAX_INDEX - cseg->c_slots_used) > (c_seg_bufsize / PAGE_SIZE))) \
315 	? 1 : 0)
316 
317 #define C_SEG_ONDISK_IS_SPARSE(cseg)    ((cseg->c_bytes_used < cseg->c_bytes_unused) ? 1 : 0)
318 #define C_SEG_IS_ONDISK(cseg)           ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q))
319 #define C_SEG_IS_ON_DISK_OR_SOQ(cseg)   ((cseg->c_state == C_ON_SWAPPEDOUT_Q || \
320 	                                  cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q || \
321 	                                  cseg->c_state == C_ON_SWAPOUT_Q || \
322 	                                  cseg->c_state == C_ON_SWAPIO_Q))
323 
324 
/*
 * Drop a segment's busy state and wake any thread that set c_wanted while
 * waiting for it.  Asserts that some thread actually owns the busy state.
 */
#define C_SEG_WAKEUP_DONE(cseg)                         \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy);                         \
	(cseg)->c_busy = 0;                             \
	assert((cseg)->c_busy_for_thread != NULL);      \
	(cseg)->c_busy_for_thread = NULL;               \
	if ((cseg)->c_wanted) {                         \
	        (cseg)->c_wanted = 0;                   \
	        thread_wakeup((event_t) (cseg));        \
	}                                               \
	MACRO_END

/* mark a segment busy and record the owning thread (used by assertions and kdp) */
#define C_SEG_BUSY(cseg)                                \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy == 0);                    \
	(cseg)->c_busy = 1;                             \
	assert((cseg)->c_busy_for_thread == NULL);      \
	(cseg)->c_busy_for_thread = current_thread();   \
	MACRO_END
344 
345 
extern vm_map_t compressor_map;  /* submap containing the segment buffers */

#if DEVELOPMENT || DEBUG
extern boolean_t write_protect_c_segs;   /* keep segment buffers read-only except while writing them */
extern int vm_compressor_test_seg_wp;    /* deliberately store to a protected buffer to verify the protection trips */

/* re-enable writes to a segment's buffer before modifying it */
#define C_SEG_MAKE_WRITEABLE(cseg)                      \
	MACRO_BEGIN                                     \
	if (write_protect_c_segs) {                     \
	        vm_map_protect(compressor_map,                  \
	                       (vm_map_offset_t)cseg->c_store.c_buffer,         \
	                       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)],\
	                       0, VM_PROT_READ | VM_PROT_WRITE);    \
	}                               \
	MACRO_END

/*
 * Drop a segment's buffer back to read-only; optionally perform a test
 * store (0xDEADABCD) which should fault if the protection is effective.
 */
#define C_SEG_WRITE_PROTECT(cseg)                       \
	MACRO_BEGIN                                     \
	if (write_protect_c_segs) {                     \
	        vm_map_protect(compressor_map,                  \
	                       (vm_map_offset_t)cseg->c_store.c_buffer,         \
	                       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)],\
	                       0, VM_PROT_READ);                    \
	}                                                       \
	if (vm_compressor_test_seg_wp) {                                \
	        volatile uint32_t vmtstmp = *(volatile uint32_t *)cseg->c_store.c_buffer; \
	        *(volatile uint32_t *)cseg->c_store.c_buffer = 0xDEADABCD; \
	        (void) vmtstmp;                                         \
	}                                                               \
	MACRO_END
#endif
377 
typedef struct c_segment *c_segment_t;
typedef struct c_slot   *c_slot_t;

/* compressor-wide control entry points (defined elsewhere in the VM subsystem) */
void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);
void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);

/* swap-space state queries and swapin of a segment's data */
extern bool             vm_swap_low_on_space(void);
extern int              vm_swap_out_of_space(void);
extern kern_return_t    vm_swap_get(c_segment_t, uint64_t, uint64_t);

/* per-queue segment counts, one per c_state queue */
extern uint32_t         c_age_count;
extern uint32_t         c_early_swapout_count, c_regular_swapout_count, c_late_swapout_count;
extern uint32_t         c_swappedout_count;
extern uint32_t         c_swappedout_sparse_count;

extern _Atomic uint64_t compressor_bytes_used;  /* total compressed bytes currently held */
extern uint32_t         swapout_target_age;

/*
 * Divisors feeding the VM_PAGE_COMPRESSOR_*_THRESHOLD macros below.
 * Values are scaled by 10 (a divisor of 25 means "divide by 2.5").
 */
extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

/* nonzero when the corresponding divisor was overridden (e.g. via boot-arg) -- NOTE(review): inferred from naming, confirm */
extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;
407 
/*
 * Preallocated buffers for decompressing pages from the kernel debugger
 * (KDP) and panic contexts, where normal allocation is unavailable.
 * NOTE(review): the split between the plural kc_* arrays and the single
 * kc_panic_* buffers is inferred from naming -- confirm in the kdp code.
 */
struct vm_compressor_kdp_state {
	char           *kc_scratch_bufs;            /* decompression scratch space */
	char           *kc_decompressed_pages;      /* output pages for decompressed data */
	addr64_t       *kc_decompressed_pages_paddr;  /* physical addresses of the above */
	ppnum_t        *kc_decompressed_pages_ppnum;  /* physical page numbers of the above */
	char           *kc_panic_scratch_buf;       /* dedicated scratch buffer for panic context */
	char           *kc_panic_decompressed_page; /* dedicated output page for panic context */
	addr64_t        kc_panic_decompressed_page_paddr;
	ppnum_t         kc_panic_decompressed_page_ppnum;
};
extern struct vm_compressor_kdp_state vm_compressor_kdp_state;

/* report which thread holds a busy segment a waiter is blocked on (stackshot support) */
extern void kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo);
extern kern_return_t vm_compressor_kdp_init(void);
extern void vm_compressor_kdp_teardown(void);
423 
424 /*
425  * TODO, there may be a minor optimisation opportunity to replace these divisions
426  * with multiplies and shifts
427  *
428  * By multiplying by 10, the divisors can have more precision w/o resorting to floating point... a divisor specified as 25 is in reality a divide by 2.5
429  * By multiplying by 9, you get a number ~11% smaller which allows us to have another limit point derived from the same base
430  * By multiplying by 11, you get a number ~10% bigger which allows us to generate a reset limit derived from the same base which is useful for hysteresis
431  */
432 
433 #define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD            (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 10))
434 #define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD               (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 10))
435 
436 #define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 10))
437 #define VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 11) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 11))
438 
439 #define VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD  (((AVAILABLE_MEMORY) * 11) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 11))
440 #define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD       (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 10))
441 #define VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD      (((AVAILABLE_MEMORY) * 9) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 9))
442 
443 #if !XNU_TARGET_OS_OSX
444 #define AVAILABLE_NON_COMPRESSED_MIN                    20000
445 #define COMPRESSOR_NEEDS_TO_SWAP()              (((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) || \
446 	                                          (AVAILABLE_NON_COMPRESSED_MEMORY < AVAILABLE_NON_COMPRESSED_MIN)) ? 1 : 0)
447 #else /* !XNU_TARGET_OS_OSX */
448 #define COMPRESSOR_NEEDS_TO_SWAP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)
449 #endif /* !XNU_TARGET_OS_OSX */
450 
451 #define HARD_THROTTLE_LIMIT_REACHED()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD) ? 1 : 0)
452 #define SWAPPER_NEEDS_TO_UNTHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
453 #define SWAPPER_NEEDS_TO_RETHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD) ? 1 : 0)
454 #define SWAPPER_NEEDS_TO_CATCHUP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0)
455 #define SWAPPER_HAS_CAUGHTUP()                  ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD) ? 1 : 0)
456 
457 
/* minimum free pages reserved for the compressor itself; larger on macOS */
#if !XNU_TARGET_OS_OSX
#define COMPRESSOR_FREE_RESERVED_LIMIT          28
#else /* !XNU_TARGET_OS_OSX */
#define COMPRESSOR_FREE_RESERVED_LIMIT          128
#endif /* !XNU_TARGET_OS_OSX */

/* per-thread scratch size required by the active compression codec */
#define COMPRESSOR_SCRATCH_BUF_SIZE vm_compressor_get_encode_scratch_size()

/* global lock guarding the compressor's segment queues */
extern lck_mtx_t c_list_lock_storage;
#define          c_list_lock (&c_list_lock_storage)
468 
#if DEVELOPMENT || DEBUG
extern uint32_t vm_ktrace_enabled;  /* gates the VM kdebug tracepoints below */

/* emit a kdebug event only when VM tracing was explicitly enabled */
#define VMKDBG(x, ...)          \
MACRO_BEGIN                     \
if (vm_ktrace_enabled) {        \
	KDBG(x, ## __VA_ARGS__);\
}                               \
MACRO_END

/* NOTE(review): this inner #if repeats the enclosing condition and is redundant */
#if DEVELOPMENT || DEBUG
extern bool compressor_running_perf_test;
extern uint64_t compressor_perf_test_pages_processed;
#endif /* DEVELOPMENT || DEBUG */
#endif
484 
485 #endif /* MACH_KERNEL_PRIVATE */
486 
/* budget (seconds) for flushing the compressor when entering hibernation */
#define HIBERNATE_FLUSHING_SECS_TO_COMPLETE     120

/* test hooks: wedge/unwedge a thread on a busy segment -- inferred from names */
#if DEVELOPMENT || DEBUG
int do_cseg_wedge_thread(void);
int do_cseg_unwedge_thread(void);
#endif /* DEVELOPMENT || DEBUG */

/* detach a dying task from the frozen segments it owns (see c_task_owner) */
#if CONFIG_FREEZE
void task_disown_frozen_csegs(task_t owner_task);
#endif /* CONFIG_FREEZE */

/* kick the compaction/swap machinery (implementations elsewhere in the VM subsystem) */
void vm_wake_compactor_swapper(void);
extern void             vm_swap_consider_defragmenting(int);
void vm_run_compactor(void);
void vm_thrashing_jetsam_done(void);

/* compressor statistics / state queries */
uint32_t vm_compression_ratio(void);
uint32_t vm_compressor_pool_size(void);
uint32_t vm_compressor_fragmentation_level(void);
bool vm_compressor_is_thrashing(void);
bool vm_compressor_swapout_is_ripe(void);
uint64_t vm_compressor_pages_compressed(void);
void vm_compressor_process_special_swapped_in_segments(void);
/* serialize debug info for segment `segno` into caller-supplied buf; *size is in/out -- NOTE(review): in/out semantics inferred, confirm */
kern_return_t vm_compressor_serialize_segment_debug_info(int segno, char *buf, size_t *size);
511 
512 
513 #endif /* _VM_VM_COMPRESSOR_XNU_H_ */
514