xref: /xnu-8019.80.24/osfmk/vm/vm_compressor.h (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>
#include <vm/WKdm_new.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <machine/pmap.h>
#include <kern/locks.h>

#include <sys/kdebug.h>

#if defined(__arm64__)
#include <arm/proc_reg.h>
#endif

#define C_SEG_OFFSET_BITS       16

#define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE)

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)

#if defined(PLATFORM_WatchOS)
#define VALIDATE_C_SEGMENTS (1)
#endif
#endif /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */


#if DEBUG || COMPRESSOR_INTEGRITY_CHECKS
#define ENABLE_SWAP_CHECKS 1
#define ENABLE_COMPRESSOR_CHECKS 1
#define POPCOUNT_THE_COMPRESSED_DATA (1)
#else
#define ENABLE_SWAP_CHECKS 0
#define ENABLE_COMPRESSOR_CHECKS 0
#endif

#define CHECKSUM_THE_SWAP               ENABLE_SWAP_CHECKS              /* Debug swap data */
#define CHECKSUM_THE_DATA               ENABLE_COMPRESSOR_CHECKS        /* Debug compressor/decompressor data */
#define CHECKSUM_THE_COMPRESSED_DATA    ENABLE_COMPRESSOR_CHECKS        /* Debug compressor/decompressor compressed data */

#ifndef VALIDATE_C_SEGMENTS
#define VALIDATE_C_SEGMENTS             ENABLE_COMPRESSOR_CHECKS        /* Debug compaction */
#endif

#define RECORD_THE_COMPRESSED_DATA      0

/*
 * The c_slot structure embeds a packed pointer to a c_slot_mapping
 * (32 bits) which we ideally want to span as much VA space as possible
 * so as not to limit zalloc in how it sets itself up.
 */
#if !defined(__LP64__)                  /* no packing */
#define C_SLOT_PACKED_PTR_BITS          32
#define C_SLOT_PACKED_PTR_SHIFT         0
#define C_SLOT_PACKED_PTR_BASE          0

#define C_SLOT_C_SIZE_BITS              12
#define C_SLOT_C_CODEC_BITS             1
#define C_SLOT_C_POPCOUNT_BITS          0
#define C_SLOT_C_PADDING_BITS           3

#elif __APPLE_WKDM_POPCNT_EXTENSIONS__               /* no packing */
#define C_SLOT_PACKED_PTR_BITS          47
#define C_SLOT_PACKED_PTR_SHIFT         0
#define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS              14
#define C_SLOT_C_CODEC_BITS             1
#define C_SLOT_C_POPCOUNT_BITS          18
#define C_SLOT_C_PADDING_BITS           0

#elif defined(__arm64__)                /* 32G from the heap start */
#define C_SLOT_PACKED_PTR_BITS          33
#define C_SLOT_PACKED_PTR_SHIFT         2
#define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS              14
#define C_SLOT_C_CODEC_BITS             1
#define C_SLOT_C_POPCOUNT_BITS          0
#define C_SLOT_C_PADDING_BITS           0

#elif defined(__x86_64__)               /* 256G from the heap start */
#define C_SLOT_PACKED_PTR_BITS          36
#define C_SLOT_PACKED_PTR_SHIFT         2
#define C_SLOT_PACKED_PTR_BASE          ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS              12
#define C_SLOT_C_CODEC_BITS             0 /* not used */
#define C_SLOT_C_POPCOUNT_BITS          0
#define C_SLOT_C_PADDING_BITS           0

#else
#error vm_compressor parameters undefined for this architecture
#endif
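
/*
 * Illustrative sketch (kept under #if 0 so nothing here is compiled): the VA
 * window a packed pointer can address follows from the constants above as
 * (1ULL << C_SLOT_PACKED_PTR_BITS) << C_SLOT_PACKED_PTR_SHIFT. For example,
 * arm64 gives (1 << 33) << 2 == 32G and x86_64 gives (1 << 36) << 2 == 256G,
 * matching the comments on the #elif branches. The helper name below is
 * hypothetical, not part of this header.
 */
#if 0 /* illustrative only, not compiled */
static inline uint64_t
c_slot_packed_ptr_span(void)
{
	/* bytes of VA reachable from C_SLOT_PACKED_PTR_BASE */
	return (1ULL << C_SLOT_PACKED_PTR_BITS) << C_SLOT_PACKED_PTR_SHIFT;
}
#endif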

/*
 * The popcount needs to represent both 0 and full, which requires
 * (8 << C_SLOT_C_SIZE_BITS) + 1 values and (C_SLOT_C_SIZE_BITS + 4) bits.
 *
 * We use the (2 * (8 << C_SLOT_C_SIZE_BITS) - 1) value to mean "unknown".
 */
#define C_SLOT_NO_POPCOUNT              ((16u << C_SLOT_C_SIZE_BITS) - 1)
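
/*
 * Worked example (illustrative, assuming the popcount-enabled configuration
 * where C_SLOT_C_SIZE_BITS == 14): a fully-set 16 KiB page holds
 * 8 << 14 == 131072 bits, so a popcount must cover the 131073 values
 * 0..131072, which takes 14 + 4 == 18 bits (C_SLOT_C_POPCOUNT_BITS). The
 * sentinel C_SLOT_NO_POPCOUNT == (16 << 14) - 1 == 262143 still fits in 18
 * bits and is larger than any valid count, so it cannot collide with one.
 */
#if 0 /* illustrative only, not compiled */
_Static_assert(C_SLOT_NO_POPCOUNT > (8u << C_SLOT_C_SIZE_BITS),
    "sentinel must be distinct from every valid popcount");
#endif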

static_assert((C_SEG_OFFSET_BITS + C_SLOT_C_SIZE_BITS +
    C_SLOT_C_CODEC_BITS + C_SLOT_C_POPCOUNT_BITS +
    C_SLOT_C_PADDING_BITS + C_SLOT_PACKED_PTR_BITS) % 32 == 0);

struct c_slot {
	uint64_t        c_offset:C_SEG_OFFSET_BITS;
	uint64_t        c_size:C_SLOT_C_SIZE_BITS;
#if C_SLOT_C_CODEC_BITS
	uint64_t        c_codec:C_SLOT_C_CODEC_BITS;
#endif
#if C_SLOT_C_POPCOUNT_BITS
	/*
	 * This value may not agree with c_pop_cdata, as it may be the
	 * population count of the uncompressed data.
	 *
	 * This value must be C_SLOT_NO_POPCOUNT when the compression algorithm
	 * cannot provide it.
	 */
	uint32_t        c_inline_popcount:C_SLOT_C_POPCOUNT_BITS;
#endif
#if C_SLOT_C_PADDING_BITS
	uint64_t        c_padding:C_SLOT_C_PADDING_BITS;
#endif
	uint64_t        c_packed_ptr:C_SLOT_PACKED_PTR_BITS;

	/* debugging fields, typically not present on release kernels */
#if CHECKSUM_THE_DATA
	unsigned int    c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int    c_hash_compressed_data;
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	unsigned int    c_pop_cdata;
#endif
} __attribute__((packed, aligned(4)));
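
/*
 * Minimal sketch (illustrative; these are not the kernel's actual packing
 * helpers) of how a c_slot_mapping pointer could round-trip through
 * c_packed_ptr given the per-architecture constants above: subtract the
 * base, shift right, and keep C_SLOT_PACKED_PTR_BITS bits; unpacking
 * reverses the steps.
 */
#if 0 /* illustrative only, not compiled */
static inline uint64_t
c_slot_pack_ptr(uintptr_t ptr)
{
	return (uint64_t)((ptr - C_SLOT_PACKED_PTR_BASE) >> C_SLOT_PACKED_PTR_SHIFT) &
	       ((1ULL << C_SLOT_PACKED_PTR_BITS) - 1);
}

static inline uintptr_t
c_slot_unpack_ptr(uint64_t packed)
{
	return C_SLOT_PACKED_PTR_BASE + (uintptr_t)(packed << C_SLOT_PACKED_PTR_SHIFT);
}
#endif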

#define C_IS_EMPTY              0
#define C_IS_FREE               1
#define C_IS_FILLING            2
#define C_ON_AGE_Q              3
#define C_ON_SWAPOUT_Q          4
#define C_ON_SWAPPEDOUT_Q       5
#define C_ON_SWAPPEDOUTSPARSE_Q 6
#define C_ON_SWAPPEDIN_Q        7
#define C_ON_MAJORCOMPACT_Q     8
#define C_ON_BAD_Q              9
#define C_ON_SWAPIO_Q          10
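/*
 * Illustrative check (not compiled): the largest state value, C_ON_SWAPIO_Q
 * (10), must fit in the 4-bit c_state field of struct c_segment below.
 */
#if 0 /* illustrative only, not compiled */
_Static_assert(C_ON_SWAPIO_Q < (1 << 4), "c_state is a 4-bit field");
#endif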


struct c_segment {
	lck_mtx_t       c_lock;
	queue_chain_t   c_age_list;
	queue_chain_t   c_list;

#if CONFIG_FREEZE
	queue_chain_t   c_task_list_next_cseg;
	task_t          c_task_owner;
#endif /* CONFIG_FREEZE */

#define C_SEG_MAX_LIMIT         (1 << 20)       /* this needs to track the size of c_mysegno */
	uint32_t        c_mysegno:20,
	    c_busy:1,
	    c_busy_swapping:1,
	    c_wanted:1,
	    c_on_minorcompact_q:1,              /* can also be on the age_q, the majorcompact_q or the swappedin_q */

	    c_state:4,                          /* segment state; dictates which queue the segment is found on */
	    c_overage_swap:1,
	    c_reserved:3;

	uint32_t        c_creation_ts;
	uint64_t        c_generation_id;

	int32_t         c_bytes_used;
	int32_t         c_bytes_unused;
	uint32_t        c_slots_used;

	uint16_t        c_firstemptyslot;
	uint16_t        c_nextslot;
	uint32_t        c_nextoffset;
	uint32_t        c_populated_offset;

	union {
		int32_t *c_buffer;
		uint64_t c_swap_handle;
	} c_store;

#if     VALIDATE_C_SEGMENTS
	uint32_t        c_was_minor_compacted;
	uint32_t        c_was_major_compacted;
	uint32_t        c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int    cseg_hash;
	unsigned int    cseg_swap_size;
#endif /* CHECKSUM_THE_SWAP */

	thread_t        c_busy_for_thread;
	uint32_t        c_agedin_ts;
	uint32_t        c_swappedin_ts;
	bool            c_swappedin;

	int             c_slot_var_array_len;
	struct  c_slot  *c_slot_var_array;
	struct  c_slot  c_slot_fixed_array[0];
};
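
/*
 * Sketch (illustrative; the helper name is hypothetical): c_segment ends in
 * a zero-length array, so the first c_seg_fixed_array_len slots live inline
 * and higher-numbered slots spill into the separately allocated
 * c_slot_var_array, giving an inline allocation size of roughly:
 */
#if 0 /* illustrative only, not compiled */
static inline size_t
c_segment_alloc_size(int fixed_len)
{
	return sizeof(struct c_segment) + (size_t)fixed_len * sizeof(struct c_slot);
}
#endif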


struct  c_slot_mapping {
	uint32_t        s_cseg:22,      /* segment number + 1 */
	    s_cindx:10;                 /* index in the segment */
};
#define C_SLOT_MAX_INDEX        (1 << 10)

typedef struct c_slot_mapping *c_slot_mapping_t;
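
/*
 * Illustrative sketch: because s_cseg stores the segment number plus one, an
 * all-zero c_slot_mapping can never name a valid slot. A hypothetical
 * encode/decode pair (not from the original source) would look like:
 */
#if 0 /* illustrative only, not compiled */
static inline struct c_slot_mapping
c_slot_mapping_make(uint32_t segno, uint32_t slot_index)
{
	struct c_slot_mapping m;
	m.s_cseg  = segno + 1;          /* 0 is reserved as "no segment" */
	m.s_cindx = slot_index;         /* must be < C_SLOT_MAX_INDEX */
	return m;
}

static inline uint32_t
c_slot_mapping_segno(struct c_slot_mapping m)
{
	return m.s_cseg - 1;
}
#endif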


extern  int             c_seg_fixed_array_len;
extern  vm_offset_t     c_buffers;
#define C_SEG_BUFFER_ADDRESS(c_segno)   ((c_buffers + ((uint64_t)c_segno * (uint64_t)c_seg_allocsize)))

#define C_SEG_SLOT_FROM_INDEX(cseg, index)      ((index) < c_seg_fixed_array_len ? &((cseg)->c_slot_fixed_array[index]) : &((cseg)->c_slot_var_array[(index) - c_seg_fixed_array_len]))
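
/*
 * Equivalent logic as a function (illustrative only): slots below
 * c_seg_fixed_array_len come from the inline fixed array; higher indices
 * are offset into the separately allocated variable array.
 */
#if 0 /* illustrative only, not compiled */
static inline struct c_slot *
c_seg_slot_from_index(struct c_segment *cseg, int index)
{
	if (index < c_seg_fixed_array_len) {
		return &cseg->c_slot_fixed_array[index];
	}
	return &cseg->c_slot_var_array[index - c_seg_fixed_array_len];
}
#endif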

#define C_SEG_OFFSET_TO_BYTES(off)      ((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)    ((bytes) / (int) sizeof(int32_t))

#define C_SEG_UNUSED_BYTES(cseg)        (cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset)))
//todo opensource

#ifndef __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_MASK     0x3ULL
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY 0x4
#else
#define C_SEG_OFFSET_ALIGNMENT_MASK     __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__
#endif

#define C_SEG_SHOULD_MINORCOMPACT_NOW(cseg)     ((C_SEG_UNUSED_BYTES(cseg) >= (c_seg_bufsize / 4)) ? 1 : 0)

/*
 * the decision to force a c_seg to be major compacted is based on two criteria:
 * 1) is the c_seg buffer almost empty (i.e. we have a chance to merge it with another c_seg)?
 * 2) are there at least a minimum number of unoccupied slots, so that we have a chance
 *    of combining this c_seg with another one?
 */
#define C_SEG_SHOULD_MAJORCOMPACT_NOW(cseg)                                                                                     \
	((((cseg->c_bytes_unused + (c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(cseg->c_nextoffset))) >= (c_seg_bufsize / 8)) &&     \
	  ((C_SLOT_MAX_INDEX - cseg->c_slots_used) > (c_seg_bufsize / PAGE_SIZE))) \
	? 1 : 0)
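
/*
 * Worked example (illustrative, with an assumed c_seg_bufsize of 256 KiB and
 * 16 KiB pages): the segment qualifies for major compaction when at least
 * 256K / 8 == 32 KiB is reclaimable (unused bytes plus the untouched tail of
 * the buffer) and more than 256K / 16K == 16 of its C_SLOT_MAX_INDEX slots
 * are still free, so there is room to merge another segment's slots into it.
 */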

#define C_SEG_ONDISK_IS_SPARSE(cseg)    ((cseg->c_bytes_used < cseg->c_bytes_unused) ? 1 : 0)
#define C_SEG_IS_ONDISK(cseg)           ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q))
#define C_SEG_IS_ON_DISK_OR_SOQ(cseg)   ((cseg->c_state == C_ON_SWAPPEDOUT_Q || \
	                                  cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q || \
	                                  cseg->c_state == C_ON_SWAPOUT_Q || \
	                                  cseg->c_state == C_ON_SWAPIO_Q))


#define C_SEG_WAKEUP_DONE(cseg)                         \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy);                         \
	(cseg)->c_busy = 0;                             \
	assert((cseg)->c_busy_for_thread != NULL);      \
	(cseg)->c_busy_for_thread = NULL;               \
	if ((cseg)->c_wanted) {                         \
	        (cseg)->c_wanted = 0;                   \
	        thread_wakeup((event_t) (cseg));        \
	}                                               \
	MACRO_END

#define C_SEG_BUSY(cseg)                                \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy == 0);                    \
	(cseg)->c_busy = 1;                             \
	assert((cseg)->c_busy_for_thread == NULL);      \
	(cseg)->c_busy_for_thread = current_thread();   \
	MACRO_END
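
/*
 * Typical usage sketch (illustrative; the function name is hypothetical and
 * not from the original source): a thread marks a segment busy under its
 * lock, drops the lock to do slow work, then reacquires it and wakes any
 * waiters via C_SEG_WAKEUP_DONE.
 */
#if 0 /* illustrative only, not compiled */
static void
example_with_busy_segment(struct c_segment *c_seg)
{
	lck_mtx_lock_spin_always(&c_seg->c_lock);
	C_SEG_BUSY(c_seg);                      /* claim the segment */
	lck_mtx_unlock_always(&c_seg->c_lock);

	/* ... long-running work on c_seg, e.g. swap I/O ... */

	lck_mtx_lock_spin_always(&c_seg->c_lock);
	C_SEG_WAKEUP_DONE(c_seg);               /* clear busy, wake waiters */
	lck_mtx_unlock_always(&c_seg->c_lock);
}
#endif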


extern vm_map_t compressor_map;

#if DEVELOPMENT || DEBUG
extern boolean_t write_protect_c_segs;
extern int vm_compressor_test_seg_wp;

#define C_SEG_MAKE_WRITEABLE(cseg)                      \
	MACRO_BEGIN                                     \
	if (write_protect_c_segs) {                     \
	        vm_map_protect(compressor_map,                  \
	                       (vm_map_offset_t)cseg->c_store.c_buffer,         \
	                       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)],\
	                       VM_PROT_READ | VM_PROT_WRITE,    \
	                       0);                              \
	}                               \
	MACRO_END

#define C_SEG_WRITE_PROTECT(cseg)                       \
	MACRO_BEGIN                                     \
	if (write_protect_c_segs) {                     \
	        vm_map_protect(compressor_map,                  \
	                       (vm_map_offset_t)cseg->c_store.c_buffer,         \
	                       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(c_seg_allocsize)],\
	                       VM_PROT_READ,                    \
	                       0);                              \
	}                                                       \
	if (vm_compressor_test_seg_wp) {                                \
	        volatile uint32_t vmtstmp = *(volatile uint32_t *)cseg->c_store.c_buffer; \
	        *(volatile uint32_t *)cseg->c_store.c_buffer = 0xDEADABCD; \
	        (void) vmtstmp;                                         \
	}                                                               \
	MACRO_END
#endif
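
/*
 * Note on the vm_compressor_test_seg_wp branch above: once the buffer has
 * been remapped read-only, the test write of 0xDEADABCD is expected to fault
 * if the protection actually took effect; the preceding volatile read simply
 * keeps the access from being optimized away.
 */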

typedef struct c_segment *c_segment_t;
typedef struct c_slot   *c_slot_t;

uint64_t vm_compressor_total_compressions(void);
void vm_wake_compactor_swapper(void);
void vm_run_compactor(void);
void vm_thrashing_jetsam_done(void);
void vm_consider_waking_compactor_swapper(void);
void vm_consider_swapping(void);
void vm_compressor_flush(void);
void c_seg_free(c_segment_t);
void c_seg_free_locked(c_segment_t);
void c_seg_insert_into_age_q(c_segment_t);
void c_seg_need_delayed_compaction(c_segment_t, boolean_t);
void c_seg_update_task_owner(c_segment_t, task_t);

void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);

void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);
void vm_compressor_record_warmup_start(void);
void vm_compressor_record_warmup_end(void);

int                     vm_wants_task_throttled(task_t);

extern void             vm_compaction_swapper_do_init(void);
extern void             vm_compressor_swap_init(void);
extern lck_rw_t         c_master_lock;

#if ENCRYPTED_SWAP
extern void             vm_swap_decrypt(c_segment_t);
#endif /* ENCRYPTED_SWAP */

extern int              vm_swap_low_on_space(void);
extern int              vm_swap_out_of_space(void);
extern kern_return_t    vm_swap_get(c_segment_t, uint64_t, uint64_t);
extern void             vm_swap_free(uint64_t);
extern void             vm_swap_consider_defragmenting(int);

extern void             c_seg_swapin_requeue(c_segment_t, boolean_t, boolean_t, boolean_t);
extern int              c_seg_swapin(c_segment_t, boolean_t, boolean_t);
extern void             c_seg_wait_on_busy(c_segment_t);
extern void             c_seg_trim_tail(c_segment_t);
extern void             c_seg_switch_state(c_segment_t, int, boolean_t);

extern boolean_t        fastwake_recording_in_progress;
extern int              compaction_swapper_inited;
extern int              compaction_swapper_running;
extern uint64_t         vm_swap_put_failures;

extern int              c_overage_swapped_count;
extern int              c_overage_swapped_limit;

extern queue_head_t     c_minor_list_head;
extern queue_head_t     c_age_list_head;
extern queue_head_t     c_swapout_list_head;
extern queue_head_t     c_swappedout_list_head;
extern queue_head_t     c_swappedout_sparse_list_head;

extern uint32_t         c_age_count;
extern uint32_t         c_swapout_count;
extern uint32_t         c_swappedout_count;
extern uint32_t         c_swappedout_sparse_count;

extern int64_t          compressor_bytes_used;
extern uint64_t         first_c_segment_to_warm_generation_id;
extern uint64_t         last_c_segment_to_warm_generation_id;
extern boolean_t        hibernate_flushing;
extern boolean_t        hibernate_no_swapspace;
extern boolean_t        hibernate_in_progress_with_pinned_swap;
extern boolean_t        hibernate_flush_timed_out;
extern uint32_t         swapout_target_age;

extern void c_seg_insert_into_q(queue_head_t *, c_segment_t);

extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;

extern uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, clock_sec_t, clock_nsec_t);

extern void kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo);

#define PAGE_REPLACEMENT_DISALLOWED(enable)     (enable == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock))
#define PAGE_REPLACEMENT_ALLOWED(enable)        (enable == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock))
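
/*
 * Usage sketch (illustrative; the function name is hypothetical): both
 * macros take TRUE to acquire c_master_lock (shared for "disallowed",
 * exclusive for "allowed") and FALSE to drop it again, so calls always come
 * in enable/disable pairs.
 */
#if 0 /* illustrative only, not compiled */
static void
example_page_replacement_section(void)
{
	PAGE_REPLACEMENT_DISALLOWED(TRUE);      /* shared hold: block page replacement */
	/* ... touch compressor state that must not be replaced ... */
	PAGE_REPLACEMENT_DISALLOWED(FALSE);     /* release */
}
#endif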


#define AVAILABLE_NON_COMPRESSED_MEMORY         (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
#define AVAILABLE_MEMORY                        (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)

/*
 * TODO: there may be a minor optimisation opportunity to replace these
 * divisions with multiplies and shifts.
 *
 * By multiplying by 10, the divisors can have more precision without resorting
 * to floating point: a divisor specified as 25 is in reality a divide by 2.5.
 * By multiplying by 9, you get a number ~11% smaller, which allows us to have
 * another limit point derived from the same base.
 * By multiplying by 11, you get a number ~10% bigger, which allows us to
 * generate a reset limit derived from the same base, useful for hysteresis.
 */

#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD            (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD               (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 10))

#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 11) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 11))

#define VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD  (((AVAILABLE_MEMORY) * 11) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 11))
#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD       (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD      (((AVAILABLE_MEMORY) * 9) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 9))
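
/*
 * Worked example (illustrative): with vm_compressor_unthrottle_threshold_divisor
 * set to 25, the unthrottle threshold is AVAILABLE_MEMORY * 10 / 25, i.e. 40%
 * of available memory (the "divide by 2.5" above), while the rethrottle
 * threshold of AVAILABLE_MEMORY * 11 / 25 is 44%, roughly 10% higher,
 * providing the hysteresis described in the comment block.
 */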

#if !XNU_TARGET_OS_OSX
#define AVAILABLE_NON_COMPRESSED_MIN                    20000
#define COMPRESSOR_NEEDS_TO_SWAP()              (((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) || \
	                                          (AVAILABLE_NON_COMPRESSED_MEMORY < AVAILABLE_NON_COMPRESSED_MIN)) ? 1 : 0)
#else /* !XNU_TARGET_OS_OSX */
#define COMPRESSOR_NEEDS_TO_SWAP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)
#endif /* !XNU_TARGET_OS_OSX */

#define HARD_THROTTLE_LIMIT_REACHED()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_UNTHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_RETHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_CATCHUP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0)
#define SWAPPER_HAS_CAUGHTUP()                  ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD) ? 1 : 0)
#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT()     ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0)


#if !XNU_TARGET_OS_OSX
#define COMPRESSOR_FREE_RESERVED_LIMIT          28
#else /* !XNU_TARGET_OS_OSX */
#define COMPRESSOR_FREE_RESERVED_LIMIT          128
#endif /* !XNU_TARGET_OS_OSX */

uint32_t vm_compressor_get_encode_scratch_size(void) __pure2;
uint32_t vm_compressor_get_decode_scratch_size(void) __pure2;

#define COMPRESSOR_SCRATCH_BUF_SIZE vm_compressor_get_encode_scratch_size()

#if RECORD_THE_COMPRESSED_DATA
extern void      c_compressed_record_init(void);
extern void      c_compressed_record_write(char *, int);
#endif

extern lck_mtx_t c_list_lock_storage;
#define          c_list_lock (&c_list_lock_storage)

#if DEVELOPMENT || DEBUG
extern uint32_t vm_ktrace_enabled;

#define VMKDBG(x, ...)          \
MACRO_BEGIN                     \
if (vm_ktrace_enabled) {        \
	KDBG(x, ## __VA_ARGS__);\
}                               \
MACRO_END
#endif