xref: /xnu-11215.1.10/osfmk/vm/vm_compressor.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <vm/vm_compressor_internal.h>
30 
31 #if CONFIG_PHANTOM_CACHE
32 #include <vm/vm_phantom_cache_internal.h>
33 #endif
34 
35 #include <vm/vm_map_xnu.h>
36 #include <vm/vm_pageout_xnu.h>
37 #include <vm/vm_map_internal.h>
38 #include <vm/memory_object.h>
39 #include <vm/vm_compressor_algorithms_internal.h>
40 #include <vm/vm_compressor_backing_store_internal.h>
41 #include <vm/vm_fault.h>
42 #include <vm/vm_protos.h>
43 #include <vm/vm_kern_xnu.h>
44 #include <vm/vm_compressor_pager_internal.h>
45 #include <vm/vm_iokit.h>
46 #include <mach/mach_host.h>             /* for host_info() */
47 #if DEVELOPMENT || DEBUG
48 #include <kern/hvg_hypercall.h>
49 #include <vm/vm_compressor_info.h>         /* for c_segment_info */
50 #endif
51 #include <kern/ledger.h>
52 #include <kern/policy_internal.h>
53 #include <kern/thread_group.h>
54 #include <san/kasan.h>
55 #include <os/atomic_private.h>
56 #include <os/log.h>
57 #include <pexpert/pexpert.h>
58 #include <pexpert/device_tree.h>
59 
60 #if defined(__x86_64__)
61 #include <i386/misc_protos.h>
62 #endif
63 #if defined(__arm64__)
64 #include <arm/machine_routines.h>
65 #endif
66 
67 #include <IOKit/IOHibernatePrivate.h>
68 
69 /*
70  * The segment buffer size is a tradeoff.
71  * A larger buffer leads to faster I/O throughput, better compression ratios
72  * (since fewer bytes are wasted at the end of the segment),
73  * and less overhead (both in time and space).
74  * However, a smaller buffer causes less swap when the system is overcommitted,
75  * because a higher percentage of the swapped-in segment is definitely accessed
76  * before it goes back out to storage.
77  *
78  * So on systems without swap, a larger segment is a clear win.
79  * On systems with swap, the choice is murkier. Empirically, we've
80  * found that a 64KB segment provides a better tradeoff both in terms of
81  * performance and swap writes than a 256KB segment on systems with fast SSDs
82  * and a HW compression block.
83  */
84 #define C_SEG_BUFSIZE_ARM_SWAP (1024 * 64)
85 #if XNU_TARGET_OS_OSX && defined(__arm64__)
86 #define C_SEG_BUFSIZE_DEFAULT C_SEG_BUFSIZE_ARM_SWAP
87 #else
88 #define C_SEG_BUFSIZE_DEFAULT (1024 * 256)
89 #endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
90 uint32_t c_seg_bufsize;
91 
92 uint32_t c_seg_max_pages; /* maximum number of pages the compressed data of a segment can take  */
93 uint32_t c_seg_off_limit; /* if we've reached this size while filling the segment, don't bother trying to fill anymore
94                            * because it's unlikely to succeed */
95 uint32_t c_seg_allocsize, c_seg_slot_var_array_min_len;
96 
97 extern boolean_t vm_darkwake_mode;
98 extern zone_t vm_page_zone;
99 
100 #if DEVELOPMENT || DEBUG
101 /* sysctl defined in bsd/dev/arm64/sysctl.c */
102 static event_t debug_cseg_wait_event = NULL;
103 #endif /* DEVELOPMENT || DEBUG */
104 
105 #if CONFIG_FREEZE
106 bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
107 #endif /* CONFIG_FREEZE */
108 
109 #if POPCOUNT_THE_COMPRESSED_DATA
110 boolean_t popcount_c_segs = TRUE;
111 
112 static inline uint32_t
113 vmc_pop(uintptr_t ins, int sz)
114 {
115 	uint32_t rv = 0;
116 
117 	if (__probable(popcount_c_segs == FALSE)) {
118 		return 0xDEAD707C;
119 	}
120 
121 	while (sz >= 16) {
122 		uint32_t rv1, rv2;
123 		uint64_t *ins64 = (uint64_t *) ins;
124 		uint64_t *ins642 = (uint64_t *) (ins + 8);
125 		rv1 = __builtin_popcountll(*ins64);
126 		rv2 = __builtin_popcountll(*ins642);
127 		rv += rv1 + rv2;
128 		sz -= 16;
129 		ins += 16;
130 	}
131 
132 	while (sz >= 4) {
133 		uint32_t *ins32 = (uint32_t *) ins;
134 		rv += __builtin_popcount(*ins32);
135 		sz -= 4;
136 		ins += 4;
137 	}
138 
139 	while (sz > 0) {
140 		uint8_t *ins8 = (uint8_t *)ins;
141 		rv += __builtin_popcount(*ins8);
142 		sz--;
143 		ins++;
144 	}
145 	return rv;
146 }
147 #endif
148 
149 #if VALIDATE_C_SEGMENTS
150 boolean_t validate_c_segs = TRUE;
151 #endif
152 /*
153  * vm_compressor_mode has a hierarchy of control to set its value.
154  * boot-args are checked first, then device-tree, and finally
155  * the default value that is defined below. See vm_fault_init() for
156  * the boot-arg & device-tree code.
157  */
158 
159 #if !XNU_TARGET_OS_OSX
160 
161 #if CONFIG_FREEZE
162 int     vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
163 struct  freezer_context freezer_context_global;
164 #else /* CONFIG_FREEZE */
165 int     vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
166 #endif /* CONFIG_FREEZE */
167 
168 #else /* !XNU_TARGET_OS_OSX */
169 int             vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
170 
171 #endif /* !XNU_TARGET_OS_OSX */
172 
173 TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
174 int             vm_compressor_is_active = 0;
175 int             vm_compressor_available = 0;
176 
177 extern uint64_t vm_swap_get_max_configured_space(void);
178 extern void     vm_pageout_io_throttle(void);
179 
180 #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
181 extern unsigned int hash_string(char *cp, int len);
182 static unsigned int vmc_hash(char *, int);
183 boolean_t checksum_c_segs = TRUE;
184 
185 unsigned int
186 vmc_hash(char *cp, int len)
187 {
188 	unsigned int result;
189 	if (__probable(checksum_c_segs == FALSE)) {
190 		return 0xDEAD7A37;
191 	}
192 	vm_memtag_disable_checking();
193 	result = hash_string(cp, len);
194 	vm_memtag_enable_checking();
195 	return result;
196 }
197 #endif
198 
199 #define UNPACK_C_SIZE(cs)       ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
200 #define PACK_C_SIZE(cs, size)   (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
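
/*
 * Illustrative round-trip (editor's sketch): c_size is too narrow to
 * represent PAGE_SIZE itself, so a full (incompressible) page is stored
 * as PAGE_SIZE - 1 and mapped back to PAGE_SIZE on unpack:
 *
 *	struct c_slot slot, *cs = &slot;
 *	PACK_C_SIZE(cs, PAGE_SIZE);      // c_size now holds PAGE_SIZE - 1
 *	assert(UNPACK_C_SIZE(cs) == PAGE_SIZE);
 *	PACK_C_SIZE(cs, 1837);           // ordinary sizes are stored as-is
 *	assert(UNPACK_C_SIZE(cs) == 1837);
 */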
201 
202 
203 struct c_sv_hash_entry {
204 	union {
205 		struct  {
206 			uint32_t        c_sv_he_ref;
207 			uint32_t        c_sv_he_data;
208 		} c_sv_he;
209 		uint64_t        c_sv_he_record;
210 	} c_sv_he_un;
211 };
212 
213 #define he_ref  c_sv_he_un.c_sv_he.c_sv_he_ref
214 #define he_data c_sv_he_un.c_sv_he.c_sv_he_data
215 #define he_record c_sv_he_un.c_sv_he_record
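
/*
 * Why the union (editor's note): pairing the 32-bit refcount with the
 * 32-bit single value lets both be loaded and compare-and-swapped as one
 * aligned 64-bit record. A minimal sketch of a reference bump, assuming
 * "entry" points at a table slot:
 *
 *	struct c_sv_hash_entry old_e, new_e;
 *	old_e.he_record = os_atomic_load(&entry->he_record, relaxed);
 *	new_e.he_ref = old_e.he_ref + 1;
 *	new_e.he_data = old_e.he_data;
 *	(void) os_atomic_cmpxchg(&entry->he_record, old_e.he_record,
 *	    new_e.he_record, relaxed);
 */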
216 
217 #define C_SV_HASH_MAX_MISS      32
218 #define C_SV_HASH_SIZE          ((1 << 10))
219 #define C_SV_HASH_MASK          ((1 << 10) - 1)
220 
221 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
222 #define C_SV_CSEG_ID            ((1 << 21) - 1)
223 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
224 #define C_SV_CSEG_ID            ((1 << 22) - 1)
225 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
226 
227 /* elements of c_segments array */
228 union c_segu {
229 	c_segment_t     c_seg;
230 	uintptr_t       c_segno;  /* index of the next element in the segments free-list, c_free_segno_head is the head */
231 };
232 
233 #define C_SLOT_ASSERT_PACKABLE(ptr) \
234 	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);
235 
236 #define C_SLOT_PACK_PTR(ptr) \
237 	VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)
238 
239 #define C_SLOT_UNPACK_PTR(cslot) \
240 	(c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)
241 
242 /* for debugging purposes */
243 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
244     VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
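
/*
 * Illustrative round-trip (editor's sketch; assumes "m" satisfies the
 * C_SLOT_PACKED_PTR packing parameters and "cslot" is a valid c_slot_t):
 *
 *	C_SLOT_ASSERT_PACKABLE(m);
 *	cslot->c_packed_ptr = C_SLOT_PACK_PTR(m);
 *	assert(C_SLOT_UNPACK_PTR(cslot) == m);
 */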
245 
246 uint32_t        c_segment_count = 0;       /* count all allocated c_segments in all queues */
247 uint32_t        c_segment_count_max = 0;   /* maximum c_segment_count has ever been */
248 
249 uint64_t        c_generation_id = 0;
250 uint64_t        c_generation_id_flush_barrier;
251 
252 
253 #define         HIBERNATE_FLUSHING_SECS_TO_COMPLETE     120
254 
255 boolean_t       hibernate_no_swapspace = FALSE;
256 boolean_t       hibernate_flush_timed_out = FALSE;
257 clock_sec_t     hibernate_flushing_deadline = 0;
258 
259 #if RECORD_THE_COMPRESSED_DATA
260 /* buffer used as an intermediate stage before writing to file */
261 char    *c_compressed_record_sbuf;  /* start */
262 char    *c_compressed_record_ebuf;  /* end */
263 char    *c_compressed_record_cptr;  /* next buffered write */
264 #endif
265 
266 /* the different queues a c_segment can be in via c_age_list */
267 queue_head_t    c_age_list_head;
268 queue_head_t    c_early_swappedin_list_head, c_regular_swappedin_list_head, c_late_swappedin_list_head;
269 queue_head_t    c_early_swapout_list_head, c_regular_swapout_list_head, c_late_swapout_list_head;
270 queue_head_t    c_swapio_list_head;
271 queue_head_t    c_swappedout_list_head;
272 queue_head_t    c_swappedout_sparse_list_head;
273 queue_head_t    c_major_list_head;
274 queue_head_t    c_filling_list_head;
275 queue_head_t    c_bad_list_head;
276 
277 /* count of each of the queues above */
278 uint32_t        c_age_count = 0;
279 uint32_t        c_early_swappedin_count = 0, c_regular_swappedin_count = 0, c_late_swappedin_count = 0;
280 uint32_t        c_early_swapout_count = 0, c_regular_swapout_count = 0, c_late_swapout_count = 0;
281 uint32_t        c_swapio_count = 0;
282 uint32_t        c_swappedout_count = 0;
283 uint32_t        c_swappedout_sparse_count = 0;
284 uint32_t        c_major_count = 0;
285 uint32_t        c_filling_count = 0;
286 uint32_t        c_empty_count = 0;
287 uint32_t        c_bad_count = 0;
288 
289 /* a c_segment can be in the minor-compact queue as well as one of the above ones, via c_list */
290 queue_head_t    c_minor_list_head;
291 uint32_t        c_minor_count = 0;
292 
293 int             c_overage_swapped_count = 0;
294 int             c_overage_swapped_limit = 0;
295 
296 int             c_seg_fixed_array_len;   /* number of slots in the c_segment inline slots array */
297 union  c_segu   *c_segments;             /* array of all c_segments, not all of it may be populated */
298 vm_offset_t     c_buffers;               /* starting address of all compressed data pointed to by c_segment.c_store.c_buffer */
299 vm_size_t       c_buffers_size;          /* total size allocated in c_buffers */
300 caddr_t         c_segments_next_page;    /* next page to populate for extending c_segments */
301 boolean_t       c_segments_busy;
302 uint32_t        c_segments_available;    /* entries of c_segments (used or free) backed by populated memory, i.e. the populated size of the c_segments array */
303 uint32_t        c_segments_limit;        /* max size of c_segments array */
304 uint32_t        c_segments_nearing_limit;
305 
306 uint32_t        c_segment_svp_in_hash;
307 uint32_t        c_segment_svp_hash_succeeded;
308 uint32_t        c_segment_svp_hash_failed;
309 uint32_t        c_segment_svp_zero_compressions;
310 uint32_t        c_segment_svp_nonzero_compressions;
311 uint32_t        c_segment_svp_zero_decompressions;
312 uint32_t        c_segment_svp_nonzero_decompressions;
313 
314 uint32_t        c_segment_noncompressible_pages;
315 
316 uint32_t        c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor, including SV (single value) pages */
317 #if CONFIG_FREEZE
318 int32_t         c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
319 int32_t         c_segment_pages_compressed_incore_late_swapout = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory and tagged for swapout */
320 uint32_t        c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
321 #endif /* CONFIG_FREEZE */
322 
323 uint32_t        c_segment_pages_compressed_limit;
324 uint32_t        c_segment_pages_compressed_nearing_limit;
325 uint32_t        c_free_segno_head = (uint32_t)-1;   /* head of free list of c_segment pointers in c_segments */
326 
327 uint32_t        vm_compressor_minorcompact_threshold_divisor = 10;
328 uint32_t        vm_compressor_majorcompact_threshold_divisor = 10;
329 uint32_t        vm_compressor_unthrottle_threshold_divisor = 10;
330 uint32_t        vm_compressor_catchup_threshold_divisor = 10;
331 
332 uint32_t        vm_compressor_minorcompact_threshold_divisor_overridden = 0;
333 uint32_t        vm_compressor_majorcompact_threshold_divisor_overridden = 0;
334 uint32_t        vm_compressor_unthrottle_threshold_divisor_overridden = 0;
335 uint32_t        vm_compressor_catchup_threshold_divisor_overridden = 0;
336 
337 #define         C_SEGMENTS_PER_PAGE     (PAGE_SIZE / sizeof(union c_segu))
338 
339 LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
340 LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
341 LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);
342 
343 boolean_t       decompressions_blocked = FALSE;
344 
345 zone_t          compressor_segment_zone;
346 int             c_compressor_swap_trigger = 0;
347 
348 uint32_t        compressor_cpus;
349 char            *compressor_scratch_bufs;
350 
351 struct vm_compressor_kdp_state vm_compressor_kdp_state;
352 
353 clock_sec_t     start_of_sample_period_sec = 0;
354 clock_nsec_t    start_of_sample_period_nsec = 0;
355 clock_sec_t     start_of_eval_period_sec = 0;
356 clock_nsec_t    start_of_eval_period_nsec = 0;
357 uint32_t        sample_period_decompression_count = 0;
358 uint32_t        sample_period_compression_count = 0;
359 uint32_t        last_eval_decompression_count = 0;
360 uint32_t        last_eval_compression_count = 0;
361 
362 #define         DECOMPRESSION_SAMPLE_MAX_AGE            (60 * 30)
363 
364 boolean_t       vm_swapout_ripe_segments = FALSE;
365 uint32_t        vm_ripe_target_age = (60 * 60 * 48);
366 
367 uint32_t        swapout_target_age = 0;
368 uint32_t        age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
369 uint32_t        overage_decompressions_during_sample_period = 0;
370 
371 
372 void            do_fastwake_warmup(queue_head_t *, boolean_t);
373 boolean_t       fastwake_warmup = FALSE;
374 boolean_t       fastwake_recording_in_progress = FALSE;
375 uint64_t        dont_trim_until_ts = 0;
376 
377 uint64_t        c_segment_warmup_count;
378 uint64_t        first_c_segment_to_warm_generation_id = 0;
379 uint64_t        last_c_segment_to_warm_generation_id = 0;
380 boolean_t       hibernate_flushing = FALSE;
381 
382 _Atomic uint64_t c_segment_input_bytes = 0;
383 _Atomic uint64_t c_segment_compressed_bytes = 0;
384 _Atomic uint64_t compressor_bytes_used = 0;
385 
386 /* Keeps track of the most recent timestamp for when major compaction finished. */
387 mach_timespec_t major_compact_ts;
388 
389 struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE]  __attribute__ ((aligned(8)));
390 
391 static void vm_compressor_swap_trigger_thread(void);
392 static void vm_compressor_do_delayed_compactions(boolean_t);
393 static void vm_compressor_compact_and_swap(boolean_t);
394 static void vm_compressor_process_regular_swapped_in_segments(boolean_t);
395 static void vm_compressor_process_special_swapped_in_segments_locked(void);
396 
397 struct vm_compressor_swapper_stats vmcs_stats;
398 
399 static void vm_compressor_process_major_segments(bool);
400 #if XNU_TARGET_OS_OSX
401 static void vm_compressor_take_paging_space_action(void);
402 #endif /* XNU_TARGET_OS_OSX */
403 
404 void compute_swapout_target_age(void);
405 
406 boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
407 boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);
408 
409 int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
410 int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
411 void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
412 
413 void c_seg_move_to_sparse_list(c_segment_t);
414 void c_seg_insert_into_q(queue_head_t *, c_segment_t);
415 
416 uint64_t vm_available_memory(void);
417 
418 /*
419  * indicate the need to do a major compaction if
420  * the overall set of in-use compression segments
421  * becomes sparse... on systems that support pressure
422  * driven swapping, this will also cause swapouts to
423  * be initiated.
424  */
425 static bool
426 vm_compressor_needs_to_major_compact(void)
427 {
428 	uint32_t        incore_seg_count;
429 
430 	incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
431 
432 	/* second condition:
433 	 *   first term:
434 	 *   - (incore_seg_count * c_seg_max_pages) is the maximum number of pages this many segments could hold
435 	 *   - VM_PAGE_COMPRESSOR_COUNT is the number of pages the buffers actually hold
436 	 *   -- subtracting these gives the number of pages wasted as holes due to segments not being full
437 	 *   second term:
438 	 *   - 1/8 of the maximum number of pages this many segments could hold
439 	 *   meaning of the comparison: is the ratio of wasted space greater than 1/8?
440 	 * first condition:
441 	 *   compare the number of segments in use against the number that can ever be allocated;
442 	 *   if we don't have much data in the compressor, don't bother about holes (worked example below)
443 	 */
444 
445 	if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
446 	    ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
447 	    ((incore_seg_count / 8) * c_seg_max_pages)) {
448 		return true;
449 	}
450 	return false;
451 }
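
/*
 * Worked example for the check above (editor's illustration, numbers
 * assumed): with c_seg_max_pages = 16 and incore_seg_count = 1000, the
 * in-core segments could hold 16000 pages. If VM_PAGE_COMPRESSOR_COUNT
 * is 13000, the holes add up to 16000 - 13000 = 3000 pages, which exceeds
 * 1/8 of capacity ((1000 / 8) * 16 = 2000), so a major compaction is
 * requested -- provided c_segment_count has also reached
 * c_segments_nearing_limit / 8.
 */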
452 
453 TUNABLE_WRITEABLE(uint64_t, vm_compressor_minor_fragmentation_threshold_pct, "vm_compressor_minor_frag_threshold_pct", 10);
454 
455 static bool
456 vm_compressor_needs_to_minor_compact(void)
457 {
458 	uint32_t compactible_seg_count = os_atomic_load(&c_minor_count, relaxed);
459 	if (compactible_seg_count == 0) {
460 		return false;
461 	}
462 
463 	bool is_pressured = AVAILABLE_NON_COMPRESSED_MEMORY <
464 	    VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD;
465 	if (!is_pressured) {
466 		return false;
467 	}
468 
469 	uint64_t bytes_used = os_atomic_load(&compressor_bytes_used, relaxed);
470 	uint64_t bytes_total = VM_PAGE_COMPRESSOR_COUNT * PAGE_SIZE_64;
471 	uint64_t bytes_frag = bytes_total - bytes_used;
472 	bool is_fragmented = bytes_frag >
473 	    bytes_total * vm_compressor_minor_fragmentation_threshold_pct / 100;
474 
475 	return is_fragmented;
476 }
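
/*
 * Worked example (editor's illustration, numbers assumed): with a pool of
 * VM_PAGE_COMPRESSOR_COUNT = 100000 4K pages, bytes_total is 409600000.
 * At the default 10% threshold, minor compaction is requested once more
 * than 40960000 bytes of the pool is fragmentation -- but only while
 * segments are queued for compaction and available memory is below the
 * compact threshold.
 */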
477 
478 
479 uint64_t
480 vm_available_memory(void)
481 {
482 	return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
483 }
484 
485 
486 uint32_t
487 vm_compressor_pool_size(void)
488 {
489 	return VM_PAGE_COMPRESSOR_COUNT;
490 }
491 
492 uint32_t
493 vm_compressor_fragmentation_level(void)
494 {
495 	const uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
496 	if ((incore_seg_count == 0) || (c_seg_max_pages == 0)) {
497 		return 0;
498 	}
499 	return 100 - (vm_compressor_pool_size() * 100 / (incore_seg_count * c_seg_max_pages));
500 }
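
/*
 * Example (editor's illustration): 1000 in-core segments with
 * c_seg_max_pages = 16 could hold 16000 pages; a pool currently holding
 * 13000 pages reports 100 - (13000 * 100 / 16000) = 19 percent wasted.
 */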
501 
502 uint32_t
503 vm_compression_ratio(void)
504 {
505 	if (vm_compressor_pool_size() == 0) {
506 		return UINT32_MAX;
507 	}
508 	return c_segment_pages_compressed / vm_compressor_pool_size();
509 }
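
/*
 * Example (editor's illustration): this is an integer ratio of pages fed
 * into the compressor to pages of pool in use -- 30000 compressed pages
 * resident in a 10000-page pool yields 3, i.e. roughly 3:1 compression.
 */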
510 
511 uint64_t
512 vm_compressor_pages_compressed(void)
513 {
514 	return c_segment_pages_compressed * PAGE_SIZE_64;
515 }
516 
517 bool
518 vm_compressor_compressed_pages_nearing_limit(void)
519 {
520 	uint32_t pages = 0;
521 
522 #if CONFIG_FREEZE
523 	pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
524 #else /* CONFIG_FREEZE */
525 	pages = c_segment_pages_compressed;
526 #endif /* CONFIG_FREEZE */
527 
528 	return pages > c_segment_pages_compressed_nearing_limit;
529 }
530 
531 static bool
532 vm_compressor_segments_nearing_limit(void)
533 {
534 	uint64_t segments;
535 
536 #if CONFIG_FREEZE
537 	if (freezer_incore_cseg_acct) {
538 		if (os_sub_overflow(c_segment_count, c_swappedout_count, &segments)) {
539 			segments = 0;
540 		}
541 		if (os_sub_overflow(segments, c_swappedout_sparse_count, &segments)) {
542 			segments = 0;
543 		}
544 	} else {
545 		segments = os_atomic_load(&c_segment_count, relaxed);
546 	}
547 #else /* CONFIG_FREEZE */
548 	segments = c_segment_count;
549 #endif /* CONFIG_FREEZE */
550 
551 	return segments > c_segments_nearing_limit;
552 }
553 
554 boolean_t
555 vm_compressor_low_on_space(void)
556 {
557 	return vm_compressor_compressed_pages_nearing_limit() ||
558 	       vm_compressor_segments_nearing_limit();
559 }
560 
561 
562 boolean_t
563 vm_compressor_out_of_space(void)
564 {
565 #if CONFIG_FREEZE
566 	uint64_t incore_seg_count;
567 	uint32_t incore_compressed_pages;
568 	if (freezer_incore_cseg_acct) {
569 		if (os_sub_overflow(c_segment_count, c_swappedout_count, &incore_seg_count)) {
570 			incore_seg_count = 0;
571 		}
572 		if (os_sub_overflow(incore_seg_count, c_swappedout_sparse_count, &incore_seg_count)) {
573 			incore_seg_count = 0;
574 		}
575 		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
576 	} else {
577 		incore_seg_count = os_atomic_load(&c_segment_count, relaxed);
578 		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
579 	}
580 
581 	if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
582 	    (incore_seg_count > c_segments_incore_limit)) {
583 		return TRUE;
584 	}
585 #else /* CONFIG_FREEZE */
586 	if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
587 	    (c_segment_count >= c_segments_limit)) {
588 		return TRUE;
589 	}
590 #endif /* CONFIG_FREEZE */
591 	return FALSE;
592 }
593 
594 bool
595 vm_compressor_is_thrashing()
596 {
597 	compute_swapout_target_age();
598 
599 	if (swapout_target_age) {
600 		c_segment_t     c_seg;
601 
602 		lck_mtx_lock_spin_always(c_list_lock);
603 
604 		if (!queue_empty(&c_age_list_head)) {
605 			c_seg = (c_segment_t) queue_first(&c_age_list_head);
606 
607 			if (c_seg->c_creation_ts > swapout_target_age) {
608 				swapout_target_age = 0;
609 			}
610 		}
611 		lck_mtx_unlock_always(c_list_lock);
612 	}
613 
614 	return swapout_target_age != 0;
615 }
616 
617 
618 int
619 vm_wants_task_throttled(task_t task)
620 {
621 	ledger_amount_t compressed;
622 	if (task == kernel_task) {
623 		return 0;
624 	}
625 
626 	if (VM_CONFIG_SWAP_IS_ACTIVE) {
627 		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
628 			ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
629 			compressed >>= VM_MAP_PAGE_SHIFT(task->map);
630 			if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
631 				return 1;
632 			}
633 		}
634 	}
635 	return 0;
636 }
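
/*
 * Worked example (editor's illustration): with swap active, the
 * compressor low on space, and 400000 pages compressed system-wide, a
 * task whose internal_compressed ledger balance converts to more than
 * 400000 / 4 = 100000 pages (balance >> page shift) is reported as a
 * throttle candidate.
 */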
637 
638 
639 #if DEVELOPMENT || DEBUG
640 /*
641  * On compressor/swap exhaustion, kill the largest process regardless of
642  * its chosen process policy.
643  */
644 TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false);
645 #endif /* DEVELOPMENT || DEBUG */
646 
647 #if CONFIG_JETSAM
648 boolean_t       memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
649 void            memorystatus_thread_wake(void);
650 extern uint32_t jetsam_kill_on_low_swap;
651 bool            memorystatus_disable_swap(void);
652 #if CONFIG_PHANTOM_CACHE
653 extern bool memorystatus_phantom_cache_pressure;
654 #endif /* CONFIG_PHANTOM_CACHE */
655 int             compressor_thrashing_induced_jetsam = 0;
656 int             filecache_thrashing_induced_jetsam = 0;
657 static boolean_t        vm_compressor_thrashing_detected = FALSE;
658 #else  /* CONFIG_JETSAM */
659 static bool no_paging_space_action_in_progress = false;
660 extern void memorystatus_send_low_swap_note(void);
661 #endif /* CONFIG_JETSAM */
662 
663 static void
664 vm_compressor_take_paging_space_action(void)
665 {
666 #if CONFIG_JETSAM
667 	/*
668 	 * On systems with both swap and jetsam,
669 	 * just wake up the jetsam thread and have it handle the low swap condition
670 	 * by killing apps.
671 	 */
672 	if (jetsam_kill_on_low_swap) {
673 		memorystatus_thread_wake();
674 	}
675 #else /* CONFIG_JETSAM */
676 	if (os_atomic_cmpxchg(&no_paging_space_action_in_progress, false, true, relaxed)) {
677 		if (no_paging_space_action()) {
678 #if DEVELOPMENT || DEBUG
679 			if (kill_on_no_paging_space) {
680 				/*
681 				 * Since we are choosing to always kill a process, we don't need the
682 				 * "out of application memory" dialog box in this mode. And, hence we won't
683 				 * send the knote.
684 				 */
685 				os_atomic_store(&no_paging_space_action_in_progress, false, relaxed);
686 				return;
687 			}
688 #endif /* DEVELOPMENT || DEBUG */
689 			memorystatus_send_low_swap_note();
690 		}
691 		os_atomic_store(&no_paging_space_action_in_progress, false, relaxed);
692 	}
693 #endif /* !CONFIG_JETSAM */
694 }
695 
696 
697 void
698 vm_decompressor_lock(void)
699 {
700 	PAGE_REPLACEMENT_ALLOWED(TRUE);
701 
702 	decompressions_blocked = TRUE;
703 
704 	PAGE_REPLACEMENT_ALLOWED(FALSE);
705 }
706 
707 void
708 vm_decompressor_unlock(void)
709 {
710 	PAGE_REPLACEMENT_ALLOWED(TRUE);
711 
712 	decompressions_blocked = FALSE;
713 
714 	PAGE_REPLACEMENT_ALLOWED(FALSE);
715 
716 	thread_wakeup((event_t)&decompressions_blocked);
717 }
718 
719 static inline void
720 cslot_copy(c_slot_t cdst, c_slot_t csrc)
721 {
722 #if CHECKSUM_THE_DATA
723 	cdst->c_hash_data = csrc->c_hash_data;
724 #endif
725 #if CHECKSUM_THE_COMPRESSED_DATA
726 	cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
727 #endif
728 #if POPCOUNT_THE_COMPRESSED_DATA
729 	cdst->c_pop_cdata = csrc->c_pop_cdata;
730 #endif
731 	cdst->c_size = csrc->c_size;
732 	cdst->c_packed_ptr = csrc->c_packed_ptr;
733 #if defined(__arm64__)
734 	cdst->c_codec = csrc->c_codec;
735 #endif
736 }
737 
738 #if XNU_TARGET_OS_OSX
739 #define VM_COMPRESSOR_MAX_POOL_SIZE (192UL << 30)
740 #else
741 #define VM_COMPRESSOR_MAX_POOL_SIZE (0)
742 #endif
743 
744 static vm_map_size_t compressor_size;
745 static SECURITY_READ_ONLY_LATE(struct mach_vm_range) compressor_range;
746 vm_map_t compressor_map;
747 uint64_t compressor_pool_max_size;
748 uint64_t compressor_pool_size;
749 uint32_t compressor_pool_multiplier;
750 
751 #if DEVELOPMENT || DEBUG
752 /*
753  * Compressor segments are write-protected in development/debug
754  * kernels to help debug memory corruption.
755  * In cases where performance is a concern, this can be disabled
756  * via the boot-arg "-disable_cseg_write_protection".
757  */
758 boolean_t write_protect_c_segs = TRUE;
759 int vm_compressor_test_seg_wp;
760 uint32_t vm_ktrace_enabled;
761 #endif /* DEVELOPMENT || DEBUG */
762 
763 #if (XNU_TARGET_OS_OSX && __arm64__)
764 
765 #include <IOKit/IOPlatformExpert.h>
766 #include <sys/random.h>
767 
768 static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
769 static thread_call_t csegbufsz_experiment_thread_call;
770 
771 extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
772 static void
773 erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
774 {
775 	// Wait for NVRAM to be writable
776 	if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
777 		printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.");
778 	}
779 
780 	if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
781 		printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.", csegbufsizeExperimentProperty);
782 	}
783 	thread_call_free(csegbufsz_experiment_thread_call);
784 }
785 
786 static void
787 erase_csegbufsz_experiment_property_async()
788 {
789 	csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
790 		erase_csegbufsz_experiment_property,
791 		NULL,
792 		THREAD_CALL_PRIORITY_LOW
793 		);
794 	if (csegbufsz_experiment_thread_call == NULL) {
795 		printf("csegbufsize_experiment_property: Unable to allocate thread call.");
796 	} else {
797 		thread_call_enter(csegbufsz_experiment_thread_call);
798 	}
799 }
800 
801 static void
802 cleanup_csegbufsz_experiment(__unused void *arg0)
803 {
804 	char nvram = 0;
805 	unsigned int len = sizeof(nvram);
806 	if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
807 		erase_csegbufsz_experiment_property_async();
808 	}
809 }
810 
811 STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
812 #endif /* XNU_TARGET_OS_OSX && __arm64__ */
813 
814 #if CONFIG_JETSAM
815 extern unsigned int memorystatus_swap_all_apps;
816 #endif /* CONFIG_JETSAM */
817 
818 TUNABLE_DT(uint64_t, swap_vol_min_capacity, "/defaults", "kern.swap_min_capacity", "kern.swap_min_capacity", 0, TUNABLE_DT_NONE);
819 
820 static void
821 vm_compressor_set_size(void)
822 {
823 	/*
824 	 * Note that this function may be called multiple times on systems with app swap
825 	 * because the value of vm_swap_get_max_configured_space() and memorystatus_swap_all_apps
826 	 * can change based on the size of the swap volume. On these systems, we'll call
827 	 * this function once early in boot to reserve the maximum amount of VA required
828 	 * for the compressor submap and then one more time in vm_compressor_init after
829 	 * determining the swap volume size. We must not compute a larger size the second
830 	 * time around.
831 	 */
832 	vm_size_t       c_segments_arr_size = 0;
833 	struct c_slot_mapping tmp_slot_ptr;
834 
835 	/* The segment size can be overwritten by a boot-arg */
836 	if (!PE_parse_boot_argn("vm_compressor_segment_buffer_size", &c_seg_bufsize, sizeof(c_seg_bufsize))) {
837 #if CONFIG_JETSAM
838 		if (memorystatus_swap_all_apps) {
839 			c_seg_bufsize = C_SEG_BUFSIZE_ARM_SWAP;
840 		} else {
841 			c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
842 		}
843 #else
844 		c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
845 #endif /* CONFIG_JETSAM */
846 	}
847 
848 	vm_compressor_swap_init_swap_file_limit();
849 	if (vm_compression_limit) {
850 		compressor_pool_size = ptoa_64(vm_compression_limit);
851 	}
852 
853 	compressor_pool_max_size = C_SEG_MAX_LIMIT;
854 	compressor_pool_max_size *= c_seg_bufsize;
855 
856 #if XNU_TARGET_OS_OSX
857 
858 	if (vm_compression_limit == 0) {
859 		if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
860 			compressor_pool_size = 16ULL * max_mem;
861 		} else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
862 			compressor_pool_size = 8ULL * max_mem;
863 		} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
864 			compressor_pool_size = 4ULL * max_mem;
865 		} else {
866 			compressor_pool_size = 2ULL * max_mem;
867 		}
868 	}
869 	/*
870 	 * Cap the compressor pool size to a max of 192G
871 	 */
872 	if (compressor_pool_size > VM_COMPRESSOR_MAX_POOL_SIZE) {
873 		compressor_pool_size = VM_COMPRESSOR_MAX_POOL_SIZE;
874 	}
875 	if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
876 		compressor_pool_multiplier = 1;
877 	} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
878 		compressor_pool_multiplier = 2;
879 	} else {
880 		compressor_pool_multiplier = 4;
881 	}
882 
883 #else
884 
885 	if (compressor_pool_max_size > max_mem) {
886 		compressor_pool_max_size = max_mem;
887 	}
888 
889 	if (vm_compression_limit == 0) {
890 		compressor_pool_size = max_mem;
891 	}
892 
893 #if XNU_TARGET_OS_WATCH
894 	compressor_pool_multiplier = 2;
895 #elif XNU_TARGET_OS_IOS
896 	if (max_mem <= (2ULL * 1024ULL * 1024ULL * 1024ULL)) {
897 		compressor_pool_multiplier = 2;
898 	} else {
899 		compressor_pool_multiplier = 1;
900 	}
901 #else
902 	compressor_pool_multiplier = 1;
903 #endif
904 
905 #endif
906 
907 	PE_parse_boot_argn("kern.compressor_pool_multiplier", &compressor_pool_multiplier, sizeof(compressor_pool_multiplier));
908 	if (compressor_pool_multiplier < 1) {
909 		compressor_pool_multiplier = 1;
910 	}
911 
912 	if (compressor_pool_size > compressor_pool_max_size) {
913 		compressor_pool_size = compressor_pool_max_size;
914 	}
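
/*
 * Worked example (editor's illustration): a Mac with max_mem = 16GB falls
 * in the 8GB < max_mem <= 32GB tier above, so compressor_pool_size starts
 * at 4 * 16GB = 64GB of reserved VA (well under the 192GB cap) and
 * compressor_pool_multiplier is 2.
 */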
915 
916 	c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
917 	c_seg_slot_var_array_min_len = c_seg_max_pages;
918 
919 #if !defined(__x86_64__)
920 	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
921 	c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
922 #else
923 	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
924 	c_seg_allocsize = c_seg_bufsize;
925 #endif /* !defined(__x86_64__) */
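
/*
 * Example (editor's illustration; assumes a 64KB buffer and 16KB pages on
 * a non-x86_64 device): c_seg_max_pages = 4, c_seg_allocsize adds one
 * extra page, and c_seg_off_limit sits 512 bytes shy of the end of the
 * buffer -- once a filling segment's next offset passes that point, we
 * stop trying to squeeze in additional compressed pages.
 */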
926 
927 	c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
928 	tmp_slot_ptr.s_cseg = c_segments_limit;
929 	/* Panic on internal configs */
930 	assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
931 
932 	if (tmp_slot_ptr.s_cseg != c_segments_limit) {
933 		tmp_slot_ptr.s_cseg = -1;
934 		c_segments_limit = tmp_slot_ptr.s_cseg - 1; /* limited by segment idx bits in c_slot_mapping */
935 		compressor_pool_size = (c_segments_limit * (vm_size_t)(c_seg_allocsize));
936 	}
937 
938 	c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
939 
940 	/* an upper limit on how many input pages the compressor can hold */
941 	c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);
942 
943 	if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
944 #if defined(XNU_TARGET_OS_WATCH)
945 		c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
946 #else
947 		if (!vm_compression_limit) {
948 			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
949 		}
950 #endif
951 	}
952 
953 	c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
954 
955 #if CONFIG_FREEZE
956 	/*
957 	 * Our in-core limits are based on the size of the compressor pool.
958 	 * The c_segments_nearing_limit is also based on the compressor pool
959 	 * size and calculated above.
960 	 */
961 	c_segments_incore_limit = c_segments_limit;
962 
963 	if (freezer_incore_cseg_acct) {
964 		/*
965 		 * Add enough segments to track all frozen c_segs that can be stored in swap.
966 		 */
967 		c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
968 		tmp_slot_ptr.s_cseg = c_segments_limit;
969 		/* Panic on internal configs */
970 		assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: freezer reserve overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
971 	}
972 #endif
973 	/*
974 	 * Submap needs space for:
975 	 * - c_segments
976 	 * - c_buffers
977 	 * - swap reclamations -- c_seg_bufsize
978 	 */
979 	c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
980 	c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
981 
982 	compressor_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;
983 
984 #if RECORD_THE_COMPRESSED_DATA
985 	c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
986 	compressor_size += c_compressed_record_sbuf_size;
987 #endif /* RECORD_THE_COMPRESSED_DATA */
988 }
989 STARTUP(KMEM, STARTUP_RANK_FIRST, vm_compressor_set_size);
990 
991 KMEM_RANGE_REGISTER_DYNAMIC(compressor, &compressor_range, ^() {
992 	return compressor_size;
993 });
994 
995 bool
996 osenvironment_is_diagnostics(void)
997 {
998 	DTEntry chosen;
999 	const char *osenvironment;
1000 	unsigned int size;
1001 	if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
1002 		if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
1003 			return strcmp(osenvironment, "diagnostics") == 0;
1004 		}
1005 	}
1006 	return false;
1007 }
1008 
1009 void
1010 vm_compressor_init(void)
1011 {
1012 	thread_t        thread;
1013 #if RECORD_THE_COMPRESSED_DATA
1014 	vm_size_t       c_compressed_record_sbuf_size = 0;
1015 #endif /* RECORD_THE_COMPRESSED_DATA */
1016 
1017 #if DEVELOPMENT || DEBUG || CONFIG_FREEZE
1018 	char bootarg_name[32];
1019 #endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
1020 	__unused uint64_t early_boot_compressor_size = compressor_size;
1021 
1022 #if CONFIG_JETSAM
1023 	if (memorystatus_swap_all_apps && osenvironment_is_diagnostics()) {
1024 		printf("osenvironment == \"diagnostics\". Disabling app swap.\n");
1025 		memorystatus_disable_swap();
1026 	}
1027 
1028 	if (memorystatus_swap_all_apps) {
1029 		/*
1030 		 * App swap is disabled on devices with small NANDs.
1031 		 * Now that we're no longer in early boot, we can get
1032 		 * the NAND size and re-run vm_compressor_set_size.
1033 		 */
1034 		int error = vm_swap_vol_get_capacity(SWAP_VOLUME_NAME, &vm_swap_volume_capacity);
1035 #if DEVELOPMENT || DEBUG
1036 		if (error != 0) {
1037 			panic("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
1038 		}
1039 #else
1040 		if (error != 0) {
1041 			os_log_with_startup_serial(OS_LOG_DEFAULT, "vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
1042 		}
1043 #endif /* DEVELOPMENT || DEBUG */
1044 		if (vm_swap_volume_capacity < swap_vol_min_capacity) {
1045 			memorystatus_disable_swap();
1046 		}
1047 		/*
1048 		 * Resize the compressor and swap now that we know the capacity
1049 		 * of the swap volume.
1050 		 */
1051 		vm_compressor_set_size();
1052 		/*
1053 		 * We reserved a chunk of VA early in boot for the compressor submap.
1054 		 * We can't allocate more than that.
1055 		 */
1056 		assert(compressor_size <= early_boot_compressor_size);
1057 	}
1058 #endif /* CONFIG_JETSAM */
1059 
1060 #if DEVELOPMENT || DEBUG
1061 	if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
1062 		write_protect_c_segs = FALSE;
1063 	}
1064 
1065 	int vmcval = 1;
1066 #if defined(XNU_TARGET_OS_WATCH)
1067 	vmcval = 0;
1068 #endif /* XNU_TARGET_OS_WATCH */
1069 	PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
1070 
1071 	if (kern_feature_override(KF_COMPRSV_OVRD)) {
1072 		vmcval = 0;
1073 	}
1074 
1075 	if (vmcval == 0) {
1076 #if POPCOUNT_THE_COMPRESSED_DATA
1077 		popcount_c_segs = FALSE;
1078 #endif
1079 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
1080 		checksum_c_segs = FALSE;
1081 #endif
1082 #if VALIDATE_C_SEGMENTS
1083 		validate_c_segs = FALSE;
1084 #endif
1085 		write_protect_c_segs = FALSE;
1086 	}
1087 #endif /* DEVELOPMENT || DEBUG */
1088 
1089 #if CONFIG_FREEZE
1090 	if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
1091 		freezer_incore_cseg_acct = FALSE;
1092 	}
1093 #endif /* CONFIG_FREEZE */
1094 
1095 	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
1096 
1097 #if !XNU_TARGET_OS_OSX
1098 	vm_compressor_minorcompact_threshold_divisor = 20;
1099 	vm_compressor_majorcompact_threshold_divisor = 30;
1100 	vm_compressor_unthrottle_threshold_divisor = 40;
1101 	vm_compressor_catchup_threshold_divisor = 60;
1102 #else /* !XNU_TARGET_OS_OSX */
1103 	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
1104 		vm_compressor_minorcompact_threshold_divisor = 11;
1105 		vm_compressor_majorcompact_threshold_divisor = 13;
1106 		vm_compressor_unthrottle_threshold_divisor = 20;
1107 		vm_compressor_catchup_threshold_divisor = 35;
1108 	} else {
1109 		vm_compressor_minorcompact_threshold_divisor = 20;
1110 		vm_compressor_majorcompact_threshold_divisor = 25;
1111 		vm_compressor_unthrottle_threshold_divisor = 35;
1112 		vm_compressor_catchup_threshold_divisor = 50;
1113 	}
1114 #endif /* !XNU_TARGET_OS_OSX */
1115 
1116 	queue_init(&c_bad_list_head);
1117 	queue_init(&c_age_list_head);
1118 	queue_init(&c_minor_list_head);
1119 	queue_init(&c_major_list_head);
1120 	queue_init(&c_filling_list_head);
1121 	queue_init(&c_early_swapout_list_head);
1122 	queue_init(&c_regular_swapout_list_head);
1123 	queue_init(&c_late_swapout_list_head);
1124 	queue_init(&c_swapio_list_head);
1125 	queue_init(&c_early_swappedin_list_head);
1126 	queue_init(&c_regular_swappedin_list_head);
1127 	queue_init(&c_late_swappedin_list_head);
1128 	queue_init(&c_swappedout_list_head);
1129 	queue_init(&c_swappedout_sparse_list_head);
1130 
1131 	c_free_segno_head = -1;
1132 	c_segments_available = 0;
1133 
1134 	compressor_map = kmem_suballoc(kernel_map, &compressor_range.min_address,
1135 	    compressor_size, VM_MAP_CREATE_NEVER_FAULTS,
1136 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_NOFAIL | KMS_PERMANENT,
1137 	    VM_KERN_MEMORY_COMPRESSOR).kmr_submap;
1138 
1139 	kmem_alloc(compressor_map, (vm_offset_t *)(&c_segments),
1140 	    (sizeof(union c_segu) * c_segments_limit),
1141 	    KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT,
1142 	    VM_KERN_MEMORY_COMPRESSOR);
1143 	kmem_alloc(compressor_map, &c_buffers, c_buffers_size,
1144 	    KMA_NOFAIL | KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT,
1145 	    VM_KERN_MEMORY_COMPRESSOR);
1146 
1147 #if DEVELOPMENT || DEBUG
1148 	if (hvg_is_hcall_available(HVG_HCALL_SET_COREDUMP_DATA)) {
1149 		hvg_hcall_set_coredump_data();
1150 	}
1151 #endif
1152 
1153 	/*
1154 	 * Pick a segment size that minimizes fragmentation in zalloc
1155 	 * by minimizing the waste left at the end of a 16k run.
1156 	 *
1157 	 * c_seg_slot_var_array_min_len is larger on 4k systems than 16k ones,
1158 	 * making the fragmentation in a 4k page terrible. Using 16k for all
1159 	 * systems matches zalloc() and will minimize fragmentation.
1160 	 */
1161 	uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
1162 	uint32_t cnt  = (16 << 10) / c_segment_size;
1163 	uint32_t frag = (16 << 10) % c_segment_size;
1164 
1165 	c_seg_fixed_array_len = c_seg_slot_var_array_min_len;
1166 
1167 	while (cnt * sizeof(struct c_slot) < frag) {
1168 		c_segment_size += sizeof(struct c_slot);
1169 		c_seg_fixed_array_len++;
1170 		frag -= cnt * sizeof(struct c_slot);
1171 	}
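
/*
 * Worked example for the loop above (editor's illustration; assumes a
 * 500-byte c_segment_size and 8-byte c_slots): a 16K run fits cnt = 32
 * segments with frag = 384 bytes spare. One pass grows every segment by
 * one slot, consuming 32 * 8 = 256 of the spare; the remaining 128 bytes
 * are less than another full round, so the loop stops and each c_segment
 * gains an extra inline slot essentially for free.
 */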
1172 
1173 	compressor_segment_zone = zone_create("compressor_segment",
1174 	    c_segment_size, ZC_PGZ_USE_GUARDS | ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
1175 
1176 	c_segments_busy = FALSE;
1177 
1178 	c_segments_next_page = (caddr_t)c_segments;
1179 	vm_compressor_algorithm_init();
1180 
1181 	{
1182 		host_basic_info_data_t hinfo;
1183 		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1184 		size_t bufsize;
1185 		char *buf;
1186 
1187 #define BSD_HOST 1
1188 		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
1189 
1190 		compressor_cpus = hinfo.max_cpus;
1191 
1192 		/* allocate various scratch buffers at the same place */
1193 		bufsize = PAGE_SIZE;
1194 		bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
1195 		/* For the panic path */
1196 		bufsize += vm_compressor_get_decode_scratch_size();
1197 #if CONFIG_FREEZE
1198 		bufsize += vm_compressor_get_encode_scratch_size();
1199 #endif
1200 #if RECORD_THE_COMPRESSED_DATA
1201 		bufsize += c_compressed_record_sbuf_size;
1202 #endif
1203 
1204 		kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
1205 		    KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
1206 		    VM_KERN_MEMORY_COMPRESSOR);
1207 
1208 		/*
1209 		 * vm_compressor_kdp_state.kc_panic_decompressed_page must be page aligned because we access
1210 		 * it through the physical aperture by page number.
1211 		 */
1212 		vm_compressor_kdp_state.kc_panic_decompressed_page = buf;
1213 		vm_compressor_kdp_state.kc_panic_decompressed_page_paddr = kvtophys((vm_offset_t)vm_compressor_kdp_state.kc_panic_decompressed_page);
1214 		vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum = (ppnum_t) atop(vm_compressor_kdp_state.kc_panic_decompressed_page_paddr);
1215 		buf += PAGE_SIZE;
1216 		bufsize -= PAGE_SIZE;
1217 
1218 		compressor_scratch_bufs = buf;
1219 		buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
1220 		bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();
1221 
1222 		vm_compressor_kdp_state.kc_panic_scratch_buf = buf;
1223 		buf += vm_compressor_get_decode_scratch_size();
1224 		bufsize -= vm_compressor_get_decode_scratch_size();
1225 
1226 		/* This is set up before each stackshot in vm_compressor_kdp_init */
1227 		vm_compressor_kdp_state.kc_scratch_bufs = NULL;
1228 
1229 #if CONFIG_FREEZE
1230 		freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
1231 		buf += vm_compressor_get_encode_scratch_size();
1232 		bufsize -= vm_compressor_get_encode_scratch_size();
1233 #endif
1234 
1235 #if RECORD_THE_COMPRESSED_DATA
1236 		c_compressed_record_sbuf = buf;
1237 		c_compressed_record_cptr = buf;
1238 		c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
1239 		buf += c_compressed_record_sbuf_size;
1240 		bufsize -= c_compressed_record_sbuf_size;
1241 #endif
1242 		assert(bufsize == 0);
1243 	}
1244 
1245 	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
1246 	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
1247 		panic("vm_compressor_swap_trigger_thread: create failed");
1248 	}
1249 	thread_deallocate(thread);
1250 
1251 	if (vm_pageout_internal_start() != KERN_SUCCESS) {
1252 		panic("vm_compressor_init: Failed to start the internal pageout thread.");
1253 	}
1254 	if (VM_CONFIG_SWAP_IS_PRESENT) {
1255 		vm_compressor_swap_init();
1256 	}
1257 
1258 	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1259 		vm_compressor_is_active = 1;
1260 	}
1261 
1262 	vm_compressor_available = 1;
1263 
1264 	vm_page_reactivate_all_throttled();
1265 
1266 	bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
1267 }
1268 
1269 #define COMPRESSOR_KDP_BUFSIZE ( \
1270 	(vm_compressor_get_decode_scratch_size() * compressor_cpus) + \
1271 	(PAGE_SIZE * compressor_cpus) + \
1272 	(sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus) + \
1273 	(sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus))
1274 
1275 
1276 /**
1277  * Initializes the VM compressor in preparation for a stackshot.
1278  * Stackshot mutex must be held.
1279  */
1280 kern_return_t
1281 vm_compressor_kdp_init(void)
1282 {
1283 	char *buf;
1284 	kern_return_t err;
1285 	size_t bufsize;
1286 	size_t total_decode_size;
1287 
1288 #if DEVELOPMENT || DEBUG
1289 	extern lck_mtx_t stackshot_subsys_mutex;
1290 	lck_mtx_assert(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
1291 #endif /* DEVELOPMENT || DEBUG */
1292 
1293 	if (!vm_compressor_available) {
1294 		return KERN_SUCCESS;
1295 	}
1296 
1297 	bufsize = COMPRESSOR_KDP_BUFSIZE;
1298 
1299 	/* Allocate the per-cpu decompression pages. */
1300 	err = kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
1301 	    KMA_DATA | KMA_NOFAIL | KMA_KOBJECT,
1302 	    VM_KERN_MEMORY_COMPRESSOR);
1303 
1304 	if (err != KERN_SUCCESS) {
1305 		return err;
1306 	}
1307 
1308 	assert(vm_compressor_kdp_state.kc_scratch_bufs == NULL);
1309 	vm_compressor_kdp_state.kc_scratch_bufs = buf;
1310 	total_decode_size = vm_compressor_get_decode_scratch_size() * compressor_cpus;
1311 	buf += total_decode_size;
1312 	bufsize -= total_decode_size;
1313 
1314 	/*
1315 	 * vm_compressor_kdp_state.kc_decompressed_pages must be page aligned because we access
1316 	 * it through the physical aperture by page number.
1317 	 */
1318 	assert(vm_compressor_kdp_state.kc_decompressed_pages == NULL);
1319 	vm_compressor_kdp_state.kc_decompressed_pages = buf;
1320 	buf += PAGE_SIZE * compressor_cpus;
1321 	bufsize -= PAGE_SIZE * compressor_cpus;
1322 
1323 	/* Scary! This will be aligned, I promise :) */
1324 	assert(((vm_address_t) buf) % _Alignof(addr64_t) == 0);
1325 	assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr == NULL);
1326 	vm_compressor_kdp_state.kc_decompressed_pages_paddr = (addr64_t*) (void*) buf;
1327 	buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
1328 	bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
1329 
1330 	assert(((vm_address_t) buf) % _Alignof(ppnum_t) == 0);
1331 	assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum == NULL);
1332 	vm_compressor_kdp_state.kc_decompressed_pages_ppnum = (ppnum_t*) (void*) buf;
1333 	buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
1334 	bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
1335 
1336 	assert(bufsize == 0);
1337 
1338 	for (size_t i = 0; i < compressor_cpus; i++) {
1339 		vm_offset_t offset = (vm_offset_t) &vm_compressor_kdp_state.kc_decompressed_pages[i * PAGE_SIZE];
1340 		vm_compressor_kdp_state.kc_decompressed_pages_paddr[i] = kvtophys(offset);
1341 		vm_compressor_kdp_state.kc_decompressed_pages_ppnum[i] = (ppnum_t) atop(vm_compressor_kdp_state.kc_decompressed_pages_paddr[i]);
1342 	}
1343 
1344 	return KERN_SUCCESS;
1345 }
1346 
1347 /*
1348  * Frees up compressor buffers used by stackshot.
1349  * Stackshot mutex must be held.
1350  */
1351 void
1352 vm_compressor_kdp_teardown(void)
1353 {
1354 	extern lck_mtx_t stackshot_subsys_mutex;
1355 	LCK_MTX_ASSERT(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
1356 
1357 	if (vm_compressor_kdp_state.kc_scratch_bufs == NULL) {
1358 		return;
1359 	}
1360 
1361 	/* Deallocate the per-cpu decompression pages. */
1362 	kmem_free(kernel_map, (vm_offset_t) vm_compressor_kdp_state.kc_scratch_bufs, COMPRESSOR_KDP_BUFSIZE);
1363 
1364 	vm_compressor_kdp_state.kc_scratch_bufs = NULL;
1365 	vm_compressor_kdp_state.kc_decompressed_pages = NULL;
1366 	vm_compressor_kdp_state.kc_decompressed_pages_paddr = 0;
1367 	vm_compressor_kdp_state.kc_decompressed_pages_ppnum = 0;
1368 }
1369 
1370 #if VALIDATE_C_SEGMENTS
1371 
1372 static void
1373 c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
1374 {
1375 	uint16_t        c_indx;
1376 	int32_t         bytes_used;
1377 	uint32_t        c_rounded_size;
1378 	uint32_t        c_size;
1379 	c_slot_t        cs;
1380 
1381 	if (__probable(validate_c_segs == FALSE)) {
1382 		return;
1383 	}
1384 	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
1385 		c_indx = c_seg->c_firstemptyslot;
1386 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1387 
1388 		if (cs == NULL) {
1389 			panic("c_seg_validate:  no slot backing c_firstemptyslot");
1390 		}
1391 
1392 		if (cs->c_size) {
1393 			panic("c_seg_validate:  c_firstemptyslot has non-zero size (%d)", cs->c_size);
1394 		}
1395 	}
1396 	bytes_used = 0;
1397 
1398 	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
1399 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1400 
1401 		c_size = UNPACK_C_SIZE(cs);
1402 
1403 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1404 
1405 		bytes_used += c_rounded_size;
1406 
1407 #if CHECKSUM_THE_COMPRESSED_DATA
1408 		unsigned csvhash;
1409 		if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
1410 			addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
1411 			panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
1412 		}
1413 #endif
1414 #if POPCOUNT_THE_COMPRESSED_DATA
1415 		unsigned csvpop;
1416 		if (c_size) {
1417 			uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
1418 			if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
1419 				panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
1420 			}
1421 		}
1422 #endif
1423 	}
1424 
1425 	if (bytes_used != c_seg->c_bytes_used) {
1426 		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
1427 	}
1428 
1429 	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1430 		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
1431 		    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1432 	}
1433 
1434 	if (must_be_compact) {
1435 		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1436 			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
1437 			    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1438 		}
1439 	}
1440 }
1441 
1442 #endif
1443 
1444 
1445 void
1446 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1447 {
1448 	boolean_t       clear_busy = FALSE;
1449 
1450 	if (c_list_lock_held == FALSE) {
1451 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1452 			C_SEG_BUSY(c_seg);
1453 
1454 			lck_mtx_unlock_always(&c_seg->c_lock);
1455 			lck_mtx_lock_spin_always(c_list_lock);
1456 			lck_mtx_lock_spin_always(&c_seg->c_lock);
1457 
1458 			clear_busy = TRUE;
1459 		}
1460 	}
1461 	assert(c_seg->c_state != C_IS_FILLING);
1462 
1463 	if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) && !c_seg->c_has_donated_pages) {
1464 		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1465 		c_seg->c_on_minorcompact_q = 1;
1466 		os_atomic_inc(&c_minor_count, relaxed);
1467 	}
1468 	if (c_list_lock_held == FALSE) {
1469 		lck_mtx_unlock_always(c_list_lock);
1470 	}
1471 
1472 	if (clear_busy == TRUE) {
1473 		C_SEG_WAKEUP_DONE(c_seg);
1474 	}
1475 }
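
/*
 * Note on the locking dance above (editor's sketch of the invariant): the
 * lock order is c_list_lock before c_seg->c_lock, so when the try-lock
 * fails the segment is marked busy, its own lock is dropped, and both
 * locks are retaken in the legal order; the busy bit keeps the segment
 * from being freed or repurposed while it is briefly unlocked.
 */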
1476 
1477 
1478 unsigned int c_seg_moved_to_sparse_list = 0;
1479 
1480 void
1481 c_seg_move_to_sparse_list(c_segment_t c_seg)
1482 {
1483 	boolean_t       clear_busy = FALSE;
1484 
1485 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1486 		C_SEG_BUSY(c_seg);
1487 
1488 		lck_mtx_unlock_always(&c_seg->c_lock);
1489 		lck_mtx_lock_spin_always(c_list_lock);
1490 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1491 
1492 		clear_busy = TRUE;
1493 	}
1494 	c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);
1495 
1496 	c_seg_moved_to_sparse_list++;
1497 
1498 	lck_mtx_unlock_always(c_list_lock);
1499 
1500 	if (clear_busy == TRUE) {
1501 		C_SEG_WAKEUP_DONE(c_seg);
1502 	}
1503 }
1504 
1505 
1506 
1507 
1508 int try_minor_compaction_failed = 0;
1509 int try_minor_compaction_succeeded = 0;
1510 
1511 void
1512 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
1513 {
1514 	assert(c_seg->c_on_minorcompact_q);
1515 	/*
1516 	 * c_seg is currently on the delayed minor compaction
1517 	 * queue and we have c_seg locked... if we can get the
1518 	 * c_list_lock w/o blocking (if we blocked we could deadlock
1519 	 * because the lock order is c_list_lock then c_seg's lock)
1520 	 * we'll pull it from the delayed list and free it directly
1521 	 */
1522 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1523 		/*
1524 		 * someone else holds c_list_lock, so we need to bail
1525 		 */
1526 		try_minor_compaction_failed++;
1527 
1528 		lck_mtx_unlock_always(&c_seg->c_lock);
1529 	} else {
1530 		try_minor_compaction_succeeded++;
1531 
1532 		C_SEG_BUSY(c_seg);
1533 		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
1534 	}
1535 }
1536 
1537 
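/*
 * Pull c_seg off the minor-compaction queue (if it's still there) and
 * minor-compact it. Called with c_list_lock and the c_seg lock held
 * and the segment busy; returns the number of segments freed (1 if
 * the compaction emptied and freed the segment, 0 otherwise).
 */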
1538 int
1539 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
1540 {
1541 	int     c_seg_freed;
1542 
1543 	assert(c_seg->c_busy);
1544 	assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
1545 
1546 	/*
1547 	 * check for the case that can occur when we are not swapping
1548 	 * and this segment has been major compacted in the past
1549 	 * and moved to the majorcompact q to remove it from further
1550 	 * consideration... if the occupancy falls too low we need
1551 	 * to put it back on the age_q so that it will be considered
1552 	 * in the next major compaction sweep... if we don't do this
1553 	 * we will eventually run into the c_segments_limit
1554 	 */
1555 	if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
1556 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
1557 	}
1558 	if (!c_seg->c_on_minorcompact_q) {
1559 		if (clear_busy == TRUE) {
1560 			C_SEG_WAKEUP_DONE(c_seg);
1561 		}
1562 
1563 		lck_mtx_unlock_always(&c_seg->c_lock);
1564 
1565 		return 0;
1566 	}
1567 	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
1568 	c_seg->c_on_minorcompact_q = 0;
1569 	os_atomic_dec(&c_minor_count, relaxed);
1570 
1571 	lck_mtx_unlock_always(c_list_lock);
1572 
1573 	if (disallow_page_replacement == TRUE) {
1574 		lck_mtx_unlock_always(&c_seg->c_lock);
1575 
1576 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
1577 
1578 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1579 	}
1580 	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);
1581 
1582 	if (disallow_page_replacement == TRUE) {
1583 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
1584 	}
1585 
1586 	if (need_list_lock == TRUE) {
1587 		lck_mtx_lock_spin_always(c_list_lock);
1588 	}
1589 
1590 	return c_seg_freed;
1591 }
1592 
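/*
 * kdp/stackshot support: report the thread that holds a c_seg busy so
 * a waiter blocked on that segment can be attributed to its owner.
 */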
1593 void
1594 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1595 {
1596 	c_segment_t c_seg = (c_segment_t) wait_event;
1597 
1598 	waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1599 	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1600 }
1601 
1602 #if DEVELOPMENT || DEBUG
1603 int
1604 do_cseg_wedge_thread(void)
1605 {
1606 	struct c_segment c_seg;
1607 	c_seg.c_busy_for_thread = current_thread();
1608 
1609 	debug_cseg_wait_event = (event_t) &c_seg;
1610 
1611 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1612 	assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1613 
1614 	thread_block(THREAD_CONTINUE_NULL);
1615 
1616 	return 0;
1617 }
1618 
1619 int
1620 do_cseg_unwedge_thread(void)
1621 {
1622 	thread_wakeup(debug_cseg_wait_event);
1623 	debug_cseg_wait_event = NULL;
1624 
1625 	return 0;
1626 }
1627 #endif /* DEVELOPMENT || DEBUG */
1628 
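/*
 * Block until the busy holder of c_seg wakes us; entered with the
 * c_seg lock held, which we drop before blocking.
 */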
1629 void
1630 c_seg_wait_on_busy(c_segment_t c_seg)
1631 {
1632 	c_seg->c_wanted = 1;
1633 
1634 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1635 	assert_wait((event_t) (c_seg), THREAD_UNINT);
1636 
1637 	lck_mtx_unlock_always(&c_seg->c_lock);
1638 	thread_block(THREAD_CONTINUE_NULL);
1639 }
1640 
1641 #if CONFIG_FREEZE
1642 /*
1643  * We don't have the task lock held while updating the task's
1644  * c_seg queues. We can do that because of the following restrictions:
1645  *
1646  * - SINGLE FREEZER CONTEXT:
1647  *   We 'insert' c_segs into the task list on the task_freeze path.
1648  *   There can only be one such freeze in progress and the task
1649  *   isn't disappearing because we have the VM map lock held throughout
1650  *   and we have a reference on the proc too.
1651  *
1652  * - SINGLE TASK DISOWN CONTEXT:
1653  *   We 'disown' c_segs of a task ONLY from the task_terminate context. So
1654  *   we don't need the task lock but we need the c_list_lock and the
1655  *   compressor master lock (shared). We also hold the individual
1656  *   c_seg locks (exclusive).
1657  *
1658  *   If we either:
1659  *   - can't get the c_seg lock on a try, then we start again because maybe
1660  *   the c_seg is part of a compaction and might get freed. So we can't trust
1661  *   that linkage and need to restart our queue traversal.
1662  *   - OR, we run into a busy c_seg (say one being swapped in or freed): we
1663  *   drop all locks again and wait and restart our queue traversal.
1664  *
1665  * - The new_owner_task below is currently only the kernel or NULL.
1666  *
1667  */
1668 void
1669 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1670 {
1671 	task_t          owner_task = c_seg->c_task_owner;
1672 	uint64_t        uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1673 
1674 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1675 	LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1676 
1677 	if (owner_task) {
1678 		task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1679 		queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1680 		    c_segment_t, c_task_list_next_cseg);
1681 	}
1682 
1683 	if (new_owner_task) {
1684 		queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1685 		    c_segment_t, c_task_list_next_cseg);
1686 		task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1687 	}
1688 
1689 	c_seg->c_task_owner = new_owner_task;
1690 }
1691 
1692 void
1693 task_disown_frozen_csegs(task_t owner_task)
1694 {
1695 	c_segment_t c_seg = NULL, next_cseg = NULL;
1696 
1697 again:
1698 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
1699 	lck_mtx_lock_spin_always(c_list_lock);
1700 
1701 	for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1702 	    !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1703 	    c_seg = next_cseg) {
1704 		next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1705 
1706 		if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1707 			lck_mtx_unlock(c_list_lock);
1708 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1709 			goto again;
1710 		}
1711 
1712 		if (c_seg->c_busy) {
1713 			lck_mtx_unlock(c_list_lock);
1714 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1715 
1716 			c_seg_wait_on_busy(c_seg);
1717 
1718 			goto again;
1719 		}
1720 		assert(c_seg->c_task_owner == owner_task);
1721 		c_seg_update_task_owner(c_seg, kernel_task);
1722 		lck_mtx_unlock_always(&c_seg->c_lock);
1723 	}
1724 
1725 	lck_mtx_unlock(c_list_lock);
1726 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
1727 }
1728 #endif /* CONFIG_FREEZE */
1729 
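/*
 * Move c_seg from the queue for its current state to the queue for
 * new_state, keeping the per-queue counts in sync. The asserts below
 * encode the legal state transitions. The caller holds c_list_lock,
 * and the c_seg lock as well except while the segment is filling.
 */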
1730 void
1731 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1732 {
1733 	int     old_state = c_seg->c_state;
1734 	queue_head_t *donate_swapout_list_head, *donate_swappedin_list_head;
1735 	uint32_t     *donate_swapout_count, *donate_swappedin_count;
1736 
1737 	/*
1738 	 * On macOS the donate queue is swapped first, i.e. it is the c_early_swapout
1739 	 * queue. On other swap-capable platforms we want donated segments swapped
1740 	 * out last, so we use the c_late_swapout queue.
1741 	 */
1742 #if XNU_TARGET_OS_OSX  /* tag:DONATE */
1743 #if (DEVELOPMENT || DEBUG)
1744 	if (new_state != C_IS_FILLING) {
1745 		LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1746 	}
1747 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1748 #endif /* DEVELOPMENT || DEBUG */
1749 
1750 	donate_swapout_list_head = &c_early_swapout_list_head;
1751 	donate_swapout_count = &c_early_swapout_count;
1752 	donate_swappedin_list_head = &c_early_swappedin_list_head;
1753 	donate_swappedin_count = &c_early_swappedin_count;
1754 #else /* XNU_TARGET_OS_OSX */
1755 	donate_swapout_list_head = &c_late_swapout_list_head;
1756 	donate_swapout_count = &c_late_swapout_count;
1757 	donate_swappedin_list_head = &c_late_swappedin_list_head;
1758 	donate_swappedin_count = &c_late_swappedin_count;
1759 #endif /* XNU_TARGET_OS_OSX */
1760 
1761 	switch (old_state) {
1762 	case C_IS_EMPTY:
1763 		assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1764 
1765 		c_empty_count--;
1766 		break;
1767 
1768 	case C_IS_FILLING:
1769 		assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1770 
1771 		queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1772 		c_filling_count--;
1773 		break;
1774 
1775 	case C_ON_AGE_Q:
1776 		assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1777 		    new_state == C_IS_FREE);
1778 
1779 		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1780 		c_age_count--;
1781 		break;
1782 
1783 	case C_ON_SWAPPEDIN_Q:
1784 		if (c_seg->c_has_donated_pages) {
1785 			assert(new_state == C_ON_SWAPOUT_Q || new_state == C_IS_FREE);
1786 			queue_remove(donate_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1787 			*donate_swappedin_count -= 1;
1788 		} else {
1789 			assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1790 #if CONFIG_FREEZE
1791 			assert(c_seg->c_has_freezer_pages);
1792 			queue_remove(&c_early_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1793 			c_early_swappedin_count--;
1794 #else /* CONFIG_FREEZE */
1795 			queue_remove(&c_regular_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1796 			c_regular_swappedin_count--;
1797 #endif /* CONFIG_FREEZE */
1798 		}
1799 		break;
1800 
1801 	case C_ON_SWAPOUT_Q:
1802 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1803 
1804 #if CONFIG_FREEZE
1805 		if (c_seg->c_has_freezer_pages) {
1806 			if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1807 				c_seg_update_task_owner(c_seg, NULL);
1808 			}
1809 			queue_remove(&c_early_swapout_list_head, c_seg, c_segment_t, c_age_list);
1810 			c_early_swapout_count--;
1811 		} else
1812 #endif /* CONFIG_FREEZE */
1813 		{
1814 			if (c_seg->c_has_donated_pages) {
1815 				queue_remove(donate_swapout_list_head, c_seg, c_segment_t, c_age_list);
1816 				*donate_swapout_count -= 1;
1817 			} else {
1818 				queue_remove(&c_regular_swapout_list_head, c_seg, c_segment_t, c_age_list);
1819 				c_regular_swapout_count--;
1820 			}
1821 		}
1822 
1823 		if (new_state == C_ON_AGE_Q) {
1824 			c_seg->c_has_donated_pages = 0;
1825 		}
1826 		thread_wakeup((event_t)&compaction_swapper_running);
1827 		break;
1828 
1829 	case C_ON_SWAPIO_Q:
1830 #if CONFIG_FREEZE
1831 		if (c_seg->c_has_freezer_pages) {
1832 			assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1833 		} else
1834 #endif /* CONFIG_FREEZE */
1835 		{
1836 			if (c_seg->c_has_donated_pages) {
1837 				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_SWAPPEDIN_Q);
1838 			} else {
1839 				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1840 			}
1841 		}
1842 
1843 		queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1844 		c_swapio_count--;
1845 		break;
1846 
1847 	case C_ON_SWAPPEDOUT_Q:
1848 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1849 		    new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1850 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1851 
1852 		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1853 		c_swappedout_count--;
1854 		break;
1855 
1856 	case C_ON_SWAPPEDOUTSPARSE_Q:
1857 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1858 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1859 
1860 		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1861 		c_swappedout_sparse_count--;
1862 		break;
1863 
1864 	case C_ON_MAJORCOMPACT_Q:
1865 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1866 
1867 		queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1868 		c_major_count--;
1869 		break;
1870 
1871 	case C_ON_BAD_Q:
1872 		assert(new_state == C_IS_FREE);
1873 
1874 		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1875 		c_bad_count--;
1876 		break;
1877 
1878 	default:
1879 		panic("c_seg %p has bad c_state = %d", c_seg, old_state);
1880 	}
1881 
1882 	switch (new_state) {
1883 	case C_IS_FREE:
1884 		assert(old_state != C_IS_FILLING);
1885 
1886 		break;
1887 
1888 	case C_IS_EMPTY:
1889 		assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1890 
1891 		c_empty_count++;
1892 		break;
1893 
1894 	case C_IS_FILLING:
1895 		assert(old_state == C_IS_EMPTY);
1896 
1897 		queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1898 		c_filling_count++;
1899 		break;
1900 
1901 	case C_ON_AGE_Q:
1902 		assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1903 		    old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1904 		    old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1905 
1906 		assert(!c_seg->c_has_donated_pages);
1907 		if (old_state == C_IS_FILLING) {
1908 			queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1909 		} else {
1910 			if (!queue_empty(&c_age_list_head)) {
1911 				c_segment_t     c_first;
1912 
1913 				c_first = (c_segment_t)queue_first(&c_age_list_head);
1914 				c_seg->c_creation_ts = c_first->c_creation_ts;
1915 			}
1916 			queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1917 		}
1918 		c_age_count++;
1919 		break;
1920 
1921 	case C_ON_SWAPPEDIN_Q:
1922 	{
1923 		queue_head_t *list_head;
1924 
1925 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q || old_state == C_ON_SWAPIO_Q);
1926 		if (c_seg->c_has_donated_pages) {
1927 			/* An error in swapout can occur while the c_seg is still on the swapio queue */
1928 			list_head = donate_swappedin_list_head;
1929 			*donate_swappedin_count += 1;
1930 		} else {
1931 #if CONFIG_FREEZE
1932 			assert(c_seg->c_has_freezer_pages);
1933 			list_head = &c_early_swappedin_list_head;
1934 			c_early_swappedin_count++;
1935 #else /* CONFIG_FREEZE */
1936 			list_head = &c_regular_swappedin_list_head;
1937 			c_regular_swappedin_count++;
1938 #endif /* CONFIG_FREEZE */
1939 		}
1940 
1941 		if (insert_head == TRUE) {
1942 			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1943 		} else {
1944 			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1945 		}
1946 		break;
1947 	}
1948 
1949 	case C_ON_SWAPOUT_Q:
1950 	{
1951 		queue_head_t *list_head;
1952 
1953 #if CONFIG_FREEZE
1954 		/*
1955 		 * A segment with both identities (frozen + donated pages)
1956 		 * will be put on the early swapout Q, i.e. the frozen identity wins.
1957 		 * This is because when both identities are set, the donation bit
1958 		 * is added afterwards in the c_current_seg_filled path for
1959 		 * accounting purposes.
1960 		 */
1961 		if (c_seg->c_has_freezer_pages) {
1962 			assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1963 			list_head = &c_early_swapout_list_head;
1964 			c_early_swapout_count++;
1965 		} else
1966 #endif
1967 		{
1968 			if (c_seg->c_has_donated_pages) {
1969 				assert(old_state == C_ON_SWAPPEDIN_Q || old_state == C_IS_FILLING);
1970 				list_head = donate_swapout_list_head;
1971 				*donate_swapout_count += 1;
1972 			} else {
1973 				assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1974 				list_head = &c_regular_swapout_list_head;
1975 				c_regular_swapout_count++;
1976 			}
1977 		}
1978 
1979 		if (insert_head == TRUE) {
1980 			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1981 		} else {
1982 			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1983 		}
1984 		break;
1985 	}
1986 
1987 	case C_ON_SWAPIO_Q:
1988 		assert(old_state == C_ON_SWAPOUT_Q);
1989 
1990 		if (insert_head == TRUE) {
1991 			queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1992 		} else {
1993 			queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1994 		}
1995 		c_swapio_count++;
1996 		break;
1997 
1998 	case C_ON_SWAPPEDOUT_Q:
1999 		assert(old_state == C_ON_SWAPIO_Q);
2000 
2001 		if (insert_head == TRUE) {
2002 			queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
2003 		} else {
2004 			queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
2005 		}
2006 		c_swappedout_count++;
2007 		break;
2008 
2009 	case C_ON_SWAPPEDOUTSPARSE_Q:
2010 		assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
2011 
2012 		if (insert_head == TRUE) {
2013 			queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
2014 		} else {
2015 			queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
2016 		}
2017 
2018 		c_swappedout_sparse_count++;
2019 		break;
2020 
2021 	case C_ON_MAJORCOMPACT_Q:
2022 		assert(old_state == C_ON_AGE_Q);
2023 		assert(!c_seg->c_has_donated_pages);
2024 
2025 		if (insert_head == TRUE) {
2026 			queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
2027 		} else {
2028 			queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
2029 		}
2030 		c_major_count++;
2031 		break;
2032 
2033 	case C_ON_BAD_Q:
2034 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
2035 
2036 		if (insert_head == TRUE) {
2037 			queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2038 		} else {
2039 			queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2040 		}
2041 		c_bad_count++;
2042 		break;
2043 
2044 	default:
2045 		panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
2046 	}
2047 	c_seg->c_state = new_state;
2048 }
2049 
2050 
2051 
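/*
 * Free a busy segment: drop the c_seg lock so c_list_lock can be
 * taken first (the required lock order), retake the c_seg lock and
 * let c_seg_free_locked() do the real work.
 */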
2052 void
2053 c_seg_free(c_segment_t c_seg)
2054 {
2055 	assert(c_seg->c_busy);
2056 
2057 	lck_mtx_unlock_always(&c_seg->c_lock);
2058 	lck_mtx_lock_spin_always(c_list_lock);
2059 	lck_mtx_lock_spin_always(&c_seg->c_lock);
2060 
2061 	c_seg_free_locked(c_seg);
2062 }
2063 
2064 
2065 void
2066 c_seg_free_locked(c_segment_t c_seg)
2067 {
2068 	int             segno;
2069 	int             pages_populated = 0;
2070 	int32_t         *c_buffer = NULL;
2071 	uint64_t        c_swap_handle = 0;
2072 
2073 	assert(c_seg->c_busy);
2074 	assert(c_seg->c_slots_used == 0);
2075 	assert(!c_seg->c_on_minorcompact_q);
2076 	assert(!c_seg->c_busy_swapping);
2077 
2078 	if (c_seg->c_overage_swap == TRUE) {
2079 		c_overage_swapped_count--;
2080 		c_seg->c_overage_swap = FALSE;
2081 	}
2082 	if (!(C_SEG_IS_ONDISK(c_seg))) {
2083 		c_buffer = c_seg->c_store.c_buffer;
2084 	} else {
2085 		c_swap_handle = c_seg->c_store.c_swap_handle;
2086 	}
2087 
2088 	c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
2089 
2090 	if (c_buffer) {
2091 		pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
2092 		c_seg->c_store.c_buffer = NULL;
2093 	} else {
2094 #if CONFIG_FREEZE
2095 		c_seg_update_task_owner(c_seg, NULL);
2096 #endif /* CONFIG_FREEZE */
2097 
2098 		c_seg->c_store.c_swap_handle = (uint64_t)-1;
2099 	}
2100 
2101 	lck_mtx_unlock_always(&c_seg->c_lock);
2102 
2103 	lck_mtx_unlock_always(c_list_lock);
2104 
2105 	if (c_buffer) {
2106 		if (pages_populated) {
2107 			kernel_memory_depopulate((vm_offset_t)c_buffer,
2108 			    ptoa(pages_populated), KMA_COMPRESSOR,
2109 			    VM_KERN_MEMORY_COMPRESSOR);
2110 		}
2111 	} else if (c_swap_handle) {
2112 		/*
2113 		 * Free swap space on disk.
2114 		 */
2115 		vm_swap_free(c_swap_handle);
2116 	}
2117 	lck_mtx_lock_spin_always(&c_seg->c_lock);
2118 	/*
2119 	 * c_seg must remain busy until
2120 	 * after the call to vm_swap_free
2121 	 */
2122 	C_SEG_WAKEUP_DONE(c_seg);
2123 	lck_mtx_unlock_always(&c_seg->c_lock);
2124 
2125 	segno = c_seg->c_mysegno;
2126 
2127 	lck_mtx_lock_spin_always(c_list_lock);
2128 	/*
2129 	 * because the c_buffer is now associated with the segno,
2130 	 * we can't put the segno back on the free list until
2131 	 * after we have depopulated the c_buffer range, or
2132 	 * we run the risk of depopulating a range that is
2133 	 * now being used in one of the compressor heads
2134 	 */
2135 	c_segments[segno].c_segno = c_free_segno_head;
2136 	c_free_segno_head = segno;
2137 	c_segment_count--;
2138 
2139 	lck_mtx_unlock_always(c_list_lock);
2140 
2141 	lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
2142 
2143 	if (c_seg->c_slot_var_array_len) {
2144 		kfree_type(struct c_slot, c_seg->c_slot_var_array_len,
2145 		    c_seg->c_slot_var_array);
2146 	}
2147 
2148 	zfree(compressor_segment_zone, c_seg);
2149 }
2150 
2151 #if DEVELOPMENT || DEBUG
2152 int c_seg_trim_page_count = 0;
2153 #endif
2154 
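/*
 * Trim unused space from the tail of a segment: walk backward past
 * trailing empty slots, then pull c_nextoffset back to just after the
 * last live slot and round c_populated_offset up to a page boundary.
 */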
2155 void
2156 c_seg_trim_tail(c_segment_t c_seg)
2157 {
2158 	c_slot_t        cs;
2159 	uint32_t        c_size;
2160 	uint32_t        c_offset;
2161 	uint32_t        c_rounded_size;
2162 	uint16_t        current_nextslot;
2163 	uint32_t        current_populated_offset;
2164 
2165 	if (c_seg->c_bytes_used == 0) {
2166 		return;
2167 	}
2168 	current_nextslot = c_seg->c_nextslot;
2169 	current_populated_offset = c_seg->c_populated_offset;
2170 
2171 	while (c_seg->c_nextslot) {
2172 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
2173 
2174 		c_size = UNPACK_C_SIZE(cs);
2175 
2176 		if (c_size) {
2177 			if (current_nextslot != c_seg->c_nextslot) {
2178 				c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2179 				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2180 
2181 				c_seg->c_nextoffset = c_offset;
2182 				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
2183 				    ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2184 
2185 				if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
2186 					c_seg->c_firstemptyslot = c_seg->c_nextslot;
2187 				}
2188 #if DEVELOPMENT || DEBUG
2189 				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
2190 				    round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
2191 				    PAGE_SIZE);
2192 #endif
2193 			}
2194 			break;
2195 		}
2196 		c_seg->c_nextslot--;
2197 	}
2198 	assert(c_seg->c_nextslot);
2199 }
2200 
2201 
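/*
 * Minor compaction: slide live slots down over freed ones so the
 * segment is densely packed again, fix up each moved slot's back
 * pointer, and depopulate any pages left unused at the tail. Frees
 * the segment outright (returning 1) when nothing in it is used.
 */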
2202 int
2203 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
2204 {
2205 	c_slot_mapping_t slot_ptr;
2206 	uint32_t        c_offset = 0;
2207 	uint32_t        old_populated_offset;
2208 	uint32_t        c_rounded_size;
2209 	uint32_t        c_size;
2210 	uint16_t        c_indx = 0;
2211 	int             i;
2212 	c_slot_t        c_dst;
2213 	c_slot_t        c_src;
2214 
2215 	assert(c_seg->c_busy);
2216 
2217 #if VALIDATE_C_SEGMENTS
2218 	c_seg_validate(c_seg, FALSE);
2219 #endif
2220 	if (c_seg->c_bytes_used == 0) {
2221 		c_seg_free(c_seg);
2222 		return 1;
2223 	}
2224 	lck_mtx_unlock_always(&c_seg->c_lock);
2225 
2226 	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
2227 		goto done;
2228 	}
2229 
2230 /* TODO: assert first emptyslot's c_size is actually 0 */
2231 
2232 #if DEVELOPMENT || DEBUG
2233 	C_SEG_MAKE_WRITEABLE(c_seg);
2234 #endif
2235 
2236 #if VALIDATE_C_SEGMENTS
2237 	c_seg->c_was_minor_compacted++;
2238 #endif
2239 	c_indx = c_seg->c_firstemptyslot;
2240 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2241 
2242 	old_populated_offset = c_seg->c_populated_offset;
2243 	c_offset = c_dst->c_offset;
2244 
2245 	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
2246 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
2247 
2248 		c_size = UNPACK_C_SIZE(c_src);
2249 
2250 		if (c_size == 0) {
2251 			continue;
2252 		}
2253 
2254 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2255 /* N.B.: This memcpy may be an overlapping copy */
2256 		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
2257 
2258 		cslot_copy(c_dst, c_src);
2259 		c_dst->c_offset = c_offset;
2260 
2261 		slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2262 		slot_ptr->s_cindx = c_indx;
2263 
2264 		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2265 		PACK_C_SIZE(c_src, 0);
2266 		c_indx++;
2267 
2268 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2269 	}
2270 	c_seg->c_firstemptyslot = c_indx;
2271 	c_seg->c_nextslot = c_indx;
2272 	c_seg->c_nextoffset = c_offset;
2273 	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2274 	c_seg->c_bytes_unused = 0;
2275 
2276 #if VALIDATE_C_SEGMENTS
2277 	c_seg_validate(c_seg, TRUE);
2278 #endif
2279 	if (old_populated_offset > c_seg->c_populated_offset) {
2280 		uint32_t        gc_size;
2281 		int32_t         *gc_ptr;
2282 
2283 		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
2284 		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
2285 
2286 		kernel_memory_depopulate((vm_offset_t)gc_ptr, gc_size,
2287 		    KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
2288 	}
2289 
2290 #if DEVELOPMENT || DEBUG
2291 	C_SEG_WRITE_PROTECT(c_seg);
2292 #endif
2293 
2294 done:
2295 	if (clear_busy == TRUE) {
2296 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2297 		C_SEG_WAKEUP_DONE(c_seg);
2298 		lck_mtx_unlock_always(&c_seg->c_lock);
2299 	}
2300 	return 0;
2301 }
2302 
2303 
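/*
 * Make sure a slot exists for c_nextslot: once the fixed array is
 * exhausted, grow the variable slot array by doubling (starting at
 * c_seg_slot_var_array_min_len). The allocation is done without the
 * c_seg lock; only the copy and the pointer switch happen under it.
 */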
2304 static void
2305 c_seg_alloc_nextslot(c_segment_t c_seg)
2306 {
2307 	struct c_slot   *old_slot_array = NULL;
2308 	struct c_slot   *new_slot_array = NULL;
2309 	int             newlen;
2310 	int             oldlen;
2311 
2312 	if (c_seg->c_nextslot < c_seg_fixed_array_len) {
2313 		return;
2314 	}
2315 
2316 	if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
2317 		oldlen = c_seg->c_slot_var_array_len;
2318 		old_slot_array = c_seg->c_slot_var_array;
2319 
2320 		if (oldlen == 0) {
2321 			newlen = c_seg_slot_var_array_min_len;
2322 		} else {
2323 			newlen = oldlen * 2;
2324 		}
2325 
2326 		new_slot_array = kalloc_type(struct c_slot, newlen, Z_WAITOK);
2327 
2328 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2329 
2330 		if (old_slot_array) {
2331 			memcpy(new_slot_array, old_slot_array,
2332 			    sizeof(struct c_slot) * oldlen);
2333 		}
2334 
2335 		c_seg->c_slot_var_array_len = newlen;
2336 		c_seg->c_slot_var_array = new_slot_array;
2337 
2338 		lck_mtx_unlock_always(&c_seg->c_lock);
2339 
2340 		kfree_type(struct c_slot, oldlen, old_slot_array);
2341 	}
2342 }
2343 
2344 
2345 #define C_SEG_MAJOR_COMPACT_STATS_MAX   (30)
2346 
2347 struct {
2348 	uint64_t asked_permission;
2349 	uint64_t compactions;
2350 	uint64_t moved_slots;
2351 	uint64_t moved_bytes;
2352 	uint64_t wasted_space_in_swapouts;
2353 	uint64_t count_of_swapouts;
2354 	uint64_t count_of_freed_segs;
2355 	uint64_t bailed_compactions;
2356 	uint64_t bytes_freed_rate_us;
2357 } c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
2358 
2359 int c_seg_major_compact_stats_now = 0;
2360 
2361 
2362 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE     ((c_seg_bufsize * 90) / 100)
2363 
2364 
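/*
 * Decide whether compacting c_seg_src into c_seg_dst is worthwhile:
 * refuse when both segments are already ~90% full, or when the
 * destination is out of offsets or slots.
 */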
2365 boolean_t
2366 c_seg_major_compact_ok(
2367 	c_segment_t c_seg_dst,
2368 	c_segment_t c_seg_src)
2369 {
2370 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
2371 
2372 	if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
2373 	    c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
2374 		return FALSE;
2375 	}
2376 
2377 	if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2378 		/*
2379 		 * destination segment is full... can't compact
2380 		 */
2381 		return FALSE;
2382 	}
2383 
2384 	return TRUE;
2385 }
2386 
2387 
2388 boolean_t
2389 c_seg_major_compact(
2390 	c_segment_t c_seg_dst,
2391 	c_segment_t c_seg_src)
2392 {
2393 	c_slot_mapping_t slot_ptr;
2394 	uint32_t        c_rounded_size;
2395 	uint32_t        c_size;
2396 	uint16_t        dst_slot;
2397 	int             i;
2398 	c_slot_t        c_dst;
2399 	c_slot_t        c_src;
2400 	boolean_t       keep_compacting = TRUE;
2401 
2402 	/*
2403 	 * segments are not locked but they are both marked c_busy
2404 	 * which keeps c_decompress from working on them...
2405 	 * we can safely allocate new pages, move compressed data
2406 	 * from c_seg_src to c_seg_dst and update both c_segments'
2407 	 * state w/o holding the master lock
2408 	 */
2409 #if DEVELOPMENT || DEBUG
2410 	C_SEG_MAKE_WRITEABLE(c_seg_dst);
2411 #endif
2412 
2413 #if VALIDATE_C_SEGMENTS
2414 	c_seg_dst->c_was_major_compacted++;
2415 	c_seg_src->c_was_major_donor++;
2416 #endif
2417 	assertf(c_seg_dst->c_has_donated_pages == c_seg_src->c_has_donated_pages, "Mismatched donation status Dst: %p, Src: %p\n", c_seg_dst, c_seg_src);
2418 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
2419 
2420 	dst_slot = c_seg_dst->c_nextslot;
2421 
2422 	for (i = 0; i < c_seg_src->c_nextslot; i++) {
2423 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
2424 
2425 		c_size = UNPACK_C_SIZE(c_src);
2426 
2427 		if (c_size == 0) {
2428 			/* BATCH: move what we have so far; */
2429 			continue;
2430 		}
2431 
2432 		int combined_size;
2433 		combined_size = c_size;
2434 		c_rounded_size = (combined_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2435 
2436 		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) combined_size) {
2437 			int     size_to_populate;
2438 
2439 			/* doesn't fit */
2440 			size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
2441 
2442 			if (size_to_populate == 0) {
2443 				/* can't fit */
2444 				keep_compacting = FALSE;
2445 				break;
2446 			}
2447 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
2448 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
2449 			}
2450 
2451 			kernel_memory_populate(
2452 				(vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
2453 				size_to_populate,
2454 				KMA_NOFAIL | KMA_COMPRESSOR,
2455 				VM_KERN_MEMORY_COMPRESSOR);
2456 
2457 			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
2458 			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
2459 		}
2460 		c_seg_alloc_nextslot(c_seg_dst);
2461 
2462 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
2463 
2464 		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
2465 
2466 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
2467 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += combined_size;
2468 
2469 		cslot_copy(c_dst, c_src);
2470 		c_dst->c_offset = c_seg_dst->c_nextoffset;
2471 
2472 		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
2473 			c_seg_dst->c_firstemptyslot++;
2474 		}
2475 		c_seg_dst->c_slots_used++;
2476 		c_seg_dst->c_nextslot++;
2477 		c_seg_dst->c_bytes_used += c_rounded_size;
2478 		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2479 
2480 		PACK_C_SIZE(c_src, 0);
2481 
2482 		c_seg_src->c_bytes_used -= c_rounded_size;
2483 		c_seg_src->c_bytes_unused += c_rounded_size;
2484 		c_seg_src->c_firstemptyslot = 0;
2485 
2486 		assert(c_seg_src->c_slots_used);
2487 		c_seg_src->c_slots_used--;
2488 
2489 		if (!c_seg_src->c_swappedin) {
2490 			/* Pessimistically lose swappedin status when non-swappedin pages are added. */
2491 			c_seg_dst->c_swappedin = false;
2492 		}
2493 
2494 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2495 			/* dest segment is now full */
2496 			keep_compacting = FALSE;
2497 			break;
2498 		}
2499 	}
2500 #if DEVELOPMENT || DEBUG
2501 	C_SEG_WRITE_PROTECT(c_seg_dst);
2502 #endif
2503 	if (dst_slot < c_seg_dst->c_nextslot) {
2504 		PAGE_REPLACEMENT_ALLOWED(TRUE);
2505 		/*
2506 		 * we've now locked out c_decompress from
2507 		 * converting the slot passed into it into
2508 		 * a c_segment_t which allows us to use
2509 		 * the backptr to change which c_segment and
2510 		 * index the slot points to
2511 		 */
2512 		while (dst_slot < c_seg_dst->c_nextslot) {
2513 			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
2514 
2515 			slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2516 			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
2517 			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
2518 			slot_ptr->s_cindx = dst_slot++;
2519 		}
2520 		PAGE_REPLACEMENT_ALLOWED(FALSE);
2521 	}
2522 	return keep_compacting;
2523 }
2524 
2525 
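/*
 * Convert two (sec, nsec) timestamps to milliseconds and subtract,
 * e.g. (2s, 500000000ns) - (1s, 250000000ns) = 2500 - 1250 = 1250 ms.
 */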
2526 uint64_t
2527 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2528 {
2529 	uint64_t end_msecs;
2530 	uint64_t start_msecs;
2531 
2532 	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2533 	start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2534 
2535 	return end_msecs - start_msecs;
2536 }
2537 
2538 
2539 
2540 uint32_t compressor_eval_period_in_msecs = 250;
2541 uint32_t compressor_sample_min_in_msecs = 500;
2542 uint32_t compressor_sample_max_in_msecs = 10000;
2543 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2544 uint32_t compressor_thrashing_min_per_10msecs = 20;
2545 
2546 /* When true, reset sample data next chance we get. */
2547 static boolean_t        compressor_need_sample_reset = FALSE;
2548 
2549 
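/*
 * Thrashing detector: sample decompression activity and, once the
 * decompression rate crosses the thrashing threshold, estimate the
 * working set as the segment age that covers ~95% of the sampled
 * decompressions. If that working set would still fit in the
 * compressor, set swapout_target_age so that only segments old
 * enough to fall outside it become swap candidates.
 */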
2550 void
2551 compute_swapout_target_age(void)
2552 {
2553 	clock_sec_t     cur_ts_sec;
2554 	clock_nsec_t    cur_ts_nsec;
2555 	uint32_t        min_operations_needed_in_this_sample;
2556 	uint64_t        elapsed_msecs_in_eval;
2557 	uint64_t        elapsed_msecs_in_sample;
2558 	boolean_t       need_eval_reset = FALSE;
2559 
2560 	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2561 
2562 	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2563 
2564 	if (compressor_need_sample_reset ||
2565 	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2566 		compressor_need_sample_reset = TRUE;
2567 		need_eval_reset = TRUE;
2568 		goto done;
2569 	}
2570 	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2571 
2572 	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2573 		goto done;
2574 	}
2575 	need_eval_reset = TRUE;
2576 
2577 	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2578 
2579 	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2580 
2581 	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2582 	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2583 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2584 		    sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2585 
2586 		swapout_target_age = 0;
2587 
2588 		compressor_need_sample_reset = TRUE;
2589 		need_eval_reset = TRUE;
2590 		goto done;
2591 	}
2592 	last_eval_compression_count = sample_period_compression_count;
2593 	last_eval_decompression_count = sample_period_decompression_count;
2594 
2595 	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2596 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2597 		goto done;
2598 	}
2599 	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2600 		uint64_t        running_total;
2601 		uint64_t        working_target;
2602 		uint64_t        aging_target;
2603 		uint32_t        oldest_age_of_csegs_sampled = 0;
2604 		uint64_t        working_set_approximation = 0;
2605 
2606 		swapout_target_age = 0;
2607 
2608 		working_target = (sample_period_decompression_count / 100) * 95;                /* 95 percent */
2609 		aging_target = (sample_period_decompression_count / 100) * 1;                   /* 1 percent */
2610 		running_total = 0;
2611 
2612 		for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2613 			running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2614 
2615 			working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2616 
2617 			if (running_total >= working_target) {
2618 				break;
2619 			}
2620 		}
2621 		if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2622 			working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2623 
2624 			if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
2625 				running_total = overage_decompressions_during_sample_period;
2626 
2627 				for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2628 					running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2629 
2630 					if (running_total >= aging_target) {
2631 						break;
2632 					}
2633 				}
2634 				swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2635 
2636 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2637 			} else {
2638 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2639 			}
2640 		} else {
2641 			KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2642 		}
2643 
2644 		compressor_need_sample_reset = TRUE;
2645 		need_eval_reset = TRUE;
2646 	} else {
2647 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2648 	}
2649 done:
2650 	if (compressor_need_sample_reset == TRUE) {
2651 		bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2652 		overage_decompressions_during_sample_period = 0;
2653 
2654 		start_of_sample_period_sec = cur_ts_sec;
2655 		start_of_sample_period_nsec = cur_ts_nsec;
2656 		sample_period_decompression_count = 0;
2657 		sample_period_compression_count = 0;
2658 		last_eval_decompression_count = 0;
2659 		last_eval_compression_count = 0;
2660 		compressor_need_sample_reset = FALSE;
2661 	}
2662 	if (need_eval_reset == TRUE) {
2663 		start_of_eval_period_sec = cur_ts_sec;
2664 		start_of_eval_period_nsec = cur_ts_nsec;
2665 	}
2666 }
2667 
2668 
2669 int             compaction_swapper_init_now = 0;
2670 int             compaction_swapper_running = 0;
2671 int             compaction_swapper_awakened = 0;
2672 int             compaction_swapper_abort = 0;
2673 
2674 bool
2675 vm_compressor_swapout_is_ripe()
2676 {
2677 	bool is_ripe = false;
2678 	if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2679 		c_segment_t     c_seg;
2680 		clock_sec_t     now;
2681 		clock_sec_t     age;
2682 		clock_nsec_t    nsec;
2683 
2684 		clock_get_system_nanotime(&now, &nsec);
2685 		age = 0;
2686 
2687 		lck_mtx_lock_spin_always(c_list_lock);
2688 
2689 		if (!queue_empty(&c_age_list_head)) {
2690 			c_seg = (c_segment_t) queue_first(&c_age_list_head);
2691 
2692 			age = now - c_seg->c_creation_ts;
2693 		}
2694 		lck_mtx_unlock_always(c_list_lock);
2695 
2696 		if (age >= vm_ripe_target_age) {
2697 			is_ripe = true;
2698 		}
2699 	}
2700 	return is_ripe;
2701 }
2702 
2703 static bool
2704 compressor_swapout_conditions_met(void)
2705 {
2706 	bool should_swap = false;
2707 	if (COMPRESSOR_NEEDS_TO_SWAP()) {
2708 		should_swap = true;
2709 		vmcs_stats.compressor_swap_threshold_exceeded++;
2710 	}
2711 	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2712 		should_swap = true;
2713 		vmcs_stats.external_q_throttled++;
2714 	}
2715 	if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2716 		should_swap = true;
2717 		vmcs_stats.free_count_below_reserve++;
2718 	}
2719 	return should_swap;
2720 }
2721 
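/*
 * Decide whether the compactor/swapper should swap: ripe overage
 * segments, the swapout thresholds above, thrashing, or phantom-cache
 * pressure can all trigger it. Under CONFIG_JETSAM a detected thrash
 * wakes the memorystatus thread and suppresses swapping so jetsam can
 * free memory first. Even when no swap is needed, we return true if
 * fragmentation calls for a major compaction.
 */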
2722 static bool
2723 compressor_needs_to_swap()
2724 {
2725 	bool should_swap = false;
2726 	if (vm_compressor_swapout_is_ripe()) {
2727 		should_swap = true;
2728 		goto check_if_low_space;
2729 	}
2730 
2731 	if (VM_CONFIG_SWAP_IS_ACTIVE) {
2732 		should_swap =  compressor_swapout_conditions_met();
2733 		if (should_swap) {
2734 			goto check_if_low_space;
2735 		}
2736 	}
2737 
2738 #if (XNU_TARGET_OS_OSX && __arm64__)
2739 	/*
2740 	 * Thrashing detection disabled.
2741 	 */
2742 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2743 
2744 	if (vm_compressor_is_thrashing()) {
2745 		should_swap = true;
2746 		vmcs_stats.thrashing_detected++;
2747 	}
2748 
2749 #if CONFIG_PHANTOM_CACHE
2750 	if (vm_phantom_cache_check_pressure()) {
2751 		os_atomic_store(&memorystatus_phantom_cache_pressure, true, release);
2752 		should_swap = true;
2753 	}
2754 #endif
2755 	if (swapout_target_age) {
2756 		should_swap = true;
2757 	}
2758 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2759 
2760 check_if_low_space:
2761 
2762 #if CONFIG_JETSAM
2763 	if (should_swap || vm_compressor_low_on_space() == TRUE) {
2764 		if (vm_compressor_thrashing_detected == FALSE) {
2765 			vm_compressor_thrashing_detected = TRUE;
2766 
2767 			if (swapout_target_age) {
2768 				compressor_thrashing_induced_jetsam++;
2769 			} else if (vm_compressor_low_on_space() == TRUE) {
2770 				compressor_thrashing_induced_jetsam++;
2771 			} else {
2772 				filecache_thrashing_induced_jetsam++;
2773 			}
2774 			/*
2775 			 * Wake up the memorystatus thread so that it can return
2776 			 * the system to a healthy state (by killing processes).
2777 			 */
2778 			memorystatus_thread_wake();
2779 		}
2780 		/*
2781 		 * let the jetsam take precedence over
2782 		 * any major compactions we might have
2783 		 * been able to do... otherwise we run
2784 		 * the risk of doing major compactions
2785 		 * on segments we're about to free up
2786 		 * due to the jetsam activity.
2787 		 */
2788 		should_swap = false;
2789 		if (memorystatus_swap_all_apps && vm_swap_low_on_space()) {
2790 			vm_compressor_take_paging_space_action();
2791 		}
2792 	}
2793 
2794 #else /* CONFIG_JETSAM */
2795 	if (should_swap && vm_swap_low_on_space()) {
2796 		vm_compressor_take_paging_space_action();
2797 	}
2798 #endif /* CONFIG_JETSAM */
2799 
2800 	if (should_swap == false) {
2801 		/*
2802 		 * vm_compressor_needs_to_major_compact returns true only if we're
2803 		 * about to run out of available compressor segments... in this
2804 		 * case, we absolutely need to run a major compaction even if
2805 		 * we've just kicked off a jetsam or we don't otherwise need to
2806 		 * swap... terminating objects releases
2807 		 * pages back to the uncompressed cache, but does not guarantee
2808 		 * that we will free up even a single compression segment
2809 		 */
2810 		should_swap = vm_compressor_needs_to_major_compact();
2811 		if (should_swap) {
2812 			vmcs_stats.fragmentation_detected++;
2813 		}
2814 	}
2815 
2816 	/*
2817 	 * returning TRUE when swap_supported == FALSE
2818 	 * will cause the major compaction engine to
2819 	 * run, but will not trigger any swapping...
2820 	 * segments that have been major compacted
2821 	 * will be moved to the majorcompact queue
2822 	 */
2823 	return should_swap;
2824 }
2825 
2826 #if CONFIG_JETSAM
2827 /*
2828  * This function is called from the jetsam thread after killing something to
2829  * mitigate thrashing.
2830  *
2831  * We need to restart our thrashing detection heuristics since memory pressure
2832  * has potentially changed significantly, and we don't want to detect on old
2833  * data from before the jetsam.
2834  */
2835 void
2836 vm_thrashing_jetsam_done(void)
2837 {
2838 	vm_compressor_thrashing_detected = FALSE;
2839 
2840 	/* Were we compressor-thrashing or filecache-thrashing? */
2841 	if (swapout_target_age) {
2842 		swapout_target_age = 0;
2843 		compressor_need_sample_reset = TRUE;
2844 	}
2845 #if CONFIG_PHANTOM_CACHE
2846 	else {
2847 		vm_phantom_cache_restart_sample();
2848 	}
2849 #endif
2850 }
2851 #endif /* CONFIG_JETSAM */
2852 
2853 uint32_t vm_wake_compactor_swapper_calls = 0;
2854 uint32_t vm_run_compactor_already_running = 0;
2855 uint32_t vm_run_compactor_empty_minor_q = 0;
2856 uint32_t vm_run_compactor_did_compact = 0;
2857 uint32_t vm_run_compactor_waited = 0;
2858 
2859 /* run minor compaction right now, if the compaction-swapper thread is not already running */
2860 void
2861 vm_run_compactor(void)
2862 {
2863 	if (c_segment_count == 0) {
2864 		return;
2865 	}
2866 
2867 	if (os_atomic_load(&c_minor_count, relaxed) == 0) {
2868 		vm_run_compactor_empty_minor_q++;
2869 		return;
2870 	}
2871 
2872 	lck_mtx_lock_spin_always(c_list_lock);
2873 
2874 	if (compaction_swapper_running) {
2875 		if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2876 			vm_run_compactor_already_running++;
2877 
2878 			lck_mtx_unlock_always(c_list_lock);
2879 			return;
2880 		}
2881 		vm_run_compactor_waited++;
2882 
2883 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2884 
2885 		lck_mtx_unlock_always(c_list_lock);
2886 
2887 		thread_block(THREAD_CONTINUE_NULL);
2888 
2889 		return;
2890 	}
2891 	vm_run_compactor_did_compact++;
2892 
2893 	fastwake_warmup = FALSE;
2894 	compaction_swapper_running = 1;
2895 
2896 	vm_compressor_do_delayed_compactions(FALSE);
2897 
2898 	compaction_swapper_running = 0;
2899 
2900 	lck_mtx_unlock_always(c_list_lock);
2901 
2902 	thread_wakeup((event_t)&compaction_swapper_running);
2903 }
2904 
2905 
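/*
 * Wake the compactor/swapper thread if minor-compaction work is
 * queued or a major compaction is needed, and it isn't already
 * running or awakened.
 */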
2906 void
2907 vm_wake_compactor_swapper(void)
2908 {
2909 	if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2910 		return;
2911 	}
2912 
2913 	if (os_atomic_load(&c_minor_count, relaxed) ||
2914 	    vm_compressor_needs_to_major_compact()) {
2915 		lck_mtx_lock_spin_always(c_list_lock);
2916 
2917 		fastwake_warmup = FALSE;
2918 
2919 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2920 			vm_wake_compactor_swapper_calls++;
2921 
2922 			compaction_swapper_awakened = 1;
2923 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2924 		}
2925 		lck_mtx_unlock_always(c_list_lock);
2926 	}
2927 }
2928 
2929 
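/*
 * Synchronously force a swap pass: abort and wait out any running
 * compaction pass, then age ripe major-queue segments back onto the
 * age queue and run a full compact-and-swap with ripe-segment
 * swapping enabled.
 */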
2930 void
2931 vm_consider_swapping()
2932 {
2933 	assert(VM_CONFIG_SWAP_IS_PRESENT);
2934 
2935 	lck_mtx_lock_spin_always(c_list_lock);
2936 
2937 	compaction_swapper_abort = 1;
2938 
2939 	while (compaction_swapper_running) {
2940 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2941 
2942 		lck_mtx_unlock_always(c_list_lock);
2943 
2944 		thread_block(THREAD_CONTINUE_NULL);
2945 
2946 		lck_mtx_lock_spin_always(c_list_lock);
2947 	}
2948 	compaction_swapper_abort = 0;
2949 	compaction_swapper_running = 1;
2950 
2951 	vm_swapout_ripe_segments = TRUE;
2952 
2953 	vm_compressor_process_major_segments(vm_swapout_ripe_segments);
2954 
2955 	vm_compressor_compact_and_swap(FALSE);
2956 
2957 	compaction_swapper_running = 0;
2958 
2959 	vm_swapout_ripe_segments = FALSE;
2960 
2961 	lck_mtx_unlock_always(c_list_lock);
2962 
2963 	thread_wakeup((event_t)&compaction_swapper_running);
2964 }
2965 
2966 
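/*
 * Wake the compactor/swapper if it still needs its first-time
 * initialization, or if minor compaction or swapping work has
 * accumulated.
 */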
2967 void
2968 vm_consider_waking_compactor_swapper(void)
2969 {
2970 	bool need_wakeup = false;
2971 
2972 	if (c_segment_count == 0) {
2973 		return;
2974 	}
2975 
2976 	if (compaction_swapper_running || compaction_swapper_awakened) {
2977 		return;
2978 	}
2979 
2980 	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
2981 		compaction_swapper_init_now = 1;
2982 		need_wakeup = true;
2983 	} else if (vm_compressor_needs_to_minor_compact() ||
2984 	    compressor_needs_to_swap()) {
2985 		need_wakeup = true;
2986 	}
2987 
2988 	if (need_wakeup) {
2989 		lck_mtx_lock_spin_always(c_list_lock);
2990 
2991 		fastwake_warmup = FALSE;
2992 
2993 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2994 			memoryshot(DBG_VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
2995 
2996 			compaction_swapper_awakened = 1;
2997 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2998 		}
2999 		lck_mtx_unlock_always(c_list_lock);
3000 	}
3001 }
3002 
3003 
3004 #define C_SWAPOUT_LIMIT                 4
3005 #define DELAYED_COMPACTIONS_PER_PASS    30
3006 
3007 /* process segments that are in the minor compaction queue */
3008 void
3009 vm_compressor_do_delayed_compactions(boolean_t flush_all)
3010 {
3011 	c_segment_t     c_seg;
3012 	int             number_compacted = 0;
3013 	boolean_t       needs_to_swap = FALSE;
3014 	uint32_t        c_swapout_count = 0;
3015 
3016 
3017 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
3018 
3019 #if XNU_TARGET_OS_OSX
3020 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
3021 #endif /* XNU_TARGET_OS_OSX */
3022 
3023 	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
3024 		c_seg = (c_segment_t)queue_first(&c_minor_list_head);
3025 
3026 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3027 
3028 		if (c_seg->c_busy) {
3029 			lck_mtx_unlock_always(c_list_lock);
3030 			c_seg_wait_on_busy(c_seg);
3031 			lck_mtx_lock_spin_always(c_list_lock);
3032 
3033 			continue;
3034 		}
3035 		C_SEG_BUSY(c_seg);
3036 
3037 		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
3038 
3039 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3040 		if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
3041 			if ((flush_all == TRUE || compressor_needs_to_swap()) && c_swapout_count < C_SWAPOUT_LIMIT) {
3042 				needs_to_swap = TRUE;
3043 			}
3044 
3045 			number_compacted = 0;
3046 		}
3047 		lck_mtx_lock_spin_always(c_list_lock);
3048 	}
3049 
3050 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
3051 }
3052 
3053 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
3054 
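/*
 * Major-compact one segment: repeatedly pull compressed slots from
 * the next segment on the same queue into c_seg, minor-compacting
 * (and possibly freeing) each donor as it drains. Bails out early,
 * leaving c_seg only partially packed, if another thread is waiting
 * for it. Returns true if c_seg was fully compacted.
 */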
3055 static bool
3056 vm_compressor_major_compact_cseg(c_segment_t c_seg, uint32_t* c_seg_considered, bool* bail_wanted_cseg, uint64_t* total_bytes_freed)
3057 {
3058 	/*
3059 	 * Major compaction
3060 	 */
3061 	bool keep_compacting = true, fully_compacted = true;
3062 	queue_head_t *list_head = NULL;
3063 	c_segment_t c_seg_next;
3064 	uint64_t        bytes_to_free = 0, bytes_freed = 0;
3065 	uint32_t        number_considered = 0;
3066 
3067 	if (c_seg->c_state == C_ON_AGE_Q) {
3068 		assert(!c_seg->c_has_donated_pages);
3069 		list_head = &c_age_list_head;
3070 	} else if (c_seg->c_state == C_ON_SWAPPEDIN_Q) {
3071 		assert(c_seg->c_has_donated_pages);
3072 		list_head = &c_late_swappedin_list_head;
3073 	}
3074 
3075 	while (keep_compacting == TRUE) {
3076 		assert(c_seg->c_busy);
3077 
3078 		/* look for another segment to consolidate */
3079 
3080 		c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3081 
3082 		if (queue_end(list_head, (queue_entry_t)c_seg_next)) {
3083 			break;
3084 		}
3085 
3086 		assert(c_seg_next->c_state == c_seg->c_state);
3087 
3088 		number_considered++;
3089 
3090 		if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3091 			break;
3092 		}
3093 
3094 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3095 
3096 		if (c_seg_next->c_busy) {
3097 			/*
3098 			 * We are going to block for our neighbor.
3099 			 * If our c_seg is wanted, we should unbusy
3100 			 * it because we don't know how long we might
3101 			 * have to block here.
3102 			 */
3103 			if (c_seg->c_wanted) {
3104 				lck_mtx_unlock_always(&c_seg_next->c_lock);
3105 				fully_compacted = false;
3106 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3107 				*bail_wanted_cseg = true;
3108 				break;
3109 			}
3110 
3111 			lck_mtx_unlock_always(c_list_lock);
3112 
3113 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3114 
3115 			c_seg_wait_on_busy(c_seg_next);
3116 			lck_mtx_lock_spin_always(c_list_lock);
3117 
3118 			continue;
3119 		}
3120 		/* grab that segment */
3121 		C_SEG_BUSY(c_seg_next);
3122 
3123 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3124 		if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3125 			/*
3126 			 * found an empty c_segment and freed it
3127 			 * so we can't continue to use c_seg_next
3128 			 */
3129 			bytes_freed += bytes_to_free;
3130 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3131 			continue;
3132 		}
3133 
3134 		/* unlock the list ... */
3135 		lck_mtx_unlock_always(c_list_lock);
3136 
3137 		/* do the major compaction */
3138 
3139 		keep_compacting = c_seg_major_compact(c_seg, c_seg_next);
3140 
3141 		VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3142 
3143 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3144 
3145 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3146 		/*
3147 		 * run a minor compaction on the donor segment
3148 		 * since we pulled at least some of its
3149 		 * data into our target...  if we've emptied
3150 		 * it, now is a good time to free it which
3151 		 * c_seg_minor_compaction_and_unlock also takes care of
3152 		 *
3153 		 * by passing TRUE, we ask for c_busy to be cleared
3154 		 * and c_wanted to be taken care of
3155 		 */
3156 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3157 		if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3158 			bytes_freed += bytes_to_free;
3159 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3160 		} else {
3161 			bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3162 			bytes_freed += bytes_to_free;
3163 		}
3164 
3165 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3166 
3167 		/* relock the list */
3168 		lck_mtx_lock_spin_always(c_list_lock);
3169 
3170 		if (c_seg->c_wanted) {
3171 			/*
3172 			 * Our c_seg is in demand. Let's
3173 			 * unbusy it and wakeup the waiters
3174 			 * instead of continuing the compaction
3175 			 * because we could be in this loop
3176 			 * for a while.
3177 			 */
3178 			fully_compacted = false;
3179 			*bail_wanted_cseg = true;
3180 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3181 			break;
3182 		}
3183 	} /* major compaction */
3184 
3185 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, *bail_wanted_cseg, 0);
3186 
3187 	*c_seg_considered += number_considered;
3188 	*total_bytes_freed += bytes_freed;
3189 
3190 	lck_mtx_lock_spin_always(&c_seg->c_lock);
3191 	return fully_compacted;
3192 }
3193 
3194 #define TIME_SUB(rsecs, secs, rfrac, frac, unit)                        \
3195 	MACRO_BEGIN                                                     \
3196 	if ((int)((rfrac) -= (frac)) < 0) {                             \
3197 	        (rfrac) += (unit);                                      \
3198 	        (rsecs) -= 1;                                           \
3199 	}                                                               \
3200 	(rsecs) -= (secs);                                              \
3201 	MACRO_END
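
/*
 * Illustrative sketch (compiled out, not part of the build): the TIME_SUB
 * borrow arithmetic above, shown as a stand-alone program. The sample
 * values and the main() harness are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_NSEC_PER_SEC 1000000000u     /* stand-in for NSEC_PER_SEC */

int
main(void)
{
	/* end = 5.100000000s, start = 3.900000000s -> expect 1.200000000s */
	uint64_t rsecs = 5, secs = 3;
	uint32_t rfrac = 100000000u, frac = 900000000u;

	if ((int)(rfrac -= frac) < 0) {         /* borrow one second */
		rfrac += EX_NSEC_PER_SEC;
		rsecs -= 1;
	}
	rsecs -= secs;

	printf("%llu.%09u\n", (unsigned long long)rsecs, rfrac); /* 1.200000000 */
	return 0;
}
#endif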
3202 
3203 clock_nsec_t c_process_major_report_over_ms = 9; /* report if over 9 ms */
3204 int c_process_major_yield_after = 1000; /* yield after moving 1,000 segments */
3205 uint64_t c_process_major_reports = 0;
3206 clock_sec_t c_process_major_max_sec = 0;
3207 clock_nsec_t c_process_major_max_nsec = 0;
3208 uint32_t c_process_major_peak_segcount = 0;
3209 static void
3210 vm_compressor_process_major_segments(bool ripe_age_only)
3211 {
3212 	c_segment_t c_seg = NULL;
3213 	int count = 0, total = 0, breaks = 0;
3214 	clock_sec_t start_sec, end_sec;
3215 	clock_nsec_t start_nsec, end_nsec;
3216 	clock_nsec_t report_over_ns;
3217 
3218 	if (queue_empty(&c_major_list_head)) {
3219 		return;
3220 	}
3221 
3222 	// printf("%s: starting to move segments from MAJORQ to AGEQ\n", __FUNCTION__);
3223 	if (c_process_major_report_over_ms != 0) {
3224 		report_over_ns = c_process_major_report_over_ms * NSEC_PER_MSEC;
3225 	} else {
3226 		report_over_ns = (clock_nsec_t)-1;
3227 	}
3228 
3229 	if (ripe_age_only) {
3230 		if (c_overage_swapped_count >= c_overage_swapped_limit) {
3231 			/*
3232 			 * Return while we wait for the overage segments
3233 			 * in our queue to get pushed out first.
3234 			 */
3235 			return;
3236 		}
3237 	}
3238 
3239 	clock_get_system_nanotime(&start_sec, &start_nsec);
3240 	while (!queue_empty(&c_major_list_head)) {
3241 		if (!ripe_age_only) {
3242 			/*
3243 			 * Start from the end to preserve aging order. The newer
3244 			 * segments are at the tail and so need to be inserted in
3245 			 * the aging queue in this way so we have the older segments
3246 			 * at the end of the AGE_Q.
3247 			 */
3248 			c_seg = (c_segment_t)queue_last(&c_major_list_head);
3249 		} else {
3250 			c_seg = (c_segment_t)queue_first(&c_major_list_head);
3251 			if ((start_sec - c_seg->c_creation_ts) < vm_ripe_target_age) {
3252 				/*
3253 				 * We have found the first segment in our queue that is not ripe. Segments after it
3254 				 * will be the same. So let's bail here. Return with c_list_lock held.
3255 				 */
3256 				break;
3257 			}
3258 		}
3259 
3260 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3261 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3262 		lck_mtx_unlock_always(&c_seg->c_lock);
3263 
3264 		count++;
3265 		if (count == c_process_major_yield_after ||
3266 		    queue_empty(&c_major_list_head)) {
3267 			/* done or time to take a break */
3268 		} else {
3269 			/* keep going */
3270 			continue;
3271 		}
3272 
3273 		total += count;
3274 		clock_get_system_nanotime(&end_sec, &end_nsec);
3275 		TIME_SUB(end_sec, start_sec, end_nsec, start_nsec, NSEC_PER_SEC);
3276 		if (end_sec > c_process_major_max_sec) {
3277 			c_process_major_max_sec = end_sec;
3278 			c_process_major_max_nsec = end_nsec;
3279 		} else if (end_sec == c_process_major_max_sec &&
3280 		    end_nsec > c_process_major_max_nsec) {
3281 			c_process_major_max_nsec = end_nsec;
3282 		}
3283 		if (total > c_process_major_peak_segcount) {
3284 			c_process_major_peak_segcount = total;
3285 		}
3286 		if (end_sec > 0 ||
3287 		    end_nsec >= report_over_ns) {
3288 			/* we used more than expected */
3289 			c_process_major_reports++;
3290 			printf("%s: moved %d/%d segments from MAJORQ to AGEQ in %lu.%09u seconds and %d breaks\n",
3291 			    __FUNCTION__, count, total,
3292 			    end_sec, end_nsec, breaks);
3293 		}
3294 		if (queue_empty(&c_major_list_head)) {
3295 			/* done */
3296 			break;
3297 		}
3298 		/* take a break to allow someone else to grab the lock */
3299 		lck_mtx_unlock_always(c_list_lock);
3300 		mutex_pause(0); /* 10 microseconds */
3301 		lck_mtx_lock_spin_always(c_list_lock);
3302 		/* start again */
3303 		clock_get_system_nanotime(&start_sec, &start_nsec);
3304 		count = 0;
3305 		breaks++;
3306 	}
3307 }
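
/*
 * Illustrative sketch (compiled out): the batching pattern used by
 * vm_compressor_process_major_segments() — process up to a fixed number
 * of items under a contended lock, then drop the lock and yield so
 * waiters can get in. Assumes pthreads; all ex_* names are hypothetical.
 */
#if 0
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

#define EX_BATCH_LIMIT 1000     /* mirrors c_process_major_yield_after */

static pthread_mutex_t ex_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void
ex_drain_with_breaks(bool (*have_work)(void), void (*do_one)(void))
{
	int count = 0;

	pthread_mutex_lock(&ex_list_lock);
	while (have_work()) {
		do_one();
		if (++count < EX_BATCH_LIMIT) {
			continue;
		}
		/* take a break to allow someone else to grab the lock */
		pthread_mutex_unlock(&ex_list_lock);
		sched_yield();          /* stands in for mutex_pause(0) */
		pthread_mutex_lock(&ex_list_lock);
		count = 0;
	}
	pthread_mutex_unlock(&ex_list_lock);
}
#endif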
3308 
3309 /*
3310  * macOS special swappable csegs -> early_swapin queue
3311  * non-macOS special swappable+non-freezer csegs -> late_swapin queue
3312  * Processing special csegs means minor compacting each cseg and then
3313  * major compacting it and putting it on the early or late
3314  * (depending on platform) swapout queue. tag:DONATE
3315  */
3316 static void
3317 vm_compressor_process_special_swapped_in_segments_locked(void)
3318 {
3319 	c_segment_t c_seg = NULL;
3320 	bool            switch_state = true, bail_wanted_cseg = false;
3321 	unsigned int    number_considered = 0, yield_after_considered_per_pass = 0;
3322 	uint64_t        bytes_freed = 0;
3323 	queue_head_t    *special_swappedin_list_head;
3324 
3325 #if XNU_TARGET_OS_OSX
3326 	special_swappedin_list_head = &c_early_swappedin_list_head;
3327 #else /* XNU_TARGET_OS_OSX */
3328 	if (memorystatus_swap_all_apps) {
3329 		special_swappedin_list_head = &c_late_swappedin_list_head;
3330 	} else {
3331 		/* called on unsupported config */
3332 		return;
3333 	}
3334 #endif /* XNU_TARGET_OS_OSX */
3335 
3336 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3337 	while (!queue_empty(special_swappedin_list_head)) {
3338 		c_seg = (c_segment_t)queue_first(special_swappedin_list_head);
3339 
3340 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3341 
3342 		if (c_seg->c_busy) {
3343 			lck_mtx_unlock_always(c_list_lock);
3344 			c_seg_wait_on_busy(c_seg);
3345 			lck_mtx_lock_spin_always(c_list_lock);
3346 			continue;
3347 		}
3348 
3349 		C_SEG_BUSY(c_seg);
3350 		lck_mtx_unlock_always(&c_seg->c_lock);
3351 		lck_mtx_unlock_always(c_list_lock);
3352 
3353 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3354 
3355 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3356 
3357 		if (c_seg_minor_compaction_and_unlock(c_seg, FALSE /*clear busy?*/)) {
3358 			/*
3359 			 * found an empty c_segment and freed it
3360 			 * so go grab the next guy in the queue
3361 			 */
3362 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3363 			lck_mtx_lock_spin_always(c_list_lock);
3364 			continue;
3365 		}
3366 
3367 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3368 		lck_mtx_lock_spin_always(c_list_lock);
3369 
3370 		switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3371 		assert(c_seg->c_busy);
3372 		assert(!c_seg->c_on_minorcompact_q);
3373 
3374 		if (switch_state) {
3375 			if (VM_CONFIG_SWAP_IS_ACTIVE || VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3376 				/*
3377 				 * Ordinarily we let swapped-in segments age out + get
3378 				 * major compacted with the rest of the c_segs on the ageQ.
3379 				 * But the early donated c_segs, if well compacted, should be
3380 				 * kept ready to be swapped out if needed. These are typically
3381 				 * describing memory belonging to a leaky app (macOS) or a swap-
3382 				 * capable app (iPadOS) and for the latter we can keep these
3383 				 * around longer because we control the triggers in the memorystatus
3384 				 * subsystem.
3385 				 */
3386 				c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3387 			}
3388 		}
3389 
3390 		C_SEG_WAKEUP_DONE(c_seg);
3391 
3392 		lck_mtx_unlock_always(&c_seg->c_lock);
3393 
3394 		if (number_considered >= yield_after_considered_per_pass) {
3395 			if (bail_wanted_cseg) {
3396 				/*
3397 				 * We stopped major compactions on a c_seg
3398 				 * that is wanted. We don't know the priority
3399 				 * of the waiter unfortunately but we are at
3400 				 * a very high priority and so, just in case
3401 				 * the waiter is a critical system daemon or
3402 				 * UI thread, let's give up the CPU in case
3403 				 * the system is running a few CPU intensive
3404 				 * tasks.
3405 				 */
3406 				bail_wanted_cseg = false;
3407 				lck_mtx_unlock_always(c_list_lock);
3408 
3409 				mutex_pause(2); /* 100us yield */
3410 
3411 				lck_mtx_lock_spin_always(c_list_lock);
3412 			}
3413 
3414 			number_considered = 0;
3415 		}
3416 	}
3417 }
3418 
3419 void
3420 vm_compressor_process_special_swapped_in_segments(void)
3421 {
3422 	lck_mtx_lock_spin_always(c_list_lock);
3423 	vm_compressor_process_special_swapped_in_segments_locked();
3424 	lck_mtx_unlock_always(c_list_lock);
3425 }
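
/*
 * Illustrative sketch (compiled out): the wait-on-busy-and-retry idiom the
 * loop above relies on. When the head segment is busy we drop the list
 * lock, sleep until the owner clears the busy bit, retake the lock and
 * restart the scan. Assumes pthreads; the ex_* names are hypothetical and
 * the busy bit here is protected by the same mutex used for the list.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

struct ex_seg {
	bool            busy;   /* owned by at most one thread at a time */
	pthread_cond_t  done;   /* signalled when the owner clears busy */
};

static pthread_mutex_t ex_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* called with ex_list_lock held; returns with it held */
static void
ex_wait_on_busy(struct ex_seg *s)
{
	while (s->busy) {
		pthread_cond_wait(&s->done, &ex_list_lock);
	}
}
#endif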
3426 
3427 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT   10
3428 /*
3429  * Processing regular csegs means aging them.
3430  */
3431 static void
3432 vm_compressor_process_regular_swapped_in_segments(boolean_t flush_all)
3433 {
3434 	c_segment_t     c_seg;
3435 	clock_sec_t     now;
3436 	clock_nsec_t    nsec;
3437 
3438 	clock_get_system_nanotime(&now, &nsec);
3439 
3440 	while (!queue_empty(&c_regular_swappedin_list_head)) {
3441 		c_seg = (c_segment_t)queue_first(&c_regular_swappedin_list_head);
3442 
3443 		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
3444 			break;
3445 		}
3446 
3447 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3448 
3449 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3450 		c_seg->c_agedin_ts = (uint32_t) now;
3451 
3452 		lck_mtx_unlock_always(&c_seg->c_lock);
3453 	}
3454 }
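
/*
 * Illustrative sketch (compiled out): the age-gated drain above. Because
 * the swapped-in queue is ordered by c_swappedin_ts, the scan can stop at
 * the first entry that is still younger than the limit — everything
 * behind it is younger still. The ex_* types are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <time.h>

#define EX_AGE_LIMIT 10 /* seconds, mirrors C_SEGMENT_SWAPPEDIN_AGE_LIMIT */

struct ex_entry {
	time_t          arrival_ts;
	struct ex_entry *next;
};

static struct ex_entry *
ex_drain_aged(struct ex_entry *head, time_t now, bool flush_all)
{
	while (head != NULL) {
		if (!flush_all && (now - head->arrival_ts) < EX_AGE_LIMIT) {
			break;  /* first young entry; the rest are younger */
		}
		/* in the real code this is where the entry moves to the AGE_Q */
		head = head->next;
	}
	return head;
}
#endif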
3455 
3456 
3457 extern  int     vm_num_swap_files;
3458 extern  int     vm_num_pinned_swap_files;
3459 extern  int     vm_swappin_enabled;
3460 
3461 extern  unsigned int    vm_swapfile_total_segs_used;
3462 extern  unsigned int    vm_swapfile_total_segs_alloced;
3463 
3464 
3465 void
3466 vm_compressor_flush(void)
3467 {
3468 	uint64_t        vm_swap_put_failures_at_start;
3469 	wait_result_t   wait_result = 0;
3470 	AbsoluteTime    startTime, endTime;
3471 	clock_sec_t     now_sec;
3472 	clock_nsec_t    now_nsec;
3473 	uint64_t        nsec;
3474 	c_segment_t     c_seg, c_seg_next;
3475 
3476 	HIBLOG("vm_compressor_flush - starting\n");
3477 
3478 	clock_get_uptime(&startTime);
3479 
3480 	lck_mtx_lock_spin_always(c_list_lock);
3481 
3482 	fastwake_warmup = FALSE;
3483 	compaction_swapper_abort = 1;
3484 
3485 	while (compaction_swapper_running) {
3486 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
3487 
3488 		lck_mtx_unlock_always(c_list_lock);
3489 
3490 		thread_block(THREAD_CONTINUE_NULL);
3491 
3492 		lck_mtx_lock_spin_always(c_list_lock);
3493 	}
3494 	compaction_swapper_abort = 0;
3495 	compaction_swapper_running = 1;
3496 
3497 	hibernate_flushing = TRUE;
3498 	hibernate_no_swapspace = FALSE;
3499 	hibernate_flush_timed_out = FALSE;
3500 	c_generation_id_flush_barrier = c_generation_id + 1000;
3501 
3502 	clock_get_system_nanotime(&now_sec, &now_nsec);
3503 	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
3504 
3505 	vm_swap_put_failures_at_start = vm_swap_put_failures;
3506 
3507 	/*
3508 	 * We are about to hibernate and so we want all segments flushed to disk.
3509 	 * Segments that are on the major compaction queue won't be considered in
3510 	 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
3511 	 * the ageQ for consideration.
3512 	 */
3513 	if (!queue_empty(&c_major_list_head)) {
3514 		c_seg = (c_segment_t)queue_first(&c_major_list_head);
3515 
3516 		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3517 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3518 			lck_mtx_lock_spin_always(&c_seg->c_lock);
3519 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3520 			lck_mtx_unlock_always(&c_seg->c_lock);
3521 			c_seg = c_seg_next;
3522 		}
3523 	}
3524 	vm_compressor_compact_and_swap(TRUE);
3525 
3526 	while (!queue_empty(&c_early_swapout_list_head) || !queue_empty(&c_regular_swapout_list_head) || !queue_empty(&c_late_swapout_list_head)) {
3527 		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
3528 
3529 		lck_mtx_unlock_always(c_list_lock);
3530 
3531 		wait_result = thread_block(THREAD_CONTINUE_NULL);
3532 
3533 		lck_mtx_lock_spin_always(c_list_lock);
3534 
3535 		if (wait_result == THREAD_TIMED_OUT) {
3536 			break;
3537 		}
3538 	}
3539 	hibernate_flushing = FALSE;
3540 	compaction_swapper_running = 0;
3541 
3542 	if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
3543 		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
3544 		    vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
3545 	}
3546 
3547 	lck_mtx_unlock_always(c_list_lock);
3548 
3549 	thread_wakeup((event_t)&compaction_swapper_running);
3550 
3551 	clock_get_uptime(&endTime);
3552 	SUB_ABSOLUTETIME(&endTime, &startTime);
3553 	absolutetime_to_nanoseconds(endTime, &nsec);
3554 
3555 	HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
3556 	    nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
3557 }
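
/*
 * Illustrative sketch (compiled out): the bounded drain loop at the end of
 * vm_compressor_flush() — wait for the swapout queues to empty, but stop
 * waiting if a single wait times out. Assumes pthreads; note that
 * pthread_cond_timedwait() takes an absolute deadline where the kernel
 * code uses a relative 5s timeout per wait. The ex_* names are hypothetical.
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ex_cv = PTHREAD_COND_INITIALIZER;

static bool
ex_drain_until_empty(bool (*queues_empty)(void),
    const struct timespec *deadline)
{
	bool drained = true;

	pthread_mutex_lock(&ex_lock);
	while (!queues_empty()) {
		if (pthread_cond_timedwait(&ex_cv, &ex_lock, deadline) == ETIMEDOUT) {
			drained = false;  /* the swapper is stuck or slow; give up */
			break;
		}
	}
	pthread_mutex_unlock(&ex_lock);
	return drained;
}
#endif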
3558 
3559 
3560 int             compaction_swap_trigger_thread_awakened = 0;
3561 
3562 static void
3563 vm_compressor_swap_trigger_thread(void)
3564 {
3565 	current_thread()->options |= TH_OPT_VMPRIV;
3566 
3567 	/*
3568 	 * compaction_swapper_init_now is set when the first call to
3569 	 * vm_consider_waking_compactor_swapper is made from
3570 	 * vm_pageout_scan... since this function is called upon
3571 	 * thread creation, we want to make sure to delay adjusting
3572 	 * the tuneables until we are awakened via vm_pageout_scan
3573 	 * so that we are at a point where the vm_swapfile_open will
3574 	 * be operating on the correct directory (in case the default
3575 	 * of using the VM volume is overridden by the dynamic_pager)
3576 	 */
3577 	if (compaction_swapper_init_now) {
3578 		vm_compaction_swapper_do_init();
3579 
3580 		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
3581 			thread_vm_bind_group_add();
3582 		}
3583 #if CONFIG_THREAD_GROUPS
3584 		thread_group_vm_add();
3585 #endif
3586 		thread_set_thread_name(current_thread(), "VM_cswap_trigger");
3587 		compaction_swapper_init_now = 0;
3588 	}
3589 	lck_mtx_lock_spin_always(c_list_lock);
3590 
3591 	compaction_swap_trigger_thread_awakened++;
3592 	compaction_swapper_awakened = 0;
3593 
3594 	if (compaction_swapper_running == 0) {
3595 		compaction_swapper_running = 1;
3596 
3597 		vm_compressor_compact_and_swap(FALSE);
3598 
3599 		compaction_swapper_running = 0;
3600 	}
3601 	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
3602 
3603 	if (compaction_swapper_running == 0) {
3604 		thread_wakeup((event_t)&compaction_swapper_running);
3605 	}
3606 
3607 	lck_mtx_unlock_always(c_list_lock);
3608 
3609 	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
3610 
3611 	/* NOTREACHED */
3612 }
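
/*
 * Illustrative sketch (compiled out): the self-rearming worker pattern
 * above, expressed as a plain loop around a condition variable instead of
 * assert_wait()/thread_block() with the function as its own continuation.
 * Assumes pthreads; the ex_* names are hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ex_trigger = PTHREAD_COND_INITIALIZER;
static bool ex_awakened;

static void *
ex_trigger_thread(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&ex_lock);
	for (;;) {
		while (!ex_awakened) {
			pthread_cond_wait(&ex_trigger, &ex_lock);
		}
		ex_awakened = false;

		/* ... one pass of compact-and-swap work goes here ... */
	}
}
#endif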
3613 
3614 
3615 void
3616 vm_compressor_record_warmup_start(void)
3617 {
3618 	c_segment_t     c_seg;
3619 
3620 	lck_mtx_lock_spin_always(c_list_lock);
3621 
3622 	if (first_c_segment_to_warm_generation_id == 0) {
3623 		if (!queue_empty(&c_age_list_head)) {
3624 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3625 
3626 			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3627 		} else {
3628 			first_c_segment_to_warm_generation_id = 0;
3629 		}
3630 
3631 		fastwake_recording_in_progress = TRUE;
3632 	}
3633 	lck_mtx_unlock_always(c_list_lock);
3634 }
3635 
3636 
3637 void
3638 vm_compressor_record_warmup_end(void)
3639 {
3640 	c_segment_t     c_seg;
3641 
3642 	lck_mtx_lock_spin_always(c_list_lock);
3643 
3644 	if (fastwake_recording_in_progress == TRUE) {
3645 		if (!queue_empty(&c_age_list_head)) {
3646 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3647 
3648 			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3649 		} else {
3650 			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
3651 		}
3652 
3653 		fastwake_recording_in_progress = FALSE;
3654 
3655 		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3656 	}
3657 	lck_mtx_unlock_always(c_list_lock);
3658 }
3659 
3660 
3661 #define DELAY_TRIM_ON_WAKE_NS (25 * NSEC_PER_SEC)
3662 
3663 void
3664 vm_compressor_delay_trim(void)
3665 {
3666 	uint64_t now = mach_absolute_time();
3667 	uint64_t delay_abstime;
3668 	nanoseconds_to_absolutetime(DELAY_TRIM_ON_WAKE_NS, &delay_abstime);
3669 	dont_trim_until_ts = now + delay_abstime;
3670 }
3671 
3672 
3673 void
3674 vm_compressor_do_warmup(void)
3675 {
3676 	lck_mtx_lock_spin_always(c_list_lock);
3677 
3678 	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
3679 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3680 
3681 		lck_mtx_unlock_always(c_list_lock);
3682 		return;
3683 	}
3684 
3685 	if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
3686 		fastwake_warmup = TRUE;
3687 
3688 		compaction_swapper_awakened = 1;
3689 		thread_wakeup((event_t)&c_compressor_swap_trigger);
3690 	}
3691 	lck_mtx_unlock_always(c_list_lock);
3692 }
3693 
3694 void
3695 do_fastwake_warmup_all(void)
3696 {
3697 	lck_mtx_lock_spin_always(c_list_lock);
3698 
3699 	if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
3700 		lck_mtx_unlock_always(c_list_lock);
3701 		return;
3702 	}
3703 
3704 	fastwake_warmup = TRUE;
3705 
3706 	do_fastwake_warmup(&c_swappedout_list_head, TRUE);
3707 
3708 	do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
3709 
3710 	fastwake_warmup = FALSE;
3711 
3712 	lck_mtx_unlock_always(c_list_lock);
3713 }
3714 
3715 void
3716 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
3717 {
3718 	c_segment_t     c_seg = NULL;
3719 	AbsoluteTime    startTime, endTime;
3720 	uint64_t        nsec;
3721 
3722 
3723 	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3724 
3725 	clock_get_uptime(&startTime);
3726 
3727 	lck_mtx_unlock_always(c_list_lock);
3728 
3729 	proc_set_thread_policy(current_thread(),
3730 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
3731 
3732 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
3733 
3734 	lck_mtx_lock_spin_always(c_list_lock);
3735 
3736 	while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
3737 		c_seg = (c_segment_t) queue_first(c_queue);
3738 
3739 		if (consider_all_cseg == FALSE) {
3740 			if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
3741 			    c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
3742 				break;
3743 			}
3744 
3745 			if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
3746 				break;
3747 			}
3748 		}
3749 
3750 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3751 		lck_mtx_unlock_always(c_list_lock);
3752 
3753 		if (c_seg->c_busy) {
3754 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3755 			c_seg_wait_on_busy(c_seg);
3756 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3757 		} else {
3758 			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
3759 				lck_mtx_unlock_always(&c_seg->c_lock);
3760 			}
3761 			c_segment_warmup_count++;
3762 
3763 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3764 			vm_pageout_io_throttle();
3765 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3766 		}
3767 		lck_mtx_lock_spin_always(c_list_lock);
3768 	}
3769 	lck_mtx_unlock_always(c_list_lock);
3770 
3771 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
3772 
3773 	proc_set_thread_policy(current_thread(),
3774 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3775 
3776 	clock_get_uptime(&endTime);
3777 	SUB_ABSOLUTETIME(&endTime, &startTime);
3778 	absolutetime_to_nanoseconds(endTime, &nsec);
3779 
3780 	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3781 
3782 	lck_mtx_lock_spin_always(c_list_lock);
3783 
3784 	if (consider_all_cseg == FALSE) {
3785 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3786 	}
3787 }
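
/*
 * Illustrative sketch (compiled out): the two cutoffs do_fastwake_warmup()
 * applies when consider_all_cseg is FALSE. Segments arrive in generation
 * order, so the scan stops at the first one outside the recorded window,
 * and it also stops rather than consume the last of free memory. The ex_*
 * names are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
ex_should_warm(uint64_t gen, uint64_t first_gen, uint64_t last_gen,
    unsigned long free_pages, unsigned long available_pages)
{
	if (gen < first_gen || gen > last_gen) {
		return false;   /* outside the recorded warmup window */
	}
	if (free_pages < available_pages / 4) {
		return false;   /* free memory is getting tight; stop warming */
	}
	return true;
}
#endif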
3788 
3789 extern bool     vm_swapout_thread_running;
3790 extern boolean_t        compressor_store_stop_compaction;
3791 
3792 void
3793 vm_compressor_compact_and_swap(boolean_t flush_all)
3794 {
3795 	c_segment_t     c_seg;
3796 	bool            switch_state, bail_wanted_cseg = false;
3797 	clock_sec_t     now;
3798 	clock_nsec_t    nsec;
3799 	mach_timespec_t start_ts, end_ts;
3800 	unsigned int    number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3801 	uint64_t        bytes_freed, delta_usec;
3802 	uint32_t        c_swapout_count = 0;
3803 
3804 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3805 
3806 	if (fastwake_warmup == TRUE) {
3807 		uint64_t        starting_warmup_count;
3808 
3809 		starting_warmup_count = c_segment_warmup_count;
3810 
3811 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3812 		    first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3813 		do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3814 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3815 
3816 		fastwake_warmup = FALSE;
3817 	}
3818 
3819 #if (XNU_TARGET_OS_OSX && __arm64__)
3820 	/*
3821 	 * Re-considering major csegs showed benefits on all platforms by
3822 	 * significantly reducing fragmentation and getting back memory.
3823 	 * However, on smaller devices, eg watch, there was increased power
3824 	 * use for the additional compactions. And the turnover in csegs on
3825 	 * those smaller platforms is high enough in the decompression/free
3826 	 * path that we can skip reconsidering them here because we already
3827 	 * consider them for major compaction in those paths.
3828 	 */
3829 	vm_compressor_process_major_segments(false /*all segments and not just the ripe-aged ones*/);
3830 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3831 
3832 	/*
3833 	 * it's possible for the c_age_list_head to be empty if we
3834 	 * hit our limits for growing the compressor pool and we subsequently
3835 	 * hibernated... on the next hibernation we could see the queue as
3836 	 * empty and not proceed even though we have a bunch of segments on
3837 	 * the swapped-in queue that need to be dealt with.
3838 	 */
3839 	vm_compressor_do_delayed_compactions(flush_all);
3840 	vm_compressor_process_special_swapped_in_segments_locked();
3841 	vm_compressor_process_regular_swapped_in_segments(flush_all);
3842 
3843 	/*
3844 	 * we only need to grab the timestamp once per
3845 	 * invocation of this function since the
3846 	 * timescale we're interested in is measured
3847 	 * in days
3848 	 */
3849 	clock_get_system_nanotime(&now, &nsec);
3850 
3851 	start_ts.tv_sec = (int) now;
3852 	start_ts.tv_nsec = nsec;
3853 	delta_usec = 0;
3854 	number_considered = 0;
3855 	wanted_cseg_found = 0;
3856 	number_yields = 0;
3857 	bytes_freed = 0;
3858 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3859 
3860 #if 0
3861 	/**
3862 	 * SW: Need to figure out how to properly rate limit this log because it is currently way too
3863 	 * noisy. rdar://99379414 (Figure out how to rate limit the fragmentation level logging)
3864 	 */
3865 	os_log(OS_LOG_DEFAULT, "memorystatus: before compaction fragmentation level %u\n", vm_compressor_fragmentation_level());
3866 #endif
3867 
3868 	while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
3869 		if (hibernate_flushing == TRUE) {
3870 			clock_sec_t     sec;
3871 
3872 			if (hibernate_should_abort()) {
3873 				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
3874 				break;
3875 			}
3876 			if (hibernate_no_swapspace == TRUE) {
3877 				HIBLOG("vm_compressor_flush - out of swap space\n");
3878 				break;
3879 			}
3880 			if (vm_swap_files_pinned() == FALSE) {
3881 				HIBLOG("vm_compressor_flush - unpinned swap files\n");
3882 				break;
3883 			}
3884 			if (hibernate_in_progress_with_pinned_swap == TRUE &&
3885 			    (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
3886 				HIBLOG("vm_compressor_flush - out of pinned swap space\n");
3887 				break;
3888 			}
3889 			clock_get_system_nanotime(&sec, &nsec);
3890 
3891 			if (sec > hibernate_flushing_deadline) {
3892 				hibernate_flush_timed_out = TRUE;
3893 				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
3894 				break;
3895 			}
3896 		}
3897 
3898 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3899 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3900 			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
3901 
3902 			if (!vm_swapout_thread_running) {
3903 				thread_wakeup((event_t)&vm_swapout_thread);
3904 			}
3905 
3906 			lck_mtx_unlock_always(c_list_lock);
3907 
3908 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
3909 
3910 			thread_block(THREAD_CONTINUE_NULL);
3911 
3912 			lck_mtx_lock_spin_always(c_list_lock);
3913 		}
3914 		/*
3915 		 * Minor compactions
3916 		 */
3917 		vm_compressor_do_delayed_compactions(flush_all);
3918 
3919 		/*
3920 		 * vm_compressor_process_early_swapped_in_segments()
3921 		 * might be too aggressive. So OFF for now.
3922 		 */
3923 		vm_compressor_process_regular_swapped_in_segments(flush_all);
3924 
3925 		/* Recompute because we dropped the c_list_lock above */
3926 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3927 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3928 			/*
3929 			 * we timed out on the above thread_block
3930 			 * let's loop around and try again
3931 			 * the timeout allows us to continue
3932 			 * to do minor compactions to make
3933 			 * more memory available
3934 			 */
3935 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
3936 
3937 			continue;
3938 		}
3939 
3940 		/*
3941 		 * Swap out segments?
3942 		 */
3943 		if (flush_all == FALSE) {
3944 			bool needs_to_swap;
3945 
3946 			lck_mtx_unlock_always(c_list_lock);
3947 
3948 			needs_to_swap = compressor_needs_to_swap();
3949 
3950 			lck_mtx_lock_spin_always(c_list_lock);
3951 
3952 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
3953 
3954 			if (!needs_to_swap) {
3955 				break;
3956 			}
3957 		}
3958 		if (queue_empty(&c_age_list_head)) {
3959 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
3960 			break;
3961 		}
3962 		c_seg = (c_segment_t) queue_first(&c_age_list_head);
3963 
3964 		assert(c_seg->c_state == C_ON_AGE_Q);
3965 
3966 		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
3967 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
3968 			break;
3969 		}
3970 
3971 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3972 
3973 		if (c_seg->c_busy) {
3974 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
3975 
3976 			lck_mtx_unlock_always(c_list_lock);
3977 			c_seg_wait_on_busy(c_seg);
3978 			lck_mtx_lock_spin_always(c_list_lock);
3979 
3980 			continue;
3981 		}
3982 		C_SEG_BUSY(c_seg);
3983 
3984 		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
3985 			/*
3986 			 * found an empty c_segment and freed it
3987 			 * so go grab the next guy in the queue
3988 			 */
3989 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
3990 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3991 			continue;
3992 		}
3993 
3994 		switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3995 		if (bail_wanted_cseg) {
3996 			wanted_cseg_found++;
3997 			bail_wanted_cseg = false;
3998 		}
3999 
4000 		assert(c_seg->c_busy);
4001 		assert(!c_seg->c_on_minorcompact_q);
4002 
4003 		if (switch_state) {
4004 			if (VM_CONFIG_SWAP_IS_ACTIVE) {
4005 				int new_state = C_ON_SWAPOUT_Q;
4006 #if (XNU_TARGET_OS_OSX && __arm64__)
4007 				if (flush_all == false && compressor_swapout_conditions_met() == false) {
4008 					new_state = C_ON_MAJORCOMPACT_Q;
4009 				}
4010 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
4011 
4012 				if (new_state == C_ON_SWAPOUT_Q) {
4013 					/*
4014 					 * This mode of putting a generic c_seg on the swapout list is
4015 					 * only supported when we have general swapping enabled
4016 					 */
4017 					clock_sec_t lnow;
4018 					clock_nsec_t lnsec;
4019 					clock_get_system_nanotime(&lnow, &lnsec);
4020 					if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
4021 						vmcs_stats.unripe_under_30s++;
4022 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
4023 						vmcs_stats.unripe_under_60s++;
4024 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
4025 						vmcs_stats.unripe_under_300s++;
4026 					}
4027 				}
4028 
4029 				c_seg_switch_state(c_seg, new_state, FALSE);
4030 			} else {
4031 				if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
4032 					assert(VM_CONFIG_SWAP_IS_PRESENT);
4033 					/*
4034 					 * we are running compressor sweeps with swap-behind
4035 					 * make sure the c_seg has aged enough before swapping it
4036 					 * out...
4037 					 */
4038 					if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
4039 						c_seg->c_overage_swap = TRUE;
4040 						c_overage_swapped_count++;
4041 						c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
4042 					}
4043 				}
4044 			}
4045 			if (c_seg->c_state == C_ON_AGE_Q) {
4046 				/*
4047 				 * this c_seg didn't get moved to the swapout queue
4048 				 * so we need to move it out of the way...
4049 				 * we just did a major compaction on it so put it
4050 				 * on that queue
4051 				 */
4052 				c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
4053 			} else {
4054 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
4055 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
4056 			}
4057 		}
4058 
4059 		C_SEG_WAKEUP_DONE(c_seg);
4060 
4061 		lck_mtx_unlock_always(&c_seg->c_lock);
4062 
4063 		/*
4064 		 * On systems _with_ general swap, regardless of jetsam, we wake up the swapout thread here.
4065 		 * On systems _without_ general swap, it's the responsibility of the memorystatus
4066 		 * subsystem to wake up the swapper.
4067 		 * TODO: When we have full jetsam support on a swap enabled system, we will need to revisit
4068 		 * this policy.
4069 		 */
4070 		if (VM_CONFIG_SWAP_IS_ACTIVE && c_swapout_count) {
4071 			/*
4072 			 * We don't pause/yield here because we will either
4073 			 * yield below or at the top of the loop with the
4074 			 * assert_wait_timeout.
4075 			 */
4076 			if (!vm_swapout_thread_running) {
4077 				thread_wakeup((event_t)&vm_swapout_thread);
4078 			}
4079 		}
4080 
4081 		if (number_considered >= yield_after_considered_per_pass) {
4082 			if (wanted_cseg_found) {
4083 				/*
4084 				 * We stopped major compactions on a c_seg
4085 				 * that is wanted. We don't know the priority
4086 				 * of the waiter unfortunately but we are at
4087 				 * a very high priority and so, just in case
4088 				 * the waiter is a critical system daemon or
4089 				 * UI thread, let's give up the CPU in case
4090 				 * the system is running a few CPU intensive
4091 				 * tasks.
4092 				 */
4093 				lck_mtx_unlock_always(c_list_lock);
4094 
4095 				mutex_pause(2); /* 100us yield */
4096 
4097 				number_yields++;
4098 
4099 				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
4100 
4101 				lck_mtx_lock_spin_always(c_list_lock);
4102 			}
4103 
4104 			number_considered = 0;
4105 			wanted_cseg_found = 0;
4106 		}
4107 	}
4108 	clock_get_system_nanotime(&now, &nsec);
4109 
4110 	end_ts = major_compact_ts = (mach_timespec_t){.tv_sec = (int)now, .tv_nsec = nsec};
4111 
4112 	SUB_MACH_TIMESPEC(&end_ts, &start_ts);
4113 
4114 	delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
4115 
4116 	delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
4117 
4118 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);
4119 
4120 	if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
4121 		c_seg_major_compact_stats_now = 0;
4122 	} else {
4123 		c_seg_major_compact_stats_now++;
4124 	}
4125 
4126 	assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);
4127 
4128 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
4129 }
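
/*
 * Illustrative sketch (compiled out): how the bytes_freed_rate_us figure
 * above is derived — elapsed wall time minus the time spent in explicit
 * 100us yields, clamped to at least 1us so the division is safe. The
 * signed check also guards the (unlikely) case where the yield time
 * exceeds the measured elapsed time. The ex_* names are hypothetical.
 */
#if 0
#include <stdint.h>

static uint64_t
ex_bytes_freed_rate_us(uint64_t bytes_freed, uint64_t elapsed_usec,
    uint64_t number_yields)
{
	int64_t delta_usec = (int64_t)elapsed_usec -
	    (int64_t)(number_yields * 100);

	if (delta_usec < 1) {
		delta_usec = 1;  /* we could have a 0 usec run */
	}
	return bytes_freed / (uint64_t)delta_usec;
}
#endif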
4130 
4131 
4132 static c_segment_t
4133 c_seg_allocate(c_segment_t *current_chead)
4134 {
4135 	c_segment_t     c_seg;
4136 	int             min_needed;
4137 	int             size_to_populate;
4138 	c_segment_t     *donate_queue_head;
4139 
4140 #if XNU_TARGET_OS_OSX
4141 	if (vm_compressor_low_on_space()) {
4142 		vm_compressor_take_paging_space_action();
4143 	}
4144 #endif /* XNU_TARGET_OS_OSX */
4145 
4146 	if ((c_seg = *current_chead) == NULL) {
4147 		uint32_t        c_segno;
4148 
4149 		lck_mtx_lock_spin_always(c_list_lock);
4150 
4151 		while (c_segments_busy == TRUE) {
4152 			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
4153 
4154 			lck_mtx_unlock_always(c_list_lock);
4155 
4156 			thread_block(THREAD_CONTINUE_NULL);
4157 
4158 			lck_mtx_lock_spin_always(c_list_lock);
4159 		}
4160 		if (c_free_segno_head == (uint32_t)-1) {
4161 			uint32_t        c_segments_available_new;
4162 			uint32_t        compressed_pages;
4163 
4164 #if CONFIG_FREEZE
4165 			if (freezer_incore_cseg_acct) {
4166 				compressed_pages = c_segment_pages_compressed_incore;
4167 			} else {
4168 				compressed_pages = c_segment_pages_compressed;
4169 			}
4170 #else
4171 			compressed_pages = c_segment_pages_compressed;
4172 #endif /* CONFIG_FREEZE */
4173 
4174 			if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) {
4175 				lck_mtx_unlock_always(c_list_lock);
4176 
4177 				return NULL;
4178 			}
4179 			c_segments_busy = TRUE;
4180 			lck_mtx_unlock_always(c_list_lock);
4181 
4182 			/* pages for c_segments are never depopulated, c_segments_available never goes down */
4183 			kernel_memory_populate((vm_offset_t)c_segments_next_page,
4184 			    PAGE_SIZE, KMA_NOFAIL | KMA_KOBJECT,
4185 			    VM_KERN_MEMORY_COMPRESSOR);
4186 			c_segments_next_page += PAGE_SIZE;
4187 
4188 			c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
4189 
4190 			if (c_segments_available_new > c_segments_limit) {
4191 				c_segments_available_new = c_segments_limit;
4192 			}
4193 
4194 			/* add the just-added segments to the top of the free-list */
4195 			for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
4196 				c_segments[c_segno - 1].c_segno = c_segno;  /* next free is the one after you */
4197 			}
4198 
4199 			lck_mtx_lock_spin_always(c_list_lock);
4200 
4201 			c_segments[c_segno - 1].c_segno = c_free_segno_head; /* link to the rest of the existing freelist */
4202 			c_free_segno_head = c_segments_available; /* first one in the page that was just allocated */
4203 			c_segments_available = c_segments_available_new;
4204 
4205 			c_segments_busy = FALSE;
4206 			thread_wakeup((event_t) (&c_segments_busy));
4207 		}
4208 		c_segno = c_free_segno_head;
4209 		assert(c_segno >= 0 && c_segno < c_segments_limit);
4210 
4211 		c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
4212 
4213 		/*
4214 		 * do the rest of the bookkeeping now while we're still holding
4215 		 * the list lock and grab our generation id now into a local
4216 		 * so that we can install it once we have the c_seg allocated
4217 		 */
4218 		c_segment_count++;
4219 		if (c_segment_count > c_segment_count_max) {
4220 			c_segment_count_max = c_segment_count;
4221 		}
4222 
4223 		lck_mtx_unlock_always(c_list_lock);
4224 
4225 		c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
4226 
4227 		c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
4228 
4229 		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
4230 
4231 		c_seg->c_state = C_IS_EMPTY;
4232 		c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
4233 		c_seg->c_mysegno = c_segno;
4234 
4235 		lck_mtx_lock_spin_always(c_list_lock);
4236 		c_empty_count++;  /* going to be immediately decremented in the next call */
4237 		c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
4238 		c_segments[c_segno].c_seg = c_seg;
4239 		assert(c_segments[c_segno].c_segno > c_segments_available);  /* we just assigned a pointer to it so this is an indication that it is occupied */
4240 		lck_mtx_unlock_always(c_list_lock);
4241 
4242 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4243 #if XNU_TARGET_OS_OSX /* tag:DONATE */
4244 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4245 #else /* XNU_TARGET_OS_OSX */
4246 			if (memorystatus_swap_all_apps) {
4247 				donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4248 			} else {
4249 				donate_queue_head = NULL;
4250 			}
4251 #endif /* XNU_TARGET_OS_OSX */
4252 
4253 			if (current_chead == donate_queue_head) {
4254 				c_seg->c_has_donated_pages = 1;
4255 				break;
4256 			}
4257 		}
4258 
4259 		*current_chead = c_seg;
4260 
4261 #if DEVELOPMENT || DEBUG
4262 		C_SEG_MAKE_WRITEABLE(c_seg);
4263 #endif
4264 	}
4265 	c_seg_alloc_nextslot(c_seg);
4266 
4267 	size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
4268 
4269 	if (size_to_populate) {
4270 		min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
4271 
4272 		if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
4273 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
4274 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
4275 			}
4276 
4277 			OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed);
4278 
4279 			kernel_memory_populate(
4280 				(vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
4281 				size_to_populate,
4282 				KMA_NOFAIL | KMA_COMPRESSOR,
4283 				VM_KERN_MEMORY_COMPRESSOR);
4284 		} else {
4285 			size_to_populate = 0;
4286 		}
4287 	}
4288 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
4289 
4290 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4291 
4292 	if (size_to_populate) {
4293 		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
4294 	}
4295 
4296 	return c_seg;
4297 }
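
/*
 * Illustrative sketch (compiled out): the index-linked freelist that
 * c_seg_allocate() pulls from. Each free slot stores the index of the
 * next free slot in place of its payload, and (uint32_t)-1 terminates the
 * list, as with c_free_segno_head above. The ex_* names are hypothetical
 * and the sketch skips the growth path.
 */
#if 0
#include <stdint.h>

#define EX_NO_SLOT ((uint32_t)-1)

union ex_slot {
	uint32_t        next_free;      /* valid while the slot is free */
	void            *payload;       /* valid while the slot is in use */
};

static union ex_slot ex_slots[64];
static uint32_t ex_free_head = EX_NO_SLOT;

static void
ex_slot_free(uint32_t idx)
{
	ex_slots[idx].next_free = ex_free_head;
	ex_free_head = idx;
}

static uint32_t
ex_slot_alloc(void)
{
	uint32_t idx = ex_free_head;

	if (idx != EX_NO_SLOT) {
		ex_free_head = ex_slots[idx].next_free;
	}
	return idx;     /* EX_NO_SLOT means the freelist is empty */
}
#endif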
4298 
4299 #if DEVELOPMENT || DEBUG
4300 #if CONFIG_FREEZE
4301 extern boolean_t memorystatus_freeze_to_memory;
4302 #endif /* CONFIG_FREEZE */
4303 #endif /* DEVELOPMENT || DEBUG */
4304 uint64_t c_seg_total_donated_bytes = 0; /* For testing/debugging only for now. Remove and add new counters for vm_stat.*/
4305 
4306 uint64_t c_seg_filled_no_contention = 0;
4307 uint64_t c_seg_filled_contention = 0;
4308 clock_sec_t c_seg_filled_contention_sec_max = 0;
4309 clock_nsec_t c_seg_filled_contention_nsec_max = 0;
4310 
4311 static void
4312 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
4313 {
4314 	uint32_t        unused_bytes;
4315 	uint32_t        offset_to_depopulate;
4316 	int             new_state = C_ON_AGE_Q;
4317 	clock_sec_t     sec;
4318 	clock_nsec_t    nsec;
4319 	bool            head_insert = false, wakeup_swapout_thread = false;
4320 
4321 	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
4322 
4323 	if (unused_bytes) {
4324 		/* if this is a platform that needs an extra page at the end of the segment when compressing,
4325 		 * then now is the time to depopulate that extra page. It still takes virtual space but doesn't
4326 		 * actually waste memory */
4327 		offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
4328 
4329 		/* release the extra physical page(s) at the end of the segment  */
4330 		lck_mtx_unlock_always(&c_seg->c_lock);
4331 
4332 		kernel_memory_depopulate(
4333 			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
4334 			unused_bytes,
4335 			KMA_COMPRESSOR,
4336 			VM_KERN_MEMORY_COMPRESSOR);
4337 
4338 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4339 
4340 		c_seg->c_populated_offset = offset_to_depopulate;
4341 	}
4342 	assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
4343 
4344 #if DEVELOPMENT || DEBUG
4345 	{
4346 		boolean_t       c_seg_was_busy = FALSE;
4347 
4348 		if (!c_seg->c_busy) {
4349 			C_SEG_BUSY(c_seg);
4350 		} else {
4351 			c_seg_was_busy = TRUE;
4352 		}
4353 
4354 		lck_mtx_unlock_always(&c_seg->c_lock);
4355 
4356 		C_SEG_WRITE_PROTECT(c_seg);
4357 
4358 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4359 
4360 		if (c_seg_was_busy == FALSE) {
4361 			C_SEG_WAKEUP_DONE(c_seg);
4362 		}
4363 	}
4364 #endif
4365 
4366 #if CONFIG_FREEZE
4367 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
4368 	    VM_CONFIG_SWAP_IS_PRESENT &&
4369 	    VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
4370 #if DEVELOPMENT || DEBUG
4371 	    && !memorystatus_freeze_to_memory
4372 #endif /* DEVELOPMENT || DEBUG */
4373 	    ) {
4374 		new_state = C_ON_SWAPOUT_Q;
4375 		wakeup_swapout_thread = true;
4376 	}
4377 #endif /* CONFIG_FREEZE */
4378 
4379 	if (vm_darkwake_mode == TRUE) {
4380 		new_state = C_ON_SWAPOUT_Q;
4381 		head_insert = true;
4382 		wakeup_swapout_thread = true;
4383 	} else {
4384 		c_segment_t *donate_queue_head;
4385 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4386 #if XNU_TARGET_OS_OSX  /* tag:DONATE */
4387 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4388 #else /* XNU_TARGET_OS_OSX */
4389 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4390 #endif /* XNU_TARGET_OS_OSX */
4391 			if (current_chead == donate_queue_head) {
4392 				/* This is the place where the "donating" task actually does the so-called donation
4393 				/* This is the place where the "donating" task actually does the so-called donation.
4394 				 * Instead of continuing to occupy memory in the compressor, the segment goes directly
4395 				 * to swap-out instead of to the AGE_Q */
4396 				new_state = C_ON_SWAPOUT_Q;
4397 				c_seg_total_donated_bytes += c_seg->c_bytes_used;
4398 				break;
4399 			}
4400 		}
4401 	}
4402 
4403 	clock_get_system_nanotime(&sec, &nsec);
4404 	c_seg->c_creation_ts = (uint32_t)sec;
4405 
4406 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4407 		clock_sec_t     sec2;
4408 		clock_nsec_t    nsec2;
4409 
4410 		lck_mtx_lock_spin_always(c_list_lock);
4411 		clock_get_system_nanotime(&sec2, &nsec2);
4412 		TIME_SUB(sec2, sec, nsec2, nsec, NSEC_PER_SEC);
4413 		/* keep track of how much time we've waited for c_list_lock */
4414 		if (sec2 > c_seg_filled_contention_sec_max) {
4415 			c_seg_filled_contention_sec_max = sec2;
4416 			c_seg_filled_contention_nsec_max = nsec2;
4417 		} else if (sec2 == c_seg_filled_contention_sec_max && nsec2 > c_seg_filled_contention_nsec_max) {
4418 			c_seg_filled_contention_nsec_max = nsec2;
4419 		}
4420 		c_seg_filled_contention++;
4421 	} else {
4422 		c_seg_filled_no_contention++;
4423 	}
4424 
4425 #if CONFIG_FREEZE
4426 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead)) {
4427 		if (freezer_context_global.freezer_ctx_task->donates_own_pages) {
4428 			assert(!c_seg->c_has_donated_pages);
4429 			c_seg->c_has_donated_pages = 1;
4430 			OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore_late_swapout);
4431 		}
4432 		c_seg->c_has_freezer_pages = 1;
4433 	}
4434 #endif /* CONFIG_FREEZE */
4435 
4436 	c_seg->c_generation_id = c_generation_id++;
4437 	c_seg_switch_state(c_seg, new_state, head_insert);
4438 
4439 #if CONFIG_FREEZE
4440 	/*
4441 	 * Donated segments count as frozen to swap if we go through the freezer.
4442 	 * TODO: What we need is a new ledger and cseg state that can describe
4443 	 * a frozen cseg from a donated task so we can accurately decrement it on
4444 	 * swapins.
4445 	 */
4446 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) && (c_seg->c_state == C_ON_SWAPOUT_Q)) {
4447 		/*
4448 		 * darkwake and freezer can't co-exist together
4449 		 * We'll need to fix this accounting as a start.
4450 		 * And early donation c_segs are separate from frozen c_segs.
4451 		 */
4452 		assert(vm_darkwake_mode == FALSE);
4453 		c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
4454 		freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
4455 	}
4456 #endif /* CONFIG_FREEZE */
4457 
4458 	if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4459 		/* this is possible if we decompressed a page from the segment before it finished filling */
4460 #if CONFIG_FREEZE
4461 		assert(c_seg->c_task_owner == NULL);
4462 #endif /* CONFIG_FREEZE */
4463 		c_seg_need_delayed_compaction(c_seg, TRUE);
4464 	}
4465 
4466 	lck_mtx_unlock_always(c_list_lock);
4467 
4468 	if (wakeup_swapout_thread) {
4469 		/*
4470 		 * Darkwake and Freeze configs always
4471 		 * wake up the swapout thread because
4472 		 * the compactor thread that normally handles
4473 		 * it may not be running as much in these
4474 		 * configs.
4475 		 */
4476 		thread_wakeup((event_t)&vm_swapout_thread);
4477 	}
4478 
4479 	*current_chead = NULL;
4480 }
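
/*
 * Illustrative sketch (compiled out): the try-lock-first contention
 * accounting used in c_current_seg_filled(). The uncontended path costs
 * one trylock; only when that fails do we pay for two clock reads to
 * record how long we waited. Assumes pthreads and CLOCK_MONOTONIC; the
 * ex_* names are hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdint.h>
#include <time.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t ex_no_contention, ex_contention;
static int64_t  ex_max_wait_ns;

static void
ex_lock_with_stats(void)
{
	struct timespec t0, t1;
	int64_t         waited;

	if (pthread_mutex_trylock(&ex_lock) == 0) {
		ex_no_contention++;     /* fast path: nobody held the lock */
		return;
	}
	clock_gettime(CLOCK_MONOTONIC, &t0);
	pthread_mutex_lock(&ex_lock);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	waited = (int64_t)(t1.tv_sec - t0.tv_sec) * 1000000000 +
	    (t1.tv_nsec - t0.tv_nsec);
	if (waited > ex_max_wait_ns) {
		ex_max_wait_ns = waited;        /* high-water mark */
	}
	ex_contention++;
}
#endif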
4481 
4482 /*
4483  * returns with c_seg locked
4484  */
4485 void
4486 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
4487 {
4488 	clock_sec_t     sec;
4489 	clock_nsec_t    nsec;
4490 
4491 	clock_get_system_nanotime(&sec, &nsec);
4492 
4493 	lck_mtx_lock_spin_always(c_list_lock);
4494 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4495 
4496 	assert(c_seg->c_busy_swapping);
4497 	assert(c_seg->c_busy);
4498 
4499 	c_seg->c_busy_swapping = 0;
4500 
4501 	if (c_seg->c_overage_swap == TRUE) {
4502 		c_overage_swapped_count--;
4503 		c_seg->c_overage_swap = FALSE;
4504 	}
4505 	if (has_data == TRUE) {
4506 		if (age_on_swapin_q == TRUE || c_seg->c_has_donated_pages) {
4507 #if CONFIG_FREEZE
4508 			/*
4509 			 * If a segment has both identities (frozen and donated bits set), the donated
4510 			 * bit wins on the swapin path. This is because the segment is being swapped back
4511 			 * in and so is in demand and should be given more time to spend in memory before
4512 			 * being swapped back out under pressure.
4513 			 */
4514 			if (c_seg->c_has_donated_pages) {
4515 				c_seg->c_has_freezer_pages = 0;
4516 			}
4517 #endif /* CONFIG_FREEZE */
4518 			c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
4519 		} else {
4520 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
4521 		}
4522 
4523 		if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4524 			c_seg_need_delayed_compaction(c_seg, TRUE);
4525 		}
4526 	} else {
4527 		c_seg->c_store.c_buffer = (int32_t*) NULL;
4528 		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4529 
4530 		c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
4531 	}
4532 	c_seg->c_swappedin_ts = (uint32_t)sec;
4533 	c_seg->c_swappedin = true;
4534 #if TRACK_C_SEGMENT_UTILIZATION
4535 	c_seg->c_decompressions_since_swapin = 0;
4536 #endif /* TRACK_C_SEGMENT_UTILIZATION */
4537 
4538 	lck_mtx_unlock_always(c_list_lock);
4539 }
4540 
4541 
4542 
4543 /*
4544  * c_seg has to be locked and is returned locked if the c_seg isn't freed
4545  * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
4546  * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
4547  */
4548 
4549 int
4550 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
4551 {
4552 	vm_offset_t     addr = 0;
4553 	uint32_t        io_size = 0;
4554 	uint64_t        f_offset;
4555 	thread_pri_floor_t token;
4556 
4557 	assert(C_SEG_IS_ONDISK(c_seg));
4558 
4559 #if !CHECKSUM_THE_SWAP
4560 	c_seg_trim_tail(c_seg);
4561 #endif
4562 	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
4563 	f_offset = c_seg->c_store.c_swap_handle;
4564 
4565 	C_SEG_BUSY(c_seg);
4566 	c_seg->c_busy_swapping = 1;
4567 
4568 	/*
4569 	 * This thread is likely going to block for I/O.
4570 	 * Make sure it is ready to run when the I/O completes because
4571 	 * it needs to clear the busy bit on the c_seg so that other
4572 	 * waiting threads can make progress too.
4573 	 */
4574 	token = thread_priority_floor_start();
4575 	lck_mtx_unlock_always(&c_seg->c_lock);
4576 
4577 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4578 
4579 	addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
4580 	c_seg->c_store.c_buffer = (int32_t*) addr;
4581 
4582 	kernel_memory_populate(addr, io_size, KMA_NOFAIL | KMA_COMPRESSOR,
4583 	    VM_KERN_MEMORY_COMPRESSOR);
4584 
4585 	if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
4586 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4587 
4588 		kernel_memory_depopulate(addr, io_size, KMA_COMPRESSOR,
4589 		    VM_KERN_MEMORY_COMPRESSOR);
4590 
4591 		c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
4592 	} else {
4593 #if ENCRYPTED_SWAP
4594 		vm_swap_decrypt(c_seg);
4595 #endif /* ENCRYPTED_SWAP */
4596 
4597 #if CHECKSUM_THE_SWAP
4598 		if (c_seg->cseg_swap_size != io_size) {
4599 			panic("swapin size doesn't match swapout size");
4600 		}
4601 
4602 		if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
4603 			panic("c_seg_swapin - Swap hash mismatch");
4604 		}
4605 #endif /* CHECKSUM_THE_SWAP */
4606 
4607 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4608 
4609 		c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
4610 
4611 #if CONFIG_FREEZE
4612 		/*
4613 		 * c_seg_swapin_requeue() returns with the c_seg lock held.
4614 		 */
4615 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4616 			assert(c_seg->c_busy);
4617 
4618 			lck_mtx_unlock_always(&c_seg->c_lock);
4619 			lck_mtx_lock_spin_always(c_list_lock);
4620 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4621 		}
4622 
4623 		if (c_seg->c_task_owner) {
4624 			c_seg_update_task_owner(c_seg, NULL);
4625 		}
4626 
4627 		lck_mtx_unlock_always(c_list_lock);
4628 
4629 		OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore);
4630 		if (c_seg->c_has_donated_pages) {
4631 			OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore_late_swapout);
4632 		}
4633 #endif /* CONFIG_FREEZE */
4634 
4635 		OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
4636 
4637 		if (force_minor_compaction == TRUE) {
4638 			if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
4639 				/*
4640 				 * c_seg was completely empty so it was freed,
4641 				 * so be careful not to reference it again
4642 				 *
4643 				 * Drop the boost so that the thread priority
4644 				 * is returned back to where it is supposed to be.
4645 				 */
4646 				thread_priority_floor_end(&token);
4647 				return 1;
4648 			}
4649 
4650 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4651 		}
4652 	}
4653 	C_SEG_WAKEUP_DONE(c_seg);
4654 
4655 	/*
4656 	 * Drop the boost so that the thread priority
4657 	 * is returned back to where it is supposed to be.
4658 	 */
4659 	thread_priority_floor_end(&token);
4660 
4661 	return 0;
4662 }
4663 
4664 
4665 static void
4666 c_segment_sv_hash_drop_ref(int hash_indx)
4667 {
4668 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4669 
4670 	while (1) {
4671 		o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
4672 
4673 		n_sv_he.he_ref = o_sv_he.he_ref - 1;
4674 		n_sv_he.he_data = o_sv_he.he_data;
4675 
4676 		if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
4677 			if (n_sv_he.he_ref == 0) {
4678 				OSAddAtomic(-1, &c_segment_svp_in_hash);
4679 			}
4680 			break;
4681 		}
4682 	}
4683 }
4684 
4685 
4686 static int
4687 c_segment_sv_hash_insert(uint32_t data)
4688 {
4689 	int             hash_sindx;
4690 	int             misses;
4691 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4692 	boolean_t       got_ref = FALSE;
4693 
4694 	if (data == 0) {
4695 		OSAddAtomic(1, &c_segment_svp_zero_compressions);
4696 	} else {
4697 		OSAddAtomic(1, &c_segment_svp_nonzero_compressions);
4698 	}
4699 
4700 	hash_sindx = data & C_SV_HASH_MASK;
4701 
4702 	for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
4703 		o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4704 
4705 		while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
4706 			n_sv_he.he_ref = o_sv_he.he_ref + 1;
4707 			n_sv_he.he_data = data;
4708 
4709 			if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
4710 				if (n_sv_he.he_ref == 1) {
4711 					OSAddAtomic(1, &c_segment_svp_in_hash);
4712 				}
4713 				got_ref = TRUE;
4714 				break;
4715 			}
4716 			o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4717 		}
4718 		if (got_ref == TRUE) {
4719 			break;
4720 		}
4721 		hash_sindx++;
4722 
4723 		if (hash_sindx == C_SV_HASH_SIZE) {
4724 			hash_sindx = 0;
4725 		}
4726 	}
4727 	if (got_ref == FALSE) {
4728 		return -1;
4729 	}
4730 
4731 	return hash_sindx;
4732 }
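
/*
 * Illustrative sketch (compiled out): the lock-free update pattern shared
 * by c_segment_sv_hash_drop_ref() and c_segment_sv_hash_insert(). A
 * 64-bit record packs a refcount next to the 32-bit data value so both
 * can be swapped atomically; C11 atomics stand in for OSCompareAndSwap64
 * and the ex_* names are hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

union ex_sv_entry {
	uint64_t record;
	struct {
		uint32_t ref;
		uint32_t data;
	};
};

static _Atomic uint64_t ex_table[256];

static void
ex_drop_ref(int idx)
{
	union ex_sv_entry o, n;

	do {
		o.record = atomic_load(&ex_table[idx]);
		n.ref = o.ref - 1;      /* the only field we change */
		n.data = o.data;
	} while (!atomic_compare_exchange_weak(&ex_table[idx],
	    &o.record, n.record));
}
#endif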
4733 
4734 
4735 #if RECORD_THE_COMPRESSED_DATA
4736 
4737 static void
4738 c_compressed_record_data(char *src, int c_size)
4739 {
4740 	if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
4741 		panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
4742 	}
4743 
4744 	*(int *)((void *)c_compressed_record_cptr) = c_size;
4745 
4746 	c_compressed_record_cptr += 4;
4747 
4748 	memcpy(c_compressed_record_cptr, src, c_size);
4749 	c_compressed_record_cptr += c_size;
4750 }
4751 #endif
4752 
4753 
4754 /**
4755  * Do the actual compression of the given page
4756  * @param src [IN] address in the physical aperture of the page to compress.
4757  * @param slot_ptr [OUT] fill the slot-mapping of the c_seg+slot where the page ends up being stored
4758  * @param current_chead [IN-OUT] the current filling c_seg; the pointer comes from the current compression thread state.
4759  *          On the very first call it points to NULL and this function fills it in with a new
4760  *          filling c_seg. If the current filling c_seg doesn't have enough space, it is replaced in this
4761  *          location with a new filling c_seg.
4762  * @param scratch_buf [IN] pointer from the current thread state, used by the compression codec
4763  * @return 0 on success, 1 on memory allocation error
4764  */
4765 static int
4766 c_compress_page(
4767 	char             *src,
4768 	c_slot_mapping_t slot_ptr,
4769 	c_segment_t      *current_chead,
4770 	char             *scratch_buf,
4771 	__unused vm_compressor_options_t flags)
4772 {
4773 	int              c_size = -1;
4774 	int              c_rounded_size = 0;
4775 	int              max_csize;
4776 	c_slot_t         cs;
4777 	c_segment_t      c_seg;
4778 
4779 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
4780 retry:  /* may need to retry if the currently filling c_seg will not have enough space */
4781 	if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
4782 		return 1;
4783 	}
4784 	/*
4785 	 * returns with c_seg lock held
4786 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
4787 	 * c_nextslot has been allocated and
4788 	 * c_store.c_buffer populated
4789 	 */
4790 	assert(c_seg->c_state == C_IS_FILLING);
4791 
4792 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
4793 
4794 	C_SLOT_ASSERT_PACKABLE(slot_ptr);
4795 	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
4796 
4797 	cs->c_offset = c_seg->c_nextoffset;
4798 
4799 	unsigned int avail_space = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
4800 
4801 
4802 	max_csize = avail_space;
4803 	if (max_csize > PAGE_SIZE) {
4804 		max_csize = PAGE_SIZE;
4805 	}
4806 
4807 #if CHECKSUM_THE_DATA
4808 	cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
4809 #endif
4810 	boolean_t incomp_copy = FALSE; /* set if the codec already copied the incompressible page itself */
4811 	int max_csize_adj = (max_csize - 4); /* bytes left in this c_seg for the compressed output */
4812 
4813 	if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4814 #if defined(__arm64__)
4815 		uint16_t ccodec = CINVALID;
4816 		uint32_t inline_popcount;
4817 		if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
4818 			vm_memtag_disable_checking();
4819 			c_size = metacompressor((const uint8_t *) src,
4820 			    (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4821 			    max_csize_adj, &ccodec,
4822 			    scratch_buf, &incomp_copy, &inline_popcount);
4823 			vm_memtag_enable_checking();
4824 			assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4825 
4826 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
4827 			if (c_size > max_csize_adj) {
4828 				c_size = -1;
4829 			}
4830 #endif
4831 		} else {
4832 			c_size = -1;
4833 		}
4834 		assert(ccodec == CCWK || ccodec == CCLZ4);
4835 		cs->c_codec = ccodec;
4836 #endif
4837 	} else {
4838 #if defined(__arm64__)
4839 		vm_memtag_disable_checking();
4840 		cs->c_codec = CCWK;
4841 		__unreachable_ok_push
4842 		if (PAGE_SIZE == 4096) {
4843 			c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4844 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4845 		} else {
4846 			c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4847 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4848 		}
4849 		__unreachable_ok_pop
4850 		vm_memtag_enable_checking();
4851 #else
4852 		vm_memtag_disable_checking();
4853 		c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4854 		    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4855 		vm_memtag_enable_checking();
4856 #endif
4857 	}
4858 	/* c_size is the size written by the codec, 0 if the page is a uniform 32-bit value,
4859 	 * or -1 if there was not enough space or the page was incompressible */
4860 	assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
4861 	    "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
4862 
4863 	if (c_size == -1) {
4864 		if (max_csize < PAGE_SIZE) {
4865 			c_current_seg_filled(c_seg, current_chead);
4866 			assert(*current_chead == NULL);
4867 
4868 			lck_mtx_unlock_always(&c_seg->c_lock);
4869 			/* TODO: it may be worth requiring codecs to distinguish
4870 			 * between incompressible inputs and failures due to budget exhaustion.
4871 			 * right now this assumes that if the space we had is > PAGE_SIZE, then the codec failed due to incompressible input */
4872 
4873 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
4874 			goto retry;  /* previous c_seg didn't have enough space; we finalized it and can try again with a fresh c_seg */
4875 		}
4876 		c_size = PAGE_SIZE;
4877 
4878 		if (incomp_copy == FALSE) { /* codec did not copy the incompressible input */
4879 			vm_memtag_disable_checking();
4880 			memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4881 			vm_memtag_enable_checking();
4882 		}
4883 
4884 		OSAddAtomic(1, &c_segment_noncompressible_pages);
4885 	} else if (c_size == 0) {
4886 		/*
4887 		 * Special case - this is a page completely full of a single 32 bit value.
4888 		 * We store some values directly in the c_slot_mapping, if not there, the
4889 		 * 4 byte value goes in the compressor segment.
4890 		 */
4891 		int hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);
4892 
4893 		if (hash_index != -1) {
4895 			slot_ptr->s_cindx = hash_index;
4896 			slot_ptr->s_cseg = C_SV_CSEG_ID;
4897 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4898 			slot_ptr->s_uncompressed = 0;
4899 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4900 
4901 			OSAddAtomic(1, &c_segment_svp_hash_succeeded);
4902 #if RECORD_THE_COMPRESSED_DATA
4903 			c_compressed_record_data(src, 4);
4904 #endif
4905 			/* we didn't write anything to c_buffer and didn't end up using a slot in the c_seg at all,
4906 			 * so skip all the bookkeeping that the segment-backed path below needs */
4907 			goto sv_compression;
4908 		}
4909 		OSAddAtomic(1, &c_segment_svp_hash_failed);
4910 
4911 		c_size = 4;
4912 		vm_memtag_disable_checking();
4913 		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4914 		vm_memtag_enable_checking();
4915 	}
4916 
4917 #if RECORD_THE_COMPRESSED_DATA
4918 	c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4919 #endif
4920 #if CHECKSUM_THE_COMPRESSED_DATA
4921 	cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4922 #endif
4923 #if POPCOUNT_THE_COMPRESSED_DATA
4924 	cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
4925 #endif
4926 
4927 	PACK_C_SIZE(cs, c_size);
4928 
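	/*
	 * Round the stored size up to the segment's offset granularity so the
	 * next slot starts on an aligned boundary. For example, with a 4-byte
	 * boundary (mask == 3), a 61-byte payload occupies (61 + 3) & ~3 == 64
	 * bytes of the buffer.
	 */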
4929 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4930 
4931 	c_seg->c_bytes_used += c_rounded_size;
4932 	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
4933 	c_seg->c_slots_used++;
4934 
4935 #if CONFIG_FREEZE
4936 	/* TODO: should c_segment_pages_compressed be up here too? See 88598046 for details */
4937 	OSAddAtomic(1, &c_segment_pages_compressed_incore);
4938 	if (c_seg->c_has_donated_pages) {
4939 		OSAddAtomic(1, &c_segment_pages_compressed_incore_late_swapout);
4940 	}
4941 #endif /* CONFIG_FREEZE */
4942 
4943 	slot_ptr->s_cindx = c_seg->c_nextslot++;
4944 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1, see other usages of s_cseg where it's decremented */
4945 	slot_ptr->s_cseg = c_seg->c_mysegno + 1;
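	/*
	 * Worked example: a page landing in slot 7 of segment 12 is recorded as
	 * <s_cseg=13, s_cindx=7>; readers such as c_decompress_page() recover
	 * the segment via c_segments[slot_ptr->s_cseg - 1].
	 */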
4946 
4947 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4948 	slot_ptr->s_uncompressed = 0;
4949 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4950 
4951 sv_compression:
4952 	/* can we say this c_seg is full? */
4953 	if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
4954 		/* condition 1: segment buffer is almost full, don't bother trying to fill it further.
4955 		 * condition 2: we can't have any more slots in this c_segment even if we had buffer space */
4956 		c_current_seg_filled(c_seg, current_chead);
4957 		assert(*current_chead == NULL);
4958 	}
4959 
4960 	lck_mtx_unlock_always(&c_seg->c_lock);
4961 
4962 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4963 
4964 #if RECORD_THE_COMPRESSED_DATA
4965 	if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
4966 		c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
4967 		c_compressed_record_cptr = c_compressed_record_sbuf;
4968 	}
4969 #endif
4970 	if (c_size) {
4971 		OSAddAtomic64(c_size, &c_segment_compressed_bytes);
4972 		OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
4973 	}
4974 	OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
4975 
4976 	OSAddAtomic(1, &c_segment_pages_compressed);
4977 #if DEVELOPMENT || DEBUG
4978 	if (!compressor_running_perf_test) {
4979 		/*
4980 		 * The perf_compressor benchmark should not be able to trigger
4981 		 * compressor thrashing jetsams.
4982 		 */
4983 		OSAddAtomic(1, &sample_period_compression_count);
4984 	}
4985 #else /* DEVELOPMENT || DEBUG */
4986 	OSAddAtomic(1, &sample_period_compression_count);
4987 #endif /* DEVELOPMENT || DEBUG */
4988 
4989 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
4990 
4991 	return 0;
4992 }
4993 
4994 static inline void
4995 sv_decompress(int32_t *ddst, int32_t pattern)
4996 {
4997 //	assert(__builtin_constant_p(PAGE_SIZE) != 0);
4998 #if defined(__x86_64__)
4999 	memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
5000 #elif defined(__arm64__)
5001 	assert((PAGE_SIZE % 128) == 0);
5002 	if (pattern == 0) {
5003 		fill32_dczva((addr64_t)ddst, PAGE_SIZE);
5004 	} else {
5005 		fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
5006 	}
5007 #else
5008 	size_t          i;
5009 
5010 	/* Unroll the pattern fill loop 4x to encourage the
5011 	 * compiler to emit NEON stores, cf.
5012 	 * <rdar://problem/25839866> Loop autovectorization
5013 	 * anomalies.
5014 	 */
5015 	/* We use separate loops for each PAGE_SIZE
5016 	 * to allow the autovectorizer to engage, as PAGE_SIZE
5017 	 * may not be a constant.
5018 	 */
5019 
5020 	__unreachable_ok_push
5021 	if (PAGE_SIZE == 4096) {
5022 		for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
5023 			*ddst++ = pattern;
5024 			*ddst++ = pattern;
5025 			*ddst++ = pattern;
5026 			*ddst++ = pattern;
5027 		}
5028 	} else {
5029 		assert(PAGE_SIZE == 16384);
5030 		for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
5031 			*ddst++ = pattern;
5032 			*ddst++ = pattern;
5033 			*ddst++ = pattern;
5034 			*ddst++ = pattern;
5035 		}
5036 	}
5037 	__unreachable_ok_pop
5038 #endif
5039 }
5040 
5041 static int
5042 c_decompress_page(
5043 	char            *dst,
5044 	volatile c_slot_mapping_t slot_ptr,    /* why volatile? perhaps due to changes across hibernation */
5045 	vm_compressor_options_t flags,
5046 	int             *zeroslot)
5047 {
5048 	c_slot_t        cs;
5049 	c_segment_t     c_seg;
5050 	uint32_t        c_segno;
5051 	uint16_t        c_indx;
5052 	int             c_rounded_size;
5053 	uint32_t        c_size;
5054 	int             retval = 0;
5055 	boolean_t       need_unlock = TRUE;
5056 	boolean_t       consider_defragmenting = FALSE;
5057 	boolean_t       kdp_mode = FALSE;
5058 
5059 	if (__improbable(flags & C_KDP)) {
5060 		if (not_in_kdp) {
5061 			panic("C_KDP passed to decompress page from outside of debugger context");
5062 		}
5063 
5064 		assert((flags & C_KEEP) == C_KEEP);
5065 		assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
5066 
5067 		if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
5068 			return -2;
5069 		}
5070 
5071 		kdp_mode = TRUE;
5072 		*zeroslot = 0;
5073 	}
5074 
5075 ReTry:
5076 	if (__probable(!kdp_mode)) {
5077 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5078 	} else {
5079 		if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
5080 			return -2;
5081 		}
5082 	}
5083 
5084 #if HIBERNATION
5085 	/*
5086 	 * if hibernation is enabled, it indicates (via a call
5087 	 * to 'vm_decompressor_lock') that no further
5088 	 * decompressions are allowed once it reaches
5089 	 * the point of flushing all of the currently dirty
5090 	 * anonymous memory through the compressor and out
5091 	 * to disk... in this state we allow freeing of compressed
5092 	 * pages and must honor the C_DONT_BLOCK case
5093 	 */
5094 	if (__improbable(dst && decompressions_blocked == TRUE)) {
5095 		if (flags & C_DONT_BLOCK) {
5096 			if (__probable(!kdp_mode)) {
5097 				PAGE_REPLACEMENT_DISALLOWED(FALSE);
5098 			}
5099 
5100 			*zeroslot = 0;
5101 			return -2;
5102 		}
5103 		/*
5104 		 * it's safe to atomically assert and block behind the
5105 		 * lock held in shared mode because "decompressions_blocked" is
5106 		 * only set and cleared and the thread_wakeup done when the lock
5107 		 * is held exclusively
5108 		 */
5109 		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
5110 
5111 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5112 
5113 		thread_block(THREAD_CONTINUE_NULL);
5114 
5115 		goto ReTry;
5116 	}
5117 #endif
5118 	/* s_cseg is actually "segno+1" */
5119 	c_segno = slot_ptr->s_cseg - 1;
5120 
5121 	if (__improbable(c_segno >= c_segments_available)) {
5122 		panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
5123 		    c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
5124 	}
5125 
5126 	if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) {
5127 		panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
5128 		    c_segno, slot_ptr, *(int *)((void *)slot_ptr));
5129 	}
5130 
5131 	c_seg = c_segments[c_segno].c_seg;
5132 
5133 	if (__probable(!kdp_mode)) {
5134 		lck_mtx_lock_spin_always(&c_seg->c_lock);
5135 	} else {
5136 		if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
5137 			return -2;
5138 		}
5139 	}
5140 
5141 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
5142 
5143 	if (dst == NULL && c_seg->c_busy_swapping) {
5144 		assert(c_seg->c_busy);
5145 
5146 		goto bypass_busy_check;
5147 	}
5148 	if (flags & C_DONT_BLOCK) {
5149 		if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
5150 			*zeroslot = 0;
5151 
5152 			retval = -2;
5153 			goto done;
5154 		}
5155 	}
5156 	if (c_seg->c_busy) {
5157 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5158 
5159 		c_seg_wait_on_busy(c_seg);
5160 
5161 		goto ReTry;
5162 	}
5163 bypass_busy_check:
5164 
5165 	c_indx = slot_ptr->s_cindx;
5166 
5167 	if (__improbable(c_indx >= c_seg->c_nextslot)) {
5168 		panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5169 		    c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5170 	}
5171 
5172 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5173 
5174 	c_size = UNPACK_C_SIZE(cs);
5175 
5176 	if (__improbable(c_size == 0)) { /* sanity check it's not an empty slot */
5177 		panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5178 		    c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5179 	}
5180 
5181 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5182 
5183 	if (dst) {  /* would be NULL if we don't want the page content, from free */
5184 		uint32_t        age_of_cseg;
5185 		clock_sec_t     cur_ts_sec;
5186 		clock_nsec_t    cur_ts_nsec;
5187 
5188 		if (C_SEG_IS_ONDISK(c_seg)) {
5189 #if CONFIG_FREEZE
5190 			if (freezer_incore_cseg_acct) {
5191 				if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
5192 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5193 					lck_mtx_unlock_always(&c_seg->c_lock);
5194 
5195 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5196 
5197 					goto ReTry;
5198 				}
5199 
5200 				uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
5201 				if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
5202 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5203 					lck_mtx_unlock_always(&c_seg->c_lock);
5204 
5205 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5206 
5207 					goto ReTry;
5208 				}
5209 			}
5210 #endif /* CONFIG_FREEZE */
5211 			assert(kdp_mode == FALSE);
5212 			retval = c_seg_swapin(c_seg, FALSE, TRUE);
5213 			assert(retval == 0);
5214 
5215 			retval = 1;
5216 		}
5217 		if (c_seg->c_state == C_ON_BAD_Q) {
5218 			assert(c_seg->c_store.c_buffer == NULL);
5219 			*zeroslot = 0;
5220 
5221 			retval = -1;
5222 			goto done;
5223 		}
5224 
5225 #if POPCOUNT_THE_COMPRESSED_DATA
5226 		unsigned csvpop;
5227 		uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
5228 		if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
5229 			panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
5230 		}
5231 #endif
5232 
5233 #if CHECKSUM_THE_COMPRESSED_DATA
5234 		unsigned csvhash;
5235 		if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
5236 			panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
5237 		}
5238 #endif
5239 		if (c_rounded_size == PAGE_SIZE) {
5240 			/* page wasn't compressible... just copy it out */
5241 			vm_memtag_disable_checking();
5242 			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
5243 			vm_memtag_enable_checking();
5244 		} else if (c_size == 4) {
5245 			int32_t         data;
5246 			int32_t         *dptr;
5247 
5248 			/*
5249 			 * page was populated with a single value
5250 			 * that didn't fit into our fast hash
5251 			 * so we packed it in as a single non-compressed value
5252 			 * that we need to populate the page with
5253 			 */
5254 			dptr = (int32_t *)(uintptr_t)dst;
5255 			data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
5256 			vm_memtag_disable_checking();
5257 			sv_decompress(dptr, data);
5258 			vm_memtag_enable_checking();
5259 		} else {  /* normal segment decompress */
5260 			uint32_t        my_cpu_no;
5261 			char            *scratch_buf;
5262 
5263 			my_cpu_no = cpu_number();
5264 
5265 			assert(my_cpu_no < compressor_cpus);
5266 
5267 			if (__probable(!kdp_mode)) {
5268 				/*
5269 				 * we're behind the c_seg lock held in spin mode
5270 				 * which means pre-emption is disabled... therefore
5271 				 * the following sequence is atomic and safe
5272 				 */
5273 				scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5274 			} else if (flags & C_KDP_MULTICPU) {
5275 				assert(vm_compressor_kdp_state.kc_scratch_bufs != NULL);
5276 				scratch_buf = &vm_compressor_kdp_state.kc_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5277 			} else {
5278 				scratch_buf = vm_compressor_kdp_state.kc_panic_scratch_buf;
5279 			}
5280 
5281 			if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5282 #if defined(__arm64__)
5283 				uint16_t c_codec = cs->c_codec;
5284 				uint32_t inline_popcount;
5285 				vm_memtag_disable_checking();
5286 				if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5287 				    (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
5288 					vm_memtag_enable_checking();
5289 					retval = -1;
5290 				} else {
5291 					vm_memtag_enable_checking();
5292 					assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5293 				}
5294 #endif
5295 			} else {  /* algorithm == VM_COMPRESSOR_DEFAULT_CODEC */
5296 				vm_memtag_disable_checking();
5297 #if defined(__arm64__)
5298 				__unreachable_ok_push
5299 				if (PAGE_SIZE == 4096) {
5300 					WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5301 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5302 				} else {
5303 					WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5304 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5305 				}
5306 				__unreachable_ok_pop
5307 #else
5308 				WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5309 				    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5310 #endif
5311 				vm_memtag_enable_checking();
5312 			}
5313 		} /* normal segment decompress */
5314 
5315 #if CHECKSUM_THE_DATA
5316 		if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
5317 #if defined(__arm64__)
5318 			int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
5319 			panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
5320 #else
5321 			panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
5322 #endif
5323 		}
5324 #endif
5325 		if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
5326 			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
5327 
5328 			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
5329 			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
5330 				OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
5331 			} else {
5332 				OSAddAtomic(1, &overage_decompressions_during_sample_period);
5333 			}
5334 
5335 			OSAddAtomic(1, &sample_period_decompression_count);
5336 		}
5337 
5338 
5339 #if TRACK_C_SEGMENT_UTILIZATION
5340 		if (c_seg->c_swappedin) {
5341 			c_seg->c_decompressions_since_swapin++;
5342 		}
5343 #endif /* TRACK_C_SEGMENT_UTILIZATION */
5344 	} /* dst */
5345 #if CONFIG_FREEZE
5346 	else {
5347 		/*
5348 		 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
5349 		 */
5350 		if (C_SEG_IS_ONDISK(c_seg)) {
5351 			/*
5352 			 * The compression sweep feature will push out anonymous pages to disk
5353 			 * without going through the freezer path and so those c_segs, while
5354 			 * swapped out, won't have an owner.
5355 			 */
5356 			if (c_seg->c_task_owner) {
5357 				task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
5358 			}
5359 
5360 			/*
5361 			 * We are freeing a page in swap without swapping it in. We bump the in-core
5362 			 * count here to simulate a swapin of a page so that we can accurately
5363 			 * decrement it below.
5364 			 */
5365 			OSAddAtomic(1, &c_segment_pages_compressed_incore);
5366 			if (c_seg->c_has_donated_pages) {
5367 				OSAddAtomic(1, &c_segment_pages_compressed_incore_late_swapout);
5368 			}
5369 		} else if (c_seg->c_state == C_ON_BAD_Q) {
5370 			assert(c_seg->c_store.c_buffer == NULL);
5371 			*zeroslot = 0;
5372 
5373 			retval = -1;
5374 			goto done;
5375 		}
5376 	}
5377 #endif /* CONFIG_FREEZE */
5378 
5379 	if (flags & C_KEEP) {
5380 		*zeroslot = 0;
5381 		goto done;
5382 	}
5383 	/* now perform needed bookkeeping for the removal of the slot from the segment */
5384 	assert(kdp_mode == FALSE);
5385 
5386 	c_seg->c_bytes_unused += c_rounded_size;
5387 	c_seg->c_bytes_used -= c_rounded_size;
5388 
5389 	assert(c_seg->c_slots_used);
5390 	c_seg->c_slots_used--;
5391 	if (dst && c_seg->c_swappedin) {
5392 		task_t task = current_task();
5393 		if (task) {
5394 			ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
5395 		}
5396 	}
5397 
5398 	PACK_C_SIZE(cs, 0); /* mark slot as empty */
5399 
5400 	if (c_indx < c_seg->c_firstemptyslot) {
5401 		c_seg->c_firstemptyslot = c_indx;
5402 	}
5403 
5404 	OSAddAtomic(-1, &c_segment_pages_compressed);
5405 #if CONFIG_FREEZE
5406 	OSAddAtomic(-1, &c_segment_pages_compressed_incore);
5407 	assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
5408 	if (c_seg->c_has_donated_pages) {
5409 		OSAddAtomic(-1, &c_segment_pages_compressed_incore_late_swapout);
5410 		assertf(c_segment_pages_compressed_incore_late_swapout >= 0, "-ve lateswapout count %p 0x%x", c_seg, c_segment_pages_compressed_incore_late_swapout);
5411 	}
5412 #endif /* CONFIG_FREEZE */
5413 
5414 	if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
5415 		/*
5416 		 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
5417 		 * free of a compressed page (i.e. dst == NULL)
5418 		 */
5419 		OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
5420 	}
5421 	if (c_seg->c_busy_swapping) {
5422 		/*
5423 		 * bypass case for c_busy_swapping...
5424 		 * let the swapin/swapout paths deal with putting
5425 		 * the c_seg on the minor compaction queue if needed
5426 		 */
5427 		assert(c_seg->c_busy);
5428 		goto done;
5429 	}
5430 	assert(!c_seg->c_busy);
5431 
5432 	if (c_seg->c_state != C_IS_FILLING) {
5433 		/* did we just remove the last slot from the segment? */
5434 		if (c_seg->c_bytes_used == 0) {
5435 			if (!(C_SEG_IS_ONDISK(c_seg))) {
5436 				/* it was compressed resident in memory */
5437 				int     pages_populated;
5438 
5439 				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
5440 				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
5441 
5442 				if (pages_populated) {
5443 					assert(c_seg->c_state != C_ON_BAD_Q);
5444 					assert(c_seg->c_store.c_buffer != NULL);
5445 
5446 					C_SEG_BUSY(c_seg);
5447 					lck_mtx_unlock_always(&c_seg->c_lock);
5448 
5449 					kernel_memory_depopulate(
5450 						(vm_offset_t) c_seg->c_store.c_buffer,
5451 						ptoa(pages_populated),
5452 						KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
5453 
5454 					lck_mtx_lock_spin_always(&c_seg->c_lock);
5455 					C_SEG_WAKEUP_DONE(c_seg);
5456 				}
5457 				/* minor compaction will free it */
5458 				if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPIO_Q) {
5459 					if (c_seg->c_state == C_ON_SWAPOUT_Q) {
5460 						/* If we're on the swapout q, we want to get off of it since there's no reason to swap out
5461 						 * anymore, so put the segment on the AGE Q in the meantime until minor compaction frees it */
5462 						bool clear_busy = false;
5463 						if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
5464 							C_SEG_BUSY(c_seg);
5465 
5466 							lck_mtx_unlock_always(&c_seg->c_lock);
5467 							lck_mtx_lock_spin_always(c_list_lock);
5468 							lck_mtx_lock_spin_always(&c_seg->c_lock);
5469 							clear_busy = true;
5470 						}
5471 						c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
5472 						if (clear_busy) {
5473 							C_SEG_WAKEUP_DONE(c_seg);
5474 							clear_busy = false;
5475 						}
5476 						lck_mtx_unlock_always(c_list_lock);
5477 					}
5478 					c_seg_need_delayed_compaction(c_seg, FALSE);
5479 				}
5480 			} else { /* C_SEG_IS_ONDISK(c_seg) */
5481 				/* it's empty and on-disk, make sure it's marked as sparse */
5482 				if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
5483 					c_seg_move_to_sparse_list(c_seg);
5484 					consider_defragmenting = TRUE;
5485 				}
5486 			}
5487 		} else if (c_seg->c_on_minorcompact_q) {
5488 			assert(c_seg->c_state != C_ON_BAD_Q);
5489 			assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
5490 
5491 			if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
5492 				c_seg_try_minor_compaction_and_unlock(c_seg);
5493 				need_unlock = FALSE;
5494 			}
5495 		} else if (!(C_SEG_IS_ONDISK(c_seg))) {
5496 			if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
5497 			    C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
5498 				c_seg_need_delayed_compaction(c_seg, FALSE);
5499 			}
5500 		} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
5501 			c_seg_move_to_sparse_list(c_seg);
5502 			consider_defragmenting = TRUE;
5503 		}
5504 	} /* c_state != C_IS_FILLING */
5505 done:
5506 	if (__improbable(kdp_mode)) {
5507 		return retval;
5508 	}
5509 
5510 	if (need_unlock == TRUE) {
5511 		lck_mtx_unlock_always(&c_seg->c_lock);
5512 	}
5513 
5514 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5515 
5516 	if (consider_defragmenting == TRUE) {
5517 		vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
5518 	}
5519 
5520 #if !XNU_TARGET_OS_OSX
5521 	/*
5522 	 * Decompressions will generate fragmentation in the compressor pool
5523 	 * over time. Consider waking the compactor thread if any of the
5524 	 * fragmentation thresholds have been crossed as a result of this
5525 	 * decompression.
5526 	 */
5527 	vm_consider_waking_compactor_swapper();
5528 #endif /* !XNU_TARGET_OS_OSX */
5529 
5530 	return retval;
5531 }
5532 
5533 
5534 inline bool
5535 vm_compressor_is_slot_compressed(int *slot)
5536 {
5537 #if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5538 #pragma unused(slot)
5539 	return true;
5540 #else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5541 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
5542 	return !slot_ptr->s_uncompressed;
5543 #endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5544 }
5545 
5546 int
5547 vm_compressor_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
5548 {
5549 	c_slot_mapping_t  slot_ptr;
5550 	char    *dst;
5551 	int     zeroslot = 1;
5552 	int     retval;
5553 
5554 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5555 	if (flags & C_PAGE_UNMODIFIED) {
5556 		retval = vm_uncompressed_get(pn, slot, flags | C_KEEP);
5557 		if (retval == 0) {
5558 			os_atomic_inc(&compressor_ro_uncompressed_get, relaxed);
5559 		}
5560 
5561 		return retval;
5562 	}
5563 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5564 
5565 	/* get the address in the physical aperture of this page, to fill into */
5566 	dst = pmap_map_compressor_page(pn);
5567 	slot_ptr = (c_slot_mapping_t)slot;
5568 
5569 	assert(dst != NULL);
5570 
5571 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5572 		int32_t         data;
5573 		int32_t         *dptr;
5574 
5575 		/*
5576 		 * page was populated with a single value
5577 		 * that found a home in our hash table;
5578 		 * grab that value from the hash table
5579 		 * and populate the page with it
5580 		 */
5581 		dptr = (int32_t *)(uintptr_t)dst;
5582 		data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
5583 		sv_decompress(dptr, data);
5584 
5585 		if (!(flags & C_KEEP)) {
5586 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5587 
5588 			OSAddAtomic(-1, &c_segment_pages_compressed);
5589 			*slot = 0;
5590 		}
5591 		if (data) {
5592 			OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
5593 		} else {
5594 			OSAddAtomic(1, &c_segment_svp_zero_decompressions);
5595 		}
5596 
5597 		pmap_unmap_compressor_page(pn, dst);
5598 		return 0;
5599 	}
5600 	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
5601 
5602 	/*
5603 	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
5604 	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
5605 	 */
5606 	if (zeroslot) {
5607 		*slot = 0;
5608 	}
5609 
5610 	pmap_unmap_compressor_page(pn, dst);
5611 
5612 	/*
5613 	 * returns 0 if we successfully decompressed a page from a segment already in memory
5614 	 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
5615 	 * returns -1 if we encountered an error swapping in the segment - decompression failed
5616 	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
5617 	 */
5618 	return retval;
5619 }
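/*
 * Hypothetical caller sketch (not from this file) showing how the return
 * codes above are consumed; names here are illustrative only:
 *
 *	int r = vm_compressor_get(pn, &slot, 0);
 *	if (r == -1) {
 *		// swapin error: the page could not be recovered
 *	} else if (r == 1) {
 *		// page decompressed, but the segment first had to be swapped in
 *	} else {
 *		// r == 0: page decompressed from a segment already in memory
 *	}
 */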
5620 
5621 int
5622 vm_compressor_free(int *slot, vm_compressor_options_t flags)
5623 {
5624 	bool slot_is_compressed = vm_compressor_is_slot_compressed(slot);
5625 
5626 	if (slot_is_compressed) {
5627 		c_slot_mapping_t  slot_ptr;
5628 		int     zeroslot = 1;
5629 		int     retval = 0;
5630 
5631 		assert(flags == 0 || flags == C_DONT_BLOCK);
5632 
5633 		slot_ptr = (c_slot_mapping_t)slot;
5634 
5635 		if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5636 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5637 			OSAddAtomic(-1, &c_segment_pages_compressed);
5638 
5639 			*slot = 0;
5640 			return 0;
5641 		}
5642 
5643 		retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
5644 		/*
5645 		 * returns 0 if we successfully freed the specified compressed page
5646 		 * returns -1 if we encountered an error swapping in the segment - decompression failed
5647 		 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
5648 		 */
5649 
5650 		if (retval == 0) {
5651 			*slot = 0;
5652 		}
5653 
5654 		return retval;
5655 	}
5656 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5657 	else {
5658 		if ((flags & C_PAGE_UNMODIFIED) == 0) {
5659 			/* moving from uncompressed state to compressed. Free it.*/
5660 			vm_uncompressed_free(slot, 0);
5661 			assert(*slot == 0);
5662 		}
5663 	}
5664 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5665 	return KERN_SUCCESS;
5666 }
5667 
5668 int
5669 vm_compressor_put(ppnum_t pn, int *slot, void  **current_chead, char *scratch_buf, vm_compressor_options_t flags)
5670 {
5671 	char    *src;
5672 	int     retval = 0;
5673 
5674 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5675 	if (flags & C_PAGE_UNMODIFIED) {
5676 		if (*slot) {
5677 			os_atomic_inc(&compressor_ro_uncompressed_skip_returned, relaxed);
5678 			return retval;
5679 		} else {
5680 			retval = vm_uncompressed_put(pn, slot);
5681 			if (retval == KERN_SUCCESS) {
5682 				os_atomic_inc(&compressor_ro_uncompressed_put, relaxed);
5683 				return retval;
5684 			}
5685 		}
5686 	}
5687 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5688 
5689 	/* get the address of the page in the physical aperture in the kernel task virtual memory */
5690 	src = pmap_map_compressor_page(pn);   /* XXX HERE JOE this needs to map with MTE */
5691 	assert(src != NULL);
5692 
5693 	retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf,
5694 	    flags);
5695 	pmap_unmap_compressor_page(pn, src);
5696 
5697 	return retval;
5698 }
5699 
5700 void
5701 vm_compressor_transfer(
5702 	int     *dst_slot_p,
5703 	int     *src_slot_p)
5704 {
5705 	c_slot_mapping_t        dst_slot, src_slot;
5706 	c_segment_t             c_seg;
5707 	uint16_t                c_indx;
5708 	c_slot_t                cs;
5709 
5710 	src_slot = (c_slot_mapping_t) src_slot_p;
5711 
5712 	if (src_slot->s_cseg == C_SV_CSEG_ID || !vm_compressor_is_slot_compressed(src_slot_p)) {
5713 		*dst_slot_p = *src_slot_p;
5714 		*src_slot_p = 0;
5715 		return;
5716 	}
5717 	dst_slot = (c_slot_mapping_t) dst_slot_p;
5718 Retry:
5719 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
5720 	/* get segment for src_slot */
5721 	c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
5722 	/* lock segment */
5723 	lck_mtx_lock_spin_always(&c_seg->c_lock);
5724 	/* wait if it's busy */
5725 	if (c_seg->c_busy && !c_seg->c_busy_swapping) {
5726 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5727 		c_seg_wait_on_busy(c_seg);
5728 		goto Retry;
5729 	}
5730 	/* find the c_slot */
5731 	c_indx = src_slot->s_cindx;
5732 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5733 	/* point the c_slot back to dst_slot instead of src_slot */
5734 	C_SLOT_ASSERT_PACKABLE(dst_slot);
5735 	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
5736 	/* transfer */
5737 	*dst_slot_p = *src_slot_p;
5738 	*src_slot_p = 0;
5739 	lck_mtx_unlock_always(&c_seg->c_lock);
5740 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5741 }
5742 
5743 #if defined(__arm64__)
5744 extern uint64_t vm_swapfile_last_failed_to_create_ts;
5745 __attribute__((noreturn))
5746 void
5747 vm_panic_hibernate_write_image_failed(int err)
5748 {
5749 	panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d\n",
5750 	    err,
5751 	    VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
5752 	    c_age_count, c_major_count, c_minor_count, (c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count), c_swappedout_sparse_count,
5753 	    vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
5754 	    (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out);
5755 }
5756 #endif /*(__arm64__)*/
5757 
5758 #if CONFIG_FREEZE
5759 
5760 int     freezer_finished_filling = 0;
5761 
5762 void
5763 vm_compressor_finished_filling(
5764 	void    **current_chead)
5765 {
5766 	c_segment_t     c_seg;
5767 
5768 	if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
5769 		return;
5770 	}
5771 
5772 	assert(c_seg->c_state == C_IS_FILLING);
5773 
5774 	lck_mtx_lock_spin_always(&c_seg->c_lock);
5775 
5776 	c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
5777 
5778 	lck_mtx_unlock_always(&c_seg->c_lock);
5779 
5780 	freezer_finished_filling++;
5781 }
5782 
5783 
5784 /*
5785  * This routine is used to transfer the compressed chunks from
5786  * the c_seg/cindx pointed to by slot_p into a new c_seg headed
5787  * by the current_chead and a new cindx within that c_seg.
5788  *
5789  * Currently, this routine is only used by the "freezer backed by
5790  * compressor with swap" mode to create a series of c_segs that
5791  * only contain compressed data belonging to one task. So, we
5792  * move a task's previously compressed data into a set of new
5793  * c_segs which will also hold the task's yet to be compressed data.
5794  */
5795 
5796 kern_return_t
5797 vm_compressor_relocate(
5798 	void            **current_chead,
5799 	int             *slot_p)
5800 {
5801 	c_slot_mapping_t        slot_ptr;
5802 	c_slot_mapping_t        src_slot;
5803 	uint32_t                c_rounded_size;
5804 	uint32_t                c_size;
5805 	uint16_t                dst_slot;
5806 	c_slot_t                c_dst;
5807 	c_slot_t                c_src;
5808 	uint16_t                c_indx;
5809 	c_segment_t             c_seg_dst = NULL;
5810 	c_segment_t             c_seg_src = NULL;
5811 	kern_return_t           kr = KERN_SUCCESS;
5812 
5813 
5814 	src_slot = (c_slot_mapping_t) slot_p;
5815 
5816 	if (src_slot->s_cseg == C_SV_CSEG_ID) {
5817 		/*
5818 		 * no need to relocate... this is a page full of a single
5819 		 * value which is hashed to a single entry not contained
5820 		 * in a c_segment_t
5821 		 */
5822 		return kr;
5823 	}
5824 
5825 	if (vm_compressor_is_slot_compressed((int *)src_slot) == false) {
5826 		/*
5827 		 * Unmodified anonymous pages are sitting uncompressed on disk.
5828 		 * So don't pull them back in again.
5829 		 */
5830 		return kr;
5831 	}
5832 
5833 Relookup_dst:
5834 	c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
5835 	/*
5836 	 * returns with c_seg lock held
5837 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5838 	 * c_nextslot has been allocated and
5839 	 * c_store.c_buffer populated
5840 	 */
5841 	if (c_seg_dst == NULL) {
5842 		/*
5843 		 * Out of compression segments?
5844 		 */
5845 		kr = KERN_RESOURCE_SHORTAGE;
5846 		goto out;
5847 	}
5848 
5849 	assert(c_seg_dst->c_busy == 0);
5850 
5851 	C_SEG_BUSY(c_seg_dst);
5852 
5853 	dst_slot = c_seg_dst->c_nextslot;
5854 
5855 	lck_mtx_unlock_always(&c_seg_dst->c_lock);
5856 
5857 Relookup_src:
5858 	c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;
5859 
5860 	assert(c_seg_dst != c_seg_src);
5861 
5862 	lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5863 
5864 	if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
5865 	    c_seg_src->c_state == C_IS_FILLING) {
5866 		/*
5867 		 * Skip this page if:
5868 		 * a) the src c_seg is already on-disk (or on its way there)
5869 		 *    A "thaw" can mark a process as eligible for
5870 		 * another freeze cycle without bringing any of
5871 		 * its swapped out c_segs back from disk (because
5872 		 * that is done on-demand).
5873 		 *    Or, this page may be mapped elsewhere in the task's map,
5874 		 * and we may have marked it for swap already.
5875 		 *
5876 		 * b) Or, the src c_seg is being filled by the compressor
5877 		 * thread. We don't want the added latency of waiting for
5878 		 * this c_seg in the freeze path and so we skip it.
5879 		 */
5880 
5881 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5882 
5883 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5884 
5885 		c_seg_src = NULL;
5886 
5887 		goto out;
5888 	}
5889 
5890 	if (c_seg_src->c_busy) {
5891 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5892 		c_seg_wait_on_busy(c_seg_src);
5893 
5894 		c_seg_src = NULL;
5895 
5896 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5897 
5898 		goto Relookup_src;
5899 	}
5900 
5901 	C_SEG_BUSY(c_seg_src);
5902 
5903 	lck_mtx_unlock_always(&c_seg_src->c_lock);
5904 
5905 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5906 
5907 	/* find the c_slot */
5908 	c_indx = src_slot->s_cindx;
5909 
5910 	c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
5911 
5912 	c_size = UNPACK_C_SIZE(c_src);
5913 
5914 	assert(c_size);
5915 
5916 	int combined_size;
5917 	combined_size = c_size;
5918 
5919 	if (combined_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
5920 		/*
5921 		 * This segment is full. We need a new one.
5922 		 */
5923 
5924 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5925 
5926 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5927 		C_SEG_WAKEUP_DONE(c_seg_src);
5928 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5929 
5930 		c_seg_src = NULL;
5931 
5932 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5933 
5934 		assert(c_seg_dst->c_busy);
5935 		assert(c_seg_dst->c_state == C_IS_FILLING);
5936 		assert(!c_seg_dst->c_on_minorcompact_q);
5937 
5938 		c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5939 		assert(*current_chead == NULL);
5940 
5941 		C_SEG_WAKEUP_DONE(c_seg_dst);
5942 
5943 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
5944 
5945 		c_seg_dst = NULL;
5946 
5947 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5948 
5949 		goto Relookup_dst;
5950 	}
5951 
5952 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
5953 
5954 	memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
5955 	/*
5956 	 * Is platform alignment actually necessary since wkdm aligns its output?
5957 	 */
5958 	c_rounded_size = (combined_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5959 
5960 	cslot_copy(c_dst, c_src);
5961 	c_dst->c_offset = c_seg_dst->c_nextoffset;
5962 
5963 	if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
5964 		c_seg_dst->c_firstemptyslot++;
5965 	}
5966 
5967 	c_seg_dst->c_slots_used++;
5968 	c_seg_dst->c_nextslot++;
5969 	c_seg_dst->c_bytes_used += c_rounded_size;
5970 	c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5971 
5972 
5973 	PACK_C_SIZE(c_src, 0);
5974 
5975 	c_seg_src->c_bytes_used -= c_rounded_size;
5976 	c_seg_src->c_bytes_unused += c_rounded_size;
5977 
5978 	assert(c_seg_src->c_slots_used);
5979 	c_seg_src->c_slots_used--;
5980 
5981 	if (!c_seg_src->c_swappedin) {
5982 		/* Pessimistically lose swappedin status when non-swappedin pages are added. */
5983 		c_seg_dst->c_swappedin = false;
5984 	}
5985 
5986 	if (c_indx < c_seg_src->c_firstemptyslot) {
5987 		c_seg_src->c_firstemptyslot = c_indx;
5988 	}
5989 
5990 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
5991 
5992 	PAGE_REPLACEMENT_ALLOWED(TRUE);
5993 	slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
5994 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
5995 	slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
5996 	slot_ptr->s_cindx = dst_slot;
5997 
5998 	PAGE_REPLACEMENT_ALLOWED(FALSE);
5999 
6000 out:
6001 	if (c_seg_src) {
6002 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6003 
6004 		C_SEG_WAKEUP_DONE(c_seg_src);
6005 
6006 		if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
6007 			if (!c_seg_src->c_on_minorcompact_q) {
6008 				c_seg_need_delayed_compaction(c_seg_src, FALSE);
6009 			}
6010 		}
6011 
6012 		lck_mtx_unlock_always(&c_seg_src->c_lock);
6013 	}
6014 
6015 	if (c_seg_dst) {
6016 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
6017 
6018 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
6019 
6020 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
6021 			/*
6022 			 * Nearing or exceeded maximum slot and offset capacity.
6023 			 */
6024 			assert(c_seg_dst->c_busy);
6025 			assert(c_seg_dst->c_state == C_IS_FILLING);
6026 			assert(!c_seg_dst->c_on_minorcompact_q);
6027 
6028 			c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
6029 			assert(*current_chead == NULL);
6030 		}
6031 
6032 		C_SEG_WAKEUP_DONE(c_seg_dst);
6033 
6034 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
6035 
6036 		c_seg_dst = NULL;
6037 
6038 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
6039 	}
6040 
6041 	return kr;
6042 }
6043 #endif /* CONFIG_FREEZE */
6044 
6045 #if DEVELOPMENT || DEBUG
6046 
6047 void
6048 vm_compressor_inject_error(int *slot)
6049 {
6050 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
6051 
6052 	/* No error detection for single-value compression. */
6053 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
6054 		printf("%s(): cannot inject errors in SV-compressed pages\n", __func__);
6055 		return;
6056 	}
6057 
6058 	/* s_cseg is actually "segno+1" */
6059 	const uint32_t c_segno = slot_ptr->s_cseg - 1;
6060 
6061 	assert(c_segno < c_segments_available);
6062 	assert(c_segments[c_segno].c_segno >= c_segments_available);
6063 
6064 	const c_segment_t c_seg = c_segments[c_segno].c_seg;
6065 
6066 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
6067 
6068 	lck_mtx_lock_spin_always(&c_seg->c_lock);
6069 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
6070 
6071 	const uint16_t c_indx = slot_ptr->s_cindx;
6072 	assert(c_indx < c_seg->c_nextslot);
6073 
6074 	/*
6075 	 * To safely make this segment temporarily writable, we need to mark
6076 	 * the segment busy, which allows us to release the segment lock.
6077 	 */
6078 	while (c_seg->c_busy) {
6079 		c_seg_wait_on_busy(c_seg);
6080 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6081 	}
6082 	C_SEG_BUSY(c_seg);
6083 
6084 	bool already_writable = (c_seg->c_state == C_IS_FILLING);
6085 	if (!already_writable) {
6086 		/*
6087 		 * Protection update must be performed preemptibly, so temporarily drop
6088 		 * the lock. Having set c_busy will prevent most other concurrent
6089 		 * operations.
6090 		 */
6091 		lck_mtx_unlock_always(&c_seg->c_lock);
6092 		C_SEG_MAKE_WRITEABLE(c_seg);
6093 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6094 	}
6095 
6096 	/*
6097 	 * Once we've released the lock following our c_state == C_IS_FILLING check,
6098 	 * c_current_seg_filled() can (re-)write-protect the segment. However, it
6099 	 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
6100 	 * can detect this by re-checking after we've reobtained the lock.
6101 	 */
6102 	if (already_writable && c_seg->c_state != C_IS_FILLING) {
6103 		lck_mtx_unlock_always(&c_seg->c_lock);
6104 		C_SEG_MAKE_WRITEABLE(c_seg);
6105 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6106 		already_writable = false;
6107 		/* Segment can't be freed while c_busy is set. */
6108 		assert(c_seg->c_state != C_IS_FILLING);
6109 	}
6110 
6111 	/*
6112 	 * Skip if the segment is on disk. This check can only be performed after
6113 	 * the final acquisition of the segment lock before we attempt to write to
6114 	 * the segment.
6115 	 */
6116 	if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
6117 		c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
6118 		int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
6119 		/* assume that the compressed data holds at least one int32_t */
6120 		assert(UNPACK_C_SIZE(cs) > sizeof(*data));
6121 		/*
6122 		 * This bit is known to be in the payload of a MISS packet resulting from
6123 		 * the pattern used in the test pattern from decompression_failure.c.
6124 		 * Flipping it should result in many corrupted bits in the test page.
6125 		 */
6126 		data[0] ^= 0x00000100;
6127 	}
6128 
6129 	if (!already_writable) {
6130 		lck_mtx_unlock_always(&c_seg->c_lock);
6131 		C_SEG_WRITE_PROTECT(c_seg);
6132 		lck_mtx_lock_spin_always(&c_seg->c_lock);
6133 	}
6134 
6135 	C_SEG_WAKEUP_DONE(c_seg);
6136 	lck_mtx_unlock_always(&c_seg->c_lock);
6137 
6138 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
6139 }
6140 
6141 /*
6142  * Serialize information about a specific segment
6143  * returns KERN_SUCCESS if the segment was written or there's nothing to write for the segno,
6144  *         KERN_NO_SPACE if there's not enough space in the buffer
6145  * argument size: input - the size of the input buffer; output - the size written, set to 0 on failure
6146  */
6147 kern_return_t
6148 vm_compressor_serialize_segment_debug_info(int segno, char *buf, size_t *size)
6149 {
6150 	size_t insize = *size;
6151 	size_t offset = 0;
6152 	*size = 0;
6153 	if (c_segments[segno].c_segno < c_segments_available) {
6154 		/* This check means there's no pointer assigned here, so it must be an index in the free list.
6155 		 * If this were an active c_segment, .c_seg would hold a pointer, which interpreted as an integer
6156 		 * would be larger than c_segments_available. See also the assert to this effect right after
6157 		 * assigning to c_seg in c_seg_allocate().
6158 		 */
6159 		return KERN_SUCCESS;
6160 	}
6161 	if (c_segments[segno].c_segno == (uint32_t)-1) {
6162 		/* c_segno of the end of the free-list */
6163 		return KERN_SUCCESS;
6164 	}
6165 
6166 	const struct c_segment* c_seg = c_segments[segno].c_seg;
6167 	if (c_seg->c_state == C_IS_FREE) {
6168 		return KERN_SUCCESS; /* nothing needs to be done */
6169 	}
6170 
6171 	int nslots = c_seg->c_nextslot;
6172 	/* do we have enough space? */
6173 	if (sizeof(struct c_segment_info) + (nslots * sizeof(struct c_slot_info)) > insize) {
6174 		return KERN_NO_SPACE; /* not enough space, please call me again */
6175 	}
6176 
6177 	struct c_segment_info* csi = (struct c_segment_info*)buf;
6178 	offset += sizeof(struct c_segment_info);
6179 
6180 	csi->csi_mysegno = c_seg->c_mysegno;
6181 	csi->csi_creation_ts = c_seg->c_creation_ts;
6182 	csi->csi_swappedin_ts = c_seg->c_swappedin_ts;
6183 	csi->csi_bytes_unused = c_seg->c_bytes_unused;
6184 	csi->csi_bytes_used = c_seg->c_bytes_used;
6185 	csi->csi_populated_offset = c_seg->c_populated_offset;
6186 	csi->csi_state = c_seg->c_state;
6187 	csi->csi_swappedin = c_seg->c_swappedin;
6188 	csi->csi_on_minor_compact_q = c_seg->c_on_minorcompact_q;
6189 	csi->csi_has_donated_pages = c_seg->c_has_donated_pages;
6190 	csi->csi_slots_used = (uint16_t)c_seg->c_slots_used;
6191 	csi->csi_slot_var_array_len = c_seg->c_slot_var_array_len;
6192 	csi->csi_slots_len = (uint16_t)nslots;
6193 #if TRACK_C_SEGMENT_UTILIZATION
6194 	csi->csi_decompressions_since_swapin = c_seg->c_decompressions_since_swapin;
6195 #else
6196 	csi->csi_decompressions_since_swapin = 0;
6197 #endif /* TRACK_C_SEGMENT_UTILIZATION */
6198 
6199 	for (int si = 0; si < nslots; ++si) {
6200 		/* see also c_seg_validate() for some of the details */
6201 		const struct c_slot* cs = C_SEG_SLOT_FROM_INDEX(c_seg, si);
6202 		struct c_slot_info* ssi = (struct c_slot_info*)(buf + offset);
6203 		ssi->csi_size = UNPACK_C_SIZE(cs);
6204 		offset += sizeof(struct c_slot_info);
6205 	}
6206 	*size = offset;
6207 	return KERN_SUCCESS;
6208 }
6209 
6210 #endif /* DEVELOPMENT || DEBUG */
6211 
6212 
6213 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
6214 
6215 struct vnode;
6216 extern void vm_swapfile_open(const char *path, struct vnode **vp);
6217 extern int vm_swapfile_preallocate(struct vnode *vp, uint64_t *size, boolean_t *pin);
6218 
6219 struct vnode *uncompressed_vp0 = NULL;
6220 struct vnode *uncompressed_vp1 = NULL;
6221 uint32_t uncompressed_file0_free_pages = 0, uncompressed_file1_free_pages = 0;
6222 uint64_t uncompressed_file0_free_offset = 0, uncompressed_file1_free_offset = 0;
6223 
6224 uint64_t compressor_ro_uncompressed = 0;
6225 uint64_t compressor_ro_uncompressed_total_returned = 0;
6226 uint64_t compressor_ro_uncompressed_skip_returned = 0;
6227 uint64_t compressor_ro_uncompressed_get = 0;
6228 uint64_t compressor_ro_uncompressed_put = 0;
6229 uint64_t compressor_ro_uncompressed_swap_usage = 0;
6230 
6231 extern void vnode_put(struct vnode* vp);
6232 extern int vnode_getwithref(struct vnode* vp);
6233 extern int vm_swapfile_io(struct vnode *vp, uint64_t offset, uint64_t start, int npages, int flags, void *upl_ctx);
6234 
6235 #define MAX_OFFSET_PAGES        (255)
6236 uint64_t uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6237 uint64_t uncompressed_file1_space_bitmap[MAX_OFFSET_PAGES];
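/*
 * Each bitmap word tracks 64 page-sized blocks of its backing file, so the
 * 255 words cover 255 * 64 == 16320 pages; a set bit means the block is in
 * use. This lines up with the 255 MiB files preallocated in
 * vm_uncompressed_put() under the apparent assumption of 16 KiB pages.
 */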
6238 
6239 #define UNCOMPRESSED_FILEIDX_OFFSET_MASK (((uint32_t)1<<31ull) - 1)
6240 #define UNCOMPRESSED_FILEIDX_SHIFT (29)
6241 #define UNCOMPRESSED_FILEIDX_MASK (3)
6242 #define UNCOMPRESSED_OFFSET_SHIFT (29)
6243 #define UNCOMPRESSED_OFFSET_MASK (7)
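/*
 * Illustrative decoding of the slot layout implied by the masks above:
 * bits 0..28 hold the byte offset into the backing file, and bits 29..30
 * hold the file index biased by one (1 -> uncompressed_vp0,
 * 2 -> uncompressed_vp1), so a valid slot is never zero. For example, page
 * index 5 of file 0 with 16 KiB pages encodes as
 * (1 << UNCOMPRESSED_FILEIDX_SHIFT) + 5 * 16384 == 0x20014000.
 */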
6244 
6245 static uint32_t
6246 vm_uncompressed_extract_swap_file(int slot)
6247 {
6248 	uint32_t fileidx = (((uint32_t)slot & UNCOMPRESSED_FILEIDX_OFFSET_MASK) >> UNCOMPRESSED_FILEIDX_SHIFT) & UNCOMPRESSED_FILEIDX_MASK;
6249 	return fileidx;
6250 }
6251 
6252 static uint32_t
6253 vm_uncompressed_extract_swap_offset(int slot)
6254 {
6255 	return slot & (uint32_t)(~(UNCOMPRESSED_OFFSET_MASK << UNCOMPRESSED_OFFSET_SHIFT));
6256 }
6257 
6258 static void
6259 vm_uncompressed_return_space_to_swap(int slot)
6260 {
6261 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6262 	uint32_t fileidx = vm_uncompressed_extract_swap_file(slot);
6263 	if (fileidx == 1) {
6264 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6265 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6266 		uint64_t chunkidx = pgidx / 64;
6267 		uint64_t chunkoffset = pgidx % 64;
6268 #if DEVELOPMENT || DEBUG
6269 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6270 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6271 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6272 #endif /*DEVELOPMENT || DEBUG*/
6273 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6274 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6275 		uncompressed_file0_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6276 		assertf(!(uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6277 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6278 
6279 		uncompressed_file0_free_pages++;
6280 	} else {
6281 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6282 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6283 		uint64_t chunkidx = pgidx / 64;
6284 		uint64_t chunkoffset = pgidx % 64;
6285 		assertf((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6286 		    "%llu %llu", chunkidx, chunkoffset);
6287 		uncompressed_file1_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6288 
6289 		uncompressed_file1_free_pages++;
6290 	}
6291 	compressor_ro_uncompressed_swap_usage--;
6292 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6293 }
6294 
6295 static int
6296 vm_uncompressed_reserve_space_in_swap()
6297 {
6298 	int slot = 0;
6299 	if (uncompressed_file0_free_pages == 0 && uncompressed_file1_free_pages == 0) {
6300 		return -1;
6301 	}
6302 
6303 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6304 	if (uncompressed_file0_free_pages) {
6305 		uint64_t chunkidx = 0;
6306 		uint64_t chunkoffset = 0;
6307 		while (uncompressed_file0_space_bitmap[chunkidx] == 0xffffffffffffffff) {
6308 			chunkidx++;
6309 		}
6310 		while (uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6311 			chunkoffset++;
6312 		}
6313 
6314 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0,
6315 		    "%llu %llu", chunkidx, chunkoffset);
6316 #if DEVELOPMENT || DEBUG
6317 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6318 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6319 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6320 #endif /*DEVELOPMENT || DEBUG*/
6321 		uncompressed_file0_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6322 		uncompressed_file0_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6323 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6324 		    "%llu %llu", chunkidx, chunkoffset);
6325 
6326 		assert(uncompressed_file0_free_offset <= (1 << UNCOMPRESSED_OFFSET_SHIFT));
6327 		slot = (int)((1 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file0_free_offset);
6328 		uncompressed_file0_free_pages--;
6329 	} else {
6330 		uint64_t chunkidx = 0;
6331 		uint64_t chunkoffset = 0;
6332 		while (uncompressed_file1_space_bitmap[chunkidx] == 0xFFFFFFFFFFFFFFFF) {
6333 			chunkidx++;
6334 		}
6335 		while (uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6336 			chunkoffset++;
6337 		}
6338 		assert((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0);
6339 		uncompressed_file1_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6340 		uncompressed_file1_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6341 		slot = (int)((2 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file1_free_offset);
6342 		uncompressed_file1_free_pages--;
6343 	}
6344 	compressor_ro_uncompressed_swap_usage++;
6345 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6346 	return slot;
6347 }
6348 
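/*
 * Illustrative userspace sketch (not part of the build) of the first-fit
 * scan above: skip fully-set words, claim the first clear bit, and convert
 * it to a byte offset. "demo_" names are hypothetical; 16KB pages assumed.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define DEMO_WORDS 255
 *	static uint64_t demo_bitmap[DEMO_WORDS];
 *
 *	static int64_t
 *	demo_first_fit(void)
 *	{
 *		uint64_t chunkidx = 0, chunkoffset = 0;
 *		while (chunkidx < DEMO_WORDS && demo_bitmap[chunkidx] == ~0ULL) {
 *			chunkidx++;
 *		}
 *		if (chunkidx == DEMO_WORDS) {
 *			return -1;			// no free page
 *		}
 *		while (demo_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
 *			chunkoffset++;
 *		}
 *		demo_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
 *		return (int64_t)(((chunkidx * 64) + chunkoffset) * 16384);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		demo_bitmap[0] = ~0ULL;			// word 0 is full
 *		assert(demo_first_fit() == 64 * 16384);	// first page of word 1
 *		return 0;
 *	}
 */
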
#define MAX_IO_REQ (16)
struct _uncompressor_io_req {
	uint64_t addr;          /* kernel VA of a page-sized bounce buffer */
	bool inuse;
} uncompressor_io_req[MAX_IO_REQ];

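/*
 * The put/get paths below stage pages through this fixed pool of bounce
 * buffers. A condensed sketch (mirroring the code below, not a separate
 * implementation) of the claim/release discipline:
 *
 *	// claim, under the PAGE_REPLACEMENT lock
 *	for (i = 0; i < MAX_IO_REQ; i++) {
 *		if (!uncompressor_io_req[i].inuse) {
 *			uncompressor_io_req[i].inuse = true;
 *			break;
 *		}
 *	}
 *	if (i == MAX_IO_REQ) {
 *		// all busy: sleep on the pool address, then rescan
 *		assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
 *		thread_block(THREAD_CONTINUE_NULL);
 *	}
 *
 *	// release, under the same lock
 *	uncompressor_io_req[i].inuse = false;
 *	thread_wakeup((event_t)&uncompressor_io_req);
 */
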
int
vm_uncompressed_put(ppnum_t pn, int *slot)
{
	int retval = 0;
	struct vnode *uncompressed_vp = NULL;
	uint64_t uncompress_offset = 0;

again:
	if (uncompressed_vp0 == NULL) {
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		if (uncompressed_vp0 == NULL) {
			/* lazily create both backing files on the first put */
			uint64_t size = (MAX_OFFSET_PAGES * 1024 * 1024ULL);
			vm_swapfile_open("/private/var/vm/uncompressedswap0", &uncompressed_vp0);
			if (uncompressed_vp0 == NULL) {
				PAGE_REPLACEMENT_ALLOWED(FALSE);
				return KERN_NO_ACCESS;
			}
			vm_swapfile_preallocate(uncompressed_vp0, &size, NULL);
			uncompressed_file0_free_pages = (uint32_t)atop(size);
			bzero(uncompressed_file0_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);

			int i = 0;
			for (; i < MAX_IO_REQ; i++) {
				kmem_alloc(kernel_map, (vm_offset_t*)&uncompressor_io_req[i].addr, PAGE_SIZE_64, KMA_NOFAIL | KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
				uncompressor_io_req[i].inuse = false;
			}

			vm_swapfile_open("/private/var/vm/uncompressedswap1", &uncompressed_vp1);
			assert(uncompressed_vp1);
			vm_swapfile_preallocate(uncompressed_vp1, &size, NULL);
			uncompressed_file1_free_pages = (uint32_t)atop(size);
			bzero(uncompressed_file1_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
			PAGE_REPLACEMENT_ALLOWED(FALSE);
		} else {
			/* lost the race; another thread is initializing */
			PAGE_REPLACEMENT_ALLOWED(FALSE);
			delay(100);
			goto again;
		}
	}

	int swapinfo = vm_uncompressed_reserve_space_in_swap();
	if (swapinfo == -1) {
		*slot = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	if (vm_uncompressed_extract_swap_file(swapinfo) == 1) {
		uncompressed_vp = uncompressed_vp0;
	} else {
		uncompressed_vp = uncompressed_vp1;
	}
	uncompress_offset = vm_uncompressed_extract_swap_offset(swapinfo);
	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
		os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_put: vnode_getwithref on swapfile failed with %d\n", retval);
	} else {
		int i = 0;
retry:
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		for (i = 0; i < MAX_IO_REQ; i++) {
			if (uncompressor_io_req[i].inuse == false) {
				uncompressor_io_req[i].inuse = true;
				break;
			}
		}
		if (i == MAX_IO_REQ) {
			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
			PAGE_REPLACEMENT_ALLOWED(FALSE);
			thread_block(THREAD_CONTINUE_NULL);
			goto retry;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
		/* stage the page through the bounce buffer, then write it out */
		void *addr = pmap_map_compressor_page(pn);
		memcpy((void*)uncompressor_io_req[i].addr, addr, PAGE_SIZE_64);
		pmap_unmap_compressor_page(pn, addr);

		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_WRITE, NULL);
		if (retval) {
			*slot = 0;
		} else {
			*slot = swapinfo;
			((c_slot_mapping_t)(slot))->s_uncompressed = 1;
		}
		vnode_put(uncompressed_vp);
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		uncompressor_io_req[i].inuse = false;
		thread_wakeup((event_t)&uncompressor_io_req);
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	return retval;
}

int
vm_uncompressed_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
{
	int retval = 0;
	struct vnode *uncompressed_vp = NULL;
	uint32_t fileidx = vm_uncompressed_extract_swap_file(*slot);
	uint64_t uncompress_offset = vm_uncompressed_extract_swap_offset(*slot);

	if (__improbable(flags & C_KDP)) {
		/* vnode I/O is not possible in the KDP (debugger) context */
		return -2;
	}

	if (fileidx == 1) {
		uncompressed_vp = uncompressed_vp0;
	} else {
		uncompressed_vp = uncompressed_vp1;
	}

	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
		os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_get: vnode_getwithref on swapfile failed with %d\n", retval);
	} else {
		int i = 0;
retry:
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		for (i = 0; i < MAX_IO_REQ; i++) {
			if (uncompressor_io_req[i].inuse == false) {
				uncompressor_io_req[i].inuse = true;
				break;
			}
		}
		if (i == MAX_IO_REQ) {
			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
			PAGE_REPLACEMENT_ALLOWED(FALSE);
			thread_block(THREAD_CONTINUE_NULL);
			goto retry;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
		/* read into the bounce buffer, then copy into the target page */
		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_READ, NULL);
		vnode_put(uncompressed_vp);
		void *addr = pmap_map_compressor_page(pn);
		memcpy(addr, (void*)uncompressor_io_req[i].addr, PAGE_SIZE_64);
		pmap_unmap_compressor_page(pn, addr);
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		uncompressor_io_req[i].inuse = false;
		thread_wakeup((event_t)&uncompressor_io_req);
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	return retval;
}

int
vm_uncompressed_free(int *slot, __unused vm_compressor_options_t flags)
{
	vm_uncompressed_return_space_to_swap(*slot);
	*slot = 0;
	return 0;
}

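/*
 * Illustrative round trip (not part of the build) through the three entry
 * points above, for a physical page "pn" managed by the compressor:
 *
 *	int slot = 0;
 *	if (vm_uncompressed_put(pn, &slot) == 0) {
 *		// page now lives on disk; slot encodes file index + offset
 *		vm_uncompressed_get(pn, &slot, 0);	// read it back into pn
 *		vm_uncompressed_free(&slot, 0);		// release the disk page
 *	}
 */
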
#endif /*CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/