xref: /xnu-10002.61.3/osfmk/vm/vm_compressor.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <vm/vm_compressor.h>
30 
31 #if CONFIG_PHANTOM_CACHE
32 #include <vm/vm_phantom_cache.h>
33 #endif
34 
35 #include <vm/vm_map.h>
36 #include <vm/vm_pageout.h>
37 #include <vm/memory_object.h>
38 #include <vm/vm_compressor_algorithms.h>
39 #include <vm/vm_compressor_backing_store.h>
40 #include <vm/vm_fault.h>
41 #include <vm/vm_protos.h>
42 #include <mach/mach_host.h>             /* for host_info() */
43 #if DEVELOPMENT || DEBUG
44 #include <kern/hvg_hypercall.h>
45 #endif
46 #include <kern/ledger.h>
47 #include <kern/policy_internal.h>
48 #include <kern/thread_group.h>
49 #include <san/kasan.h>
50 #include <os/log.h>
51 #include <pexpert/pexpert.h>
52 #include <pexpert/device_tree.h>
53 
54 #if defined(__x86_64__)
55 #include <i386/misc_protos.h>
56 #endif
57 #if defined(__arm64__)
58 #include <arm/machine_routines.h>
59 #endif
60 
61 #include <IOKit/IOHibernatePrivate.h>
62 
63 /*
64  * The segment buffer size is a tradeoff.
65  * A larger buffer leads to faster I/O throughput, better compression ratios
66  * (since fewer bytes are wasted at the end of the segment),
67  * and less overhead (both in time and space).
68  * However, a smaller buffer causes less swap when the system is overcommitted,
69  * because a higher percentage of the swapped-in segment is definitely accessed
70  * before it goes back out to storage.
71  *
72  * So on systems without swap, a larger segment is a clear win.
73  * On systems with swap, the choice is murkier. Empirically, we've
74  * found that a 64KB segment provides a better tradeoff both in terms of
75  * performance and swap writes than a 256KB segment on systems with fast SSDs
76  * and a HW compression block.
77  */
78 #define C_SEG_BUFSIZE_ARM_SWAP (1024 * 64)
79 #if XNU_TARGET_OS_OSX && defined(__arm64__)
80 #define C_SEG_BUFSIZE_DEFAULT C_SEG_BUFSIZE_ARM_SWAP
81 #else
82 #define C_SEG_BUFSIZE_DEFAULT (1024 * 256)
83 #endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
84 uint32_t c_seg_bufsize;
85 
86 uint32_t c_seg_max_pages, c_seg_off_limit, c_seg_allocsize, c_seg_slot_var_array_min_len;
87 
88 extern boolean_t vm_darkwake_mode;
89 extern zone_t vm_page_zone;
90 
91 #if DEVELOPMENT || DEBUG
92 /* sysctl defined in bsd/dev/arm64/sysctl.c */
93 int do_cseg_wedge_thread(void);
94 int do_cseg_unwedge_thread(void);
95 static event_t debug_cseg_wait_event = NULL;
96 #endif /* DEVELOPMENT || DEBUG */
97 
98 #if CONFIG_FREEZE
99 bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
100 void task_disown_frozen_csegs(task_t owner_task);
101 #endif /* CONFIG_FREEZE */
102 
103 #if POPCOUNT_THE_COMPRESSED_DATA
104 boolean_t popcount_c_segs = TRUE;
105 
106 static inline uint32_t
107 vmc_pop(uintptr_t ins, int sz)
108 {
109 	uint32_t rv = 0;
110 
111 	if (__probable(popcount_c_segs == FALSE)) {
112 		return 0xDEAD707C;
113 	}
114 
115 	while (sz >= 16) {
116 		uint32_t rv1, rv2;
117 		uint64_t *ins64 = (uint64_t *) ins;
118 		uint64_t *ins642 = (uint64_t *) (ins + 8);
119 		rv1 = __builtin_popcountll(*ins64);
120 		rv2 = __builtin_popcountll(*ins642);
121 		rv += rv1 + rv2;
122 		sz -= 16;
123 		ins += 16;
124 	}
125 
126 	while (sz >= 4) {
127 		uint32_t *ins32 = (uint32_t *) ins;
128 		rv += __builtin_popcount(*ins32);
129 		sz -= 4;
130 		ins += 4;
131 	}
132 
133 	while (sz > 0) {
134 		char *ins8 = (char *)ins;
135 		rv += __builtin_popcount(*ins8);
136 		sz--;
137 		ins++;
138 	}
139 	return rv;
140 }
141 #endif
142 
143 #if VALIDATE_C_SEGMENTS
144 boolean_t validate_c_segs = TRUE;
145 #endif
146 /*
147  * vm_compressor_mode has a hierarchy of control to set its value.
148  * boot-args are checked first, then device-tree, and finally
149  * the default value that is defined below. See vm_fault_init() for
150  * the boot-arg & device-tree code.
151  */
152 
153 #if !XNU_TARGET_OS_OSX
154 
155 #if CONFIG_FREEZE
156 int     vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
157 struct  freezer_context freezer_context_global;
158 #else /* CONFIG_FREEZE */
159 int     vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
160 #endif /* CONFIG_FREEZE */
161 
162 #else /* !XNU_TARGET_OS_OSX */
163 int             vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
164 
165 #endif /* !XNU_TARGET_OS_OSX */
166 
167 TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
168 int             vm_compressor_is_active = 0;
169 int             vm_compressor_available = 0;
170 
171 extern uint64_t vm_swap_get_max_configured_space(void);
172 extern void     vm_pageout_io_throttle(void);
173 bool vm_compressor_swapout_is_ripe(void);
174 
175 #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
176 extern unsigned int hash_string(char *cp, int len);
177 static unsigned int vmc_hash(char *, int);
178 boolean_t checksum_c_segs = TRUE;
179 
180 unsigned int
181 vmc_hash(char *cp, int len)
182 {
183 	if (__probable(checksum_c_segs == FALSE)) {
184 		return 0xDEAD7A37;
185 	}
186 	return hash_string(cp, len);
187 }
188 #endif
189 
190 #define UNPACK_C_SIZE(cs)       ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
191 #define PACK_C_SIZE(cs, size)   (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
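/*
 * Illustrative sketch (not part of the original source): PACK_C_SIZE maps a
 * full PAGE_SIZE page to PAGE_SIZE - 1, presumably because the c_size bitfield
 * is too narrow to hold PAGE_SIZE itself, and UNPACK_C_SIZE reverses the
 * mapping. Assuming a hypothetical slot pointer cs of type c_slot_t:
 *
 *   PACK_C_SIZE(cs, PAGE_SIZE);              // c_size now holds PAGE_SIZE - 1
 *   assert(UNPACK_C_SIZE(cs) == PAGE_SIZE);  // reads back as a full page
 *
 *   PACK_C_SIZE(cs, 1234);                   // smaller sizes are stored as-is
 *   assert(UNPACK_C_SIZE(cs) == 1234);
 */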
192 
193 
194 struct c_sv_hash_entry {
195 	union {
196 		struct  {
197 			uint32_t        c_sv_he_ref;
198 			uint32_t        c_sv_he_data;
199 		} c_sv_he;
200 		uint64_t        c_sv_he_record;
201 	} c_sv_he_un;
202 };
203 
204 #define he_ref  c_sv_he_un.c_sv_he.c_sv_he_ref
205 #define he_data c_sv_he_un.c_sv_he.c_sv_he_data
206 #define he_record c_sv_he_un.c_sv_he_record
207 
208 #define C_SV_HASH_MAX_MISS      32
209 #define C_SV_HASH_SIZE          ((1 << 10))
210 #define C_SV_HASH_MASK          ((1 << 10) - 1)
211 
212 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
213 #define C_SV_CSEG_ID            ((1 << 21) - 1)
214 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
215 #define C_SV_CSEG_ID            ((1 << 22) - 1)
216 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
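/*
 * Illustrative note (an assumption, not taken from the original source): with
 * C_SV_HASH_SIZE == 1024 buckets, a bucket index for a single-value page is
 * presumably derived by masking, e.g.
 *
 *   uint32_t bucket = page_value_hash & C_SV_HASH_MASK;   // 0 .. 1023
 *
 * where page_value_hash is a hypothetical name. C_SV_CSEG_ID appears to be the
 * largest value representable in the s_cseg bitfield of a c_slot_mapping,
 * reserved as a sentinel segment number for slots tracked in this hash.
 */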
217 
218 
219 union c_segu {
220 	c_segment_t     c_seg;
221 	uintptr_t       c_segno;
222 };
223 
224 #define C_SLOT_ASSERT_PACKABLE(ptr) \
225 	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);
226 
227 #define C_SLOT_PACK_PTR(ptr) \
228 	VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)
229 
230 #define C_SLOT_UNPACK_PTR(cslot) \
231 	(c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)
232 
233 /* for debugging purposes */
234 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
235     VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
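/*
 * Illustrative sketch (not part of the original source): each c_slot keeps its
 * back-pointer to the owning slot mapping in packed form. Assuming a
 * hypothetical packable pointer slot_ptr and slot cs, the macros above
 * round-trip as follows:
 *
 *   C_SLOT_ASSERT_PACKABLE(slot_ptr);
 *   cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
 *   assert(C_SLOT_UNPACK_PTR(cs) == slot_ptr);
 *
 * c_slot_packing_params exposes the packing parameters, presumably so debug
 * tooling can perform the same unpacking offline.
 */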
236 
237 uint32_t        c_segment_count = 0;
238 uint32_t        c_segment_count_max = 0;
239 
240 uint64_t        c_generation_id = 0;
241 uint64_t        c_generation_id_flush_barrier;
242 
243 
244 #define         HIBERNATE_FLUSHING_SECS_TO_COMPLETE     120
245 
246 boolean_t       hibernate_no_swapspace = FALSE;
247 boolean_t       hibernate_flush_timed_out = FALSE;
248 clock_sec_t     hibernate_flushing_deadline = 0;
249 
250 #if RECORD_THE_COMPRESSED_DATA
251 char    *c_compressed_record_sbuf;
252 char    *c_compressed_record_ebuf;
253 char    *c_compressed_record_cptr;
254 #endif
255 
256 
257 queue_head_t    c_age_list_head;
258 queue_head_t    c_early_swappedin_list_head, c_regular_swappedin_list_head, c_late_swappedin_list_head;
259 queue_head_t    c_early_swapout_list_head, c_regular_swapout_list_head, c_late_swapout_list_head;
260 queue_head_t    c_swapio_list_head;
261 queue_head_t    c_swappedout_list_head;
262 queue_head_t    c_swappedout_sparse_list_head;
263 queue_head_t    c_major_list_head;
264 queue_head_t    c_filling_list_head;
265 queue_head_t    c_bad_list_head;
266 
267 uint32_t        c_age_count = 0;
268 uint32_t        c_early_swappedin_count = 0, c_regular_swappedin_count = 0, c_late_swappedin_count = 0;
269 uint32_t        c_early_swapout_count = 0, c_regular_swapout_count = 0, c_late_swapout_count = 0;
270 uint32_t        c_swapio_count = 0;
271 uint32_t        c_swappedout_count = 0;
272 uint32_t        c_swappedout_sparse_count = 0;
273 uint32_t        c_major_count = 0;
274 uint32_t        c_filling_count = 0;
275 uint32_t        c_empty_count = 0;
276 uint32_t        c_bad_count = 0;
277 
278 
279 queue_head_t    c_minor_list_head;
280 uint32_t        c_minor_count = 0;
281 
282 int             c_overage_swapped_count = 0;
283 int             c_overage_swapped_limit = 0;
284 
285 int             c_seg_fixed_array_len;
286 union  c_segu   *c_segments;
287 vm_offset_t     c_buffers;
288 vm_size_t       c_buffers_size;
289 caddr_t         c_segments_next_page;
290 boolean_t       c_segments_busy;
291 uint32_t        c_segments_available;
292 uint32_t        c_segments_limit;
293 uint32_t        c_segments_nearing_limit;
294 
295 uint32_t        c_segment_svp_in_hash;
296 uint32_t        c_segment_svp_hash_succeeded;
297 uint32_t        c_segment_svp_hash_failed;
298 uint32_t        c_segment_svp_zero_compressions;
299 uint32_t        c_segment_svp_nonzero_compressions;
300 uint32_t        c_segment_svp_zero_decompressions;
301 uint32_t        c_segment_svp_nonzero_decompressions;
302 
303 uint32_t        c_segment_noncompressible_pages;
304 
305 uint32_t        c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor */
306 #if CONFIG_FREEZE
307 int32_t         c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
308 int32_t         c_segment_pages_compressed_incore_late_swapout = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory and tagged for swapout */
309 uint32_t        c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
310 #endif /* CONFIG_FREEZE */
311 
312 uint32_t        c_segment_pages_compressed_limit;
313 uint32_t        c_segment_pages_compressed_nearing_limit;
314 uint32_t        c_free_segno_head = (uint32_t)-1;
315 
316 uint32_t        vm_compressor_minorcompact_threshold_divisor = 10;
317 uint32_t        vm_compressor_majorcompact_threshold_divisor = 10;
318 uint32_t        vm_compressor_unthrottle_threshold_divisor = 10;
319 uint32_t        vm_compressor_catchup_threshold_divisor = 10;
320 
321 uint32_t        vm_compressor_minorcompact_threshold_divisor_overridden = 0;
322 uint32_t        vm_compressor_majorcompact_threshold_divisor_overridden = 0;
323 uint32_t        vm_compressor_unthrottle_threshold_divisor_overridden = 0;
324 uint32_t        vm_compressor_catchup_threshold_divisor_overridden = 0;
325 
326 #define         C_SEGMENTS_PER_PAGE     (PAGE_SIZE / sizeof(union c_segu))
327 
328 LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
329 LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
330 LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);
331 
332 boolean_t       decompressions_blocked = FALSE;
333 
334 zone_t          compressor_segment_zone;
335 int             c_compressor_swap_trigger = 0;
336 
337 uint32_t        compressor_cpus;
338 char            *compressor_scratch_bufs;
339 char            *kdp_compressor_scratch_buf;
340 char            *kdp_compressor_decompressed_page;
341 addr64_t        kdp_compressor_decompressed_page_paddr;
342 ppnum_t         kdp_compressor_decompressed_page_ppnum;
343 
344 clock_sec_t     start_of_sample_period_sec = 0;
345 clock_nsec_t    start_of_sample_period_nsec = 0;
346 clock_sec_t     start_of_eval_period_sec = 0;
347 clock_nsec_t    start_of_eval_period_nsec = 0;
348 uint32_t        sample_period_decompression_count = 0;
349 uint32_t        sample_period_compression_count = 0;
350 uint32_t        last_eval_decompression_count = 0;
351 uint32_t        last_eval_compression_count = 0;
352 
353 #define         DECOMPRESSION_SAMPLE_MAX_AGE            (60 * 30)
354 
355 boolean_t       vm_swapout_ripe_segments = FALSE;
356 uint32_t        vm_ripe_target_age = (60 * 60 * 48);
357 
358 uint32_t        swapout_target_age = 0;
359 uint32_t        age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
360 uint32_t        overage_decompressions_during_sample_period = 0;
361 
362 
363 void            do_fastwake_warmup(queue_head_t *, boolean_t);
364 boolean_t       fastwake_warmup = FALSE;
365 boolean_t       fastwake_recording_in_progress = FALSE;
366 clock_sec_t     dont_trim_until_ts = 0;
367 
368 uint64_t        c_segment_warmup_count;
369 uint64_t        first_c_segment_to_warm_generation_id = 0;
370 uint64_t        last_c_segment_to_warm_generation_id = 0;
371 boolean_t       hibernate_flushing = FALSE;
372 
373 int64_t         c_segment_input_bytes __attribute__((aligned(8))) = 0;
374 int64_t         c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
375 int64_t         compressor_bytes_used __attribute__((aligned(8))) = 0;
376 
377 /* Keeps track of the most recent timestamp for when major compaction finished. */
378 mach_timespec_t major_compact_ts;
379 
380 struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE]  __attribute__ ((aligned(8)));
381 
382 static void vm_compressor_swap_trigger_thread(void);
383 static void vm_compressor_do_delayed_compactions(boolean_t);
384 static void vm_compressor_compact_and_swap(boolean_t);
385 static void vm_compressor_process_regular_swapped_in_segments(boolean_t);
386 void vm_compressor_process_special_swapped_in_segments(void);
387 static void vm_compressor_process_special_swapped_in_segments_locked(void);
388 
389 struct vm_compressor_swapper_stats vmcs_stats;
390 
391 static void vm_compressor_process_major_segments(bool);
392 #if XNU_TARGET_OS_OSX
393 static void vm_compressor_take_paging_space_action(void);
394 #endif /* XNU_TARGET_OS_OSX */
395 
396 void compute_swapout_target_age(void);
397 
398 boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
399 boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);
400 
401 int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
402 int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
403 void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
404 
405 void c_seg_move_to_sparse_list(c_segment_t);
406 void c_seg_insert_into_q(queue_head_t *, c_segment_t);
407 
408 uint64_t vm_available_memory(void);
409 uint64_t vm_compressor_pages_compressed(void);
410 uint32_t vm_compressor_pool_size(void);
411 uint32_t vm_compressor_fragmentation_level(void);
412 uint32_t vm_compression_ratio(void);
413 
414 /*
415  * Indicate the need to do a major compaction if
416  * the overall set of in-use compression segments
417  * becomes sparse. On systems that support pressure-
418  * driven swapping, this will also cause swapouts to
419  * be initiated.
420  */
421 static inline bool
422 vm_compressor_needs_to_major_compact()
423 {
424 	uint32_t        incore_seg_count;
425 
426 	incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
427 
428 	if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
429 	    ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
430 	    ((incore_seg_count / 8) * c_seg_max_pages)) {
431 		return true;
432 	}
433 	return false;
434 }
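/*
 * Illustration with hypothetical numbers (not from the original source): the
 * check above fires when the slack across in-core segments exceeds 1/8 of
 * their page capacity, but only once the pool is reasonably populated
 * (c_segment_count at least 1/8 of c_segments_nearing_limit). For example,
 * with c_seg_max_pages == 64 and 10,000 in-core segments the capacity is
 * 640,000 compressor pages; if VM_PAGE_COMPRESSOR_COUNT is 540,000, the slack
 * of 100,000 pages exceeds 640,000 / 8 == 80,000, so a major compaction (and,
 * where supported, swapout) is indicated.
 */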
435 
436 
437 uint64_t
438 vm_available_memory(void)
439 {
440 	return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
441 }
442 
443 
444 uint32_t
445 vm_compressor_pool_size(void)
446 {
447 	return VM_PAGE_COMPRESSOR_COUNT;
448 }
449 
450 uint32_t
451 vm_compressor_fragmentation_level(void)
452 {
453 	const uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
454 	if ((incore_seg_count == 0) || (c_seg_max_pages == 0)) {
455 		return 0;
456 	}
457 	return 100 - (vm_compressor_pool_size() * 100 / (incore_seg_count * c_seg_max_pages));
458 }
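/*
 * Illustration with hypothetical numbers (not from the original source): the
 * level is the percentage of in-core segment capacity not backed by resident
 * compressor pages. With 1,000 in-core segments and c_seg_max_pages == 64
 * (capacity 64,000 pages), a pool of 48,000 resident pages yields
 * 100 - (48,000 * 100 / 64,000) == 25 percent fragmentation.
 */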
459 
460 uint32_t
461 vm_compression_ratio(void)
462 {
463 	if (vm_compressor_pool_size() == 0) {
464 		return UINT32_MAX;
465 	}
466 	return c_segment_pages_compressed / vm_compressor_pool_size();
467 }
468 
469 uint64_t
470 vm_compressor_pages_compressed(void)
471 {
472 	return c_segment_pages_compressed * PAGE_SIZE_64;
473 }
474 
475 bool
476 vm_compressor_compressed_pages_nearing_limit(void)
477 {
478 	uint32_t pages = 0;
479 
480 #if CONFIG_FREEZE
481 	pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
482 #else /* CONFIG_FREEZE */
483 	pages = c_segment_pages_compressed;
484 #endif /* CONFIG_FREEZE */
485 
486 	return pages > c_segment_pages_compressed_nearing_limit;
487 }
488 
489 static bool
490 vm_compressor_segments_nearing_limit(void)
491 {
492 	uint64_t segments;
493 
494 #if CONFIG_FREEZE
495 	if (freezer_incore_cseg_acct) {
496 		if (os_sub_overflow(c_segment_count, c_swappedout_count, &segments)) {
497 			segments = 0;
498 		}
499 		if (os_sub_overflow(segments, c_swappedout_sparse_count, &segments)) {
500 			segments = 0;
501 		}
502 	} else {
503 		segments = os_atomic_load(&c_segment_count, relaxed);
504 	}
505 #else /* CONFIG_FREEZE */
506 	segments = c_segment_count;
507 #endif /* CONFIG_FREEZE */
508 
509 	return segments > c_segments_nearing_limit;
510 }
511 
512 boolean_t
513 vm_compressor_low_on_space(void)
514 {
515 	return vm_compressor_compressed_pages_nearing_limit() ||
516 	       vm_compressor_segments_nearing_limit();
517 }
518 
519 
520 boolean_t
521 vm_compressor_out_of_space(void)
522 {
523 #if CONFIG_FREEZE
524 	uint64_t incore_seg_count;
525 	uint32_t incore_compressed_pages;
526 	if (freezer_incore_cseg_acct) {
527 		if (os_sub_overflow(c_segment_count, c_swappedout_count, &incore_seg_count)) {
528 			incore_seg_count = 0;
529 		}
530 		if (os_sub_overflow(incore_seg_count, c_swappedout_sparse_count, &incore_seg_count)) {
531 			incore_seg_count = 0;
532 		}
533 		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
534 	} else {
535 		incore_seg_count = os_atomic_load(&c_segment_count, relaxed);
536 		incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
537 	}
538 
539 	if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
540 	    (incore_seg_count > c_segments_incore_limit)) {
541 		return TRUE;
542 	}
543 #else /* CONFIG_FREEZE */
544 	if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
545 	    (c_segment_count >= c_segments_limit)) {
546 		return TRUE;
547 	}
548 #endif /* CONFIG_FREEZE */
549 	return FALSE;
550 }
551 
552 bool
553 vm_compressor_is_thrashing()
554 {
555 	compute_swapout_target_age();
556 
557 	if (swapout_target_age) {
558 		c_segment_t     c_seg;
559 
560 		lck_mtx_lock_spin_always(c_list_lock);
561 
562 		if (!queue_empty(&c_age_list_head)) {
563 			c_seg = (c_segment_t) queue_first(&c_age_list_head);
564 
565 			if (c_seg->c_creation_ts > swapout_target_age) {
566 				swapout_target_age = 0;
567 			}
568 		}
569 		lck_mtx_unlock_always(c_list_lock);
570 	}
571 
572 	return swapout_target_age != 0;
573 }
574 
575 
576 int
577 vm_wants_task_throttled(task_t task)
578 {
579 	ledger_amount_t compressed;
580 	if (task == kernel_task) {
581 		return 0;
582 	}
583 
584 	if (VM_CONFIG_SWAP_IS_ACTIVE) {
585 		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
586 			ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
587 			compressed >>= VM_MAP_PAGE_SHIFT(task->map);
588 			if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
589 				return 1;
590 			}
591 		}
592 	}
593 	return 0;
594 }
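/*
 * Illustration with hypothetical numbers (not from the original source): when
 * swap is active and the compressor is low on space (or the hard throttle
 * limit has been reached), a task is throttled once its own compressed
 * footprint exceeds a quarter of all compressed pages. For example, if
 * c_segment_pages_compressed is 400,000 and the task's internal_compressed
 * ledger works out to 120,000 pages, then 120,000 > 400,000 / 4 == 100,000
 * and the function returns 1.
 */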
595 
596 
597 #if DEVELOPMENT || DEBUG
598 /*
599  * On compressor/swap exhaustion, kill the largest process regardless of
600  * its chosen process policy.
601  */
602 TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false);
603 #endif /* DEVELOPMENT || DEBUG */
604 
605 #if CONFIG_JETSAM
606 boolean_t       memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
607 void            memorystatus_thread_wake(void);
608 extern uint32_t jetsam_kill_on_low_swap;
609 bool            memorystatus_disable_swap(void);
610 #if CONFIG_PHANTOM_CACHE
611 extern bool memorystatus_phantom_cache_pressure;
612 #endif /* CONFIG_PHANTOM_CACHE */
613 int             compressor_thrashing_induced_jetsam = 0;
614 int             filecache_thrashing_induced_jetsam = 0;
615 static boolean_t        vm_compressor_thrashing_detected = FALSE;
616 #else  /* CONFIG_JETSAM */
617 static uint32_t no_paging_space_action_in_progress = 0;
618 extern void memorystatus_send_low_swap_note(void);
619 #endif /* CONFIG_JETSAM */
620 
621 static void
622 vm_compressor_take_paging_space_action(void)
623 {
624 #if CONFIG_JETSAM
625 	/*
626 	 * On systems with both swap and jetsam,
627 	 * just wake up the jetsam thread and have it handle the low swap condition
628 	 * by killing apps.
629 	 */
630 	if (jetsam_kill_on_low_swap) {
631 		memorystatus_thread_wake();
632 	}
633 #else /* CONFIG_JETSAM */
634 	if (no_paging_space_action_in_progress == 0) {
635 		if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {
636 			if (no_paging_space_action()) {
637 #if DEVELOPMENT || DEBUG
638 				if (kill_on_no_paging_space) {
639 					/*
640 					 * Since we are choosing to always kill a process, we don't need the
641 					 * "out of application memory" dialog box in this mode. And, hence we won't
642 					 * "out of application memory" dialog box in this mode. Hence, we won't
643 					 * send the knote.
644 					no_paging_space_action_in_progress = 0;
645 					return;
646 				}
647 #endif /* DEVELOPMENT || DEBUG */
648 				memorystatus_send_low_swap_note();
649 			}
650 
651 			no_paging_space_action_in_progress = 0;
652 		}
653 	}
654 #endif /* !CONFIG_JETSAM */
655 }
656 
657 
658 void
659 vm_decompressor_lock(void)
660 {
661 	PAGE_REPLACEMENT_ALLOWED(TRUE);
662 
663 	decompressions_blocked = TRUE;
664 
665 	PAGE_REPLACEMENT_ALLOWED(FALSE);
666 }
667 
668 void
669 vm_decompressor_unlock(void)
670 {
671 	PAGE_REPLACEMENT_ALLOWED(TRUE);
672 
673 	decompressions_blocked = FALSE;
674 
675 	PAGE_REPLACEMENT_ALLOWED(FALSE);
676 
677 	thread_wakeup((event_t)&decompressions_blocked);
678 }
679 
680 static inline void
681 cslot_copy(c_slot_t cdst, c_slot_t csrc)
682 {
683 #if CHECKSUM_THE_DATA
684 	cdst->c_hash_data = csrc->c_hash_data;
685 #endif
686 #if CHECKSUM_THE_COMPRESSED_DATA
687 	cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
688 #endif
689 #if POPCOUNT_THE_COMPRESSED_DATA
690 	cdst->c_pop_cdata = csrc->c_pop_cdata;
691 #endif
692 	cdst->c_size = csrc->c_size;
693 	cdst->c_packed_ptr = csrc->c_packed_ptr;
694 #if defined(__arm64__)
695 	cdst->c_codec = csrc->c_codec;
696 #endif
697 }
698 
699 #if XNU_TARGET_OS_OSX
700 #define VM_COMPRESSOR_MAX_POOL_SIZE (192UL << 30)
701 #else
702 #define VM_COMPRESSOR_MAX_POOL_SIZE (0)
703 #endif
704 
705 static vm_map_size_t compressor_size;
706 static SECURITY_READ_ONLY_LATE(struct mach_vm_range) compressor_range;
707 vm_map_t compressor_map;
708 uint64_t compressor_pool_max_size;
709 uint64_t compressor_pool_size;
710 uint32_t compressor_pool_multiplier;
711 
712 #if DEVELOPMENT || DEBUG
713 /*
714  * Compressor segments are write-protected in development/debug
715  * kernels to help debug memory corruption.
716  * In cases where performance is a concern, this can be disabled
717  * via the boot-arg "-disable_cseg_write_protection".
718  */
719 boolean_t write_protect_c_segs = TRUE;
720 int vm_compressor_test_seg_wp;
721 uint32_t vm_ktrace_enabled;
722 #endif /* DEVELOPMENT || DEBUG */
723 
724 #if (XNU_TARGET_OS_OSX && __arm64__)
725 
726 #include <IOKit/IOPlatformExpert.h>
727 #include <sys/random.h>
728 
729 static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
730 static thread_call_t csegbufsz_experiment_thread_call;
731 
732 extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
733 static void
734 erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
735 {
736 	// Wait for NVRAM to be writable
737 	if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
738 		printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.");
739 	}
740 
741 	if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
742 		printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.", csegbufsizeExperimentProperty);
743 	}
744 	thread_call_free(csegbufsz_experiment_thread_call);
745 }
746 
747 static void
748 erase_csegbufsz_experiment_property_async()
749 {
750 	csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
751 		erase_csegbufsz_experiment_property,
752 		NULL,
753 		THREAD_CALL_PRIORITY_LOW
754 		);
755 	if (csegbufsz_experiment_thread_call == NULL) {
756 		printf("csegbufsize_experiment_property: Unable to allocate thread call.");
757 	} else {
758 		thread_call_enter(csegbufsz_experiment_thread_call);
759 	}
760 }
761 
762 static void
763 cleanup_csegbufsz_experiment(__unused void *arg0)
764 {
765 	char nvram = 0;
766 	unsigned int len = sizeof(nvram);
767 	if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
768 		erase_csegbufsz_experiment_property_async();
769 	}
770 }
771 
772 STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
773 #endif /* XNU_TARGET_OS_OSX && __arm64__ */
774 
775 #if CONFIG_JETSAM
776 extern unsigned int memorystatus_swap_all_apps;
777 #endif /* CONFIG_JETSAM */
778 
779 TUNABLE_DT(uint64_t, swap_vol_min_capacity, "/defaults", "kern.swap_min_capacity", "kern.swap_min_capacity", 0, TUNABLE_DT_NONE);
780 
781 static void
782 vm_compressor_set_size(void)
783 {
784 	/*
785 	 * Note that this function may be called multiple times on systems with app swap
786 	 * because the value of vm_swap_get_max_configured_space() and memorystatus_swap_all_apps
787  * can change based on the size of the swap volume. On these systems, we'll call
788 	 * this function once early in boot to reserve the maximum amount of VA required
789 	 * for the compressor submap and then one more time in vm_compressor_init after
790 	 * determining the swap volume size. We must not return a larger value the second
791 	 * time around.
792 	 */
793 	vm_size_t       c_segments_arr_size = 0;
794 	struct c_slot_mapping tmp_slot_ptr;
795 
796 	/* The segment size can be overridden by a boot-arg */
797 	if (!PE_parse_boot_argn("vm_compressor_segment_buffer_size", &c_seg_bufsize, sizeof(c_seg_bufsize))) {
798 #if CONFIG_JETSAM
799 		if (memorystatus_swap_all_apps) {
800 			c_seg_bufsize = C_SEG_BUFSIZE_ARM_SWAP;
801 		} else {
802 			c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
803 		}
804 #else
805 		c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
806 #endif /* CONFIG_JETSAM */
807 	}
808 
809 	vm_compressor_swap_init_swap_file_limit();
810 	if (vm_compression_limit) {
811 		compressor_pool_size = ptoa_64(vm_compression_limit);
812 	}
813 
814 	compressor_pool_max_size = C_SEG_MAX_LIMIT;
815 	compressor_pool_max_size *= c_seg_bufsize;
816 
817 #if XNU_TARGET_OS_OSX
818 
819 	if (vm_compression_limit == 0) {
820 		if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
821 			compressor_pool_size = 16ULL * max_mem;
822 		} else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
823 			compressor_pool_size = 8ULL * max_mem;
824 		} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
825 			compressor_pool_size = 4ULL * max_mem;
826 		} else {
827 			compressor_pool_size = 2ULL * max_mem;
828 		}
829 	}
830 	/*
831 	 * Cap the compressor pool size to a max of 192G
832 	 */
833 	if (compressor_pool_size > VM_COMPRESSOR_MAX_POOL_SIZE) {
834 		compressor_pool_size = VM_COMPRESSOR_MAX_POOL_SIZE;
835 	}
836 	if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
837 		compressor_pool_multiplier = 1;
838 	} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
839 		compressor_pool_multiplier = 2;
840 	} else {
841 		compressor_pool_multiplier = 4;
842 	}
843 
844 #else
845 
846 	if (compressor_pool_max_size > max_mem) {
847 		compressor_pool_max_size = max_mem;
848 	}
849 
850 	if (vm_compression_limit == 0) {
851 		compressor_pool_size = max_mem;
852 	}
853 
854 #if XNU_TARGET_OS_WATCH
855 	compressor_pool_multiplier = 2;
856 #elif XNU_TARGET_OS_IOS
857 	if (max_mem <= (2ULL * 1024ULL * 1024ULL * 1024ULL)) {
858 		compressor_pool_multiplier = 2;
859 	} else {
860 		compressor_pool_multiplier = 1;
861 	}
862 #else
863 	compressor_pool_multiplier = 1;
864 #endif
865 
866 #endif
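	/*
	 * Illustration with hypothetical numbers (not from the original source),
	 * macOS branch: on a 16GB machine with no vm_compression_limit override,
	 * max_mem falls in the "<= 32GB" bucket, so compressor_pool_size is
	 * 4 * 16GB == 64GB of reserved VA (well under the 192G cap) and
	 * compressor_pool_multiplier is 2. On the non-macOS branch the pool is
	 * simply sized to max_mem.
	 */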
867 
868 	PE_parse_boot_argn("kern.compressor_pool_multiplier", &compressor_pool_multiplier, sizeof(compressor_pool_multiplier));
869 	if (compressor_pool_multiplier < 1) {
870 		compressor_pool_multiplier = 1;
871 	}
872 
873 	if (compressor_pool_size > compressor_pool_max_size) {
874 		compressor_pool_size = compressor_pool_max_size;
875 	}
876 
877 	c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
878 	c_seg_slot_var_array_min_len = c_seg_max_pages;
879 
880 #if !defined(__x86_64__)
881 	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
882 	c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
883 #else
884 	c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
885 	c_seg_allocsize = c_seg_bufsize;
886 #endif /* !defined(__x86_64__) */
887 
888 	c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
889 	tmp_slot_ptr.s_cseg = c_segments_limit;
890 	/* Panic on internal configs */
891 	assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
892 
893 	if (tmp_slot_ptr.s_cseg != c_segments_limit) {
894 		tmp_slot_ptr.s_cseg = -1;
895 		c_segments_limit = tmp_slot_ptr.s_cseg - 1; /*limited by segment idx bits in c_slot_mapping*/
896 		compressor_pool_size = (c_segments_limit * (vm_size_t)(c_seg_allocsize));
897 	}
898 
899 	c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
900 
901 	c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);
902 
903 	if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
904 #if defined(XNU_TARGET_OS_WATCH)
905 		c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
906 #else
907 		if (!vm_compression_limit) {
908 			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
909 		}
910 #endif
911 	}
912 
913 	c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
914 
915 #if CONFIG_FREEZE
916 	/*
917 	 * Our in-core limits are based on the size of the compressor pool.
918 	 * The c_segments_nearing_limit is also based on the compressor pool
919 	 * size and calculated above.
920 	 */
921 	c_segments_incore_limit = c_segments_limit;
922 
923 	if (freezer_incore_cseg_acct) {
924 		/*
925 		 * Add enough segments to track all frozen c_segs that can be stored in swap.
926 		 */
927 		c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
928 		tmp_slot_ptr.s_cseg = c_segments_limit;
929 		/* Panic on internal configs */
930 		assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: freezer reserve overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
931 	}
932 #endif
933 	/*
934 	 * Submap needs space for:
935 	 * - c_segments
936 	 * - c_buffers
937 	 * - swap reclamations -- c_seg_bufsize
938 	 */
939 	c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
940 	c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
941 
942 	compressor_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;
943 
944 #if RECORD_THE_COMPRESSED_DATA
945 	c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
946 	compressor_size += c_compressed_record_sbuf_size;
947 #endif /* RECORD_THE_COMPRESSED_DATA */
948 }
949 STARTUP(KMEM, STARTUP_RANK_FIRST, vm_compressor_set_size);
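/*
 * Illustration with hypothetical numbers (not from the original source): the
 * submap reservation is c_segments_arr_size + c_buffers_size + c_seg_bufsize.
 * With a c_seg_allocsize of 80KB (a 64KB buffer plus one 16KB page, per the
 * non-x86 setting above) and a c_segments_limit of 100,000 segments,
 * c_buffers_size rounds to roughly 7.6GB and dominates the reservation; the
 * trailing c_seg_bufsize is the scratch area reserved for swap reclamations.
 */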
950 
951 KMEM_RANGE_REGISTER_DYNAMIC(compressor, &compressor_range, ^() {
952 	return compressor_size;
953 });
954 
955 bool
956 osenvironment_is_diagnostics(void)
957 {
958 	DTEntry chosen;
959 	const char *osenvironment;
960 	unsigned int size;
961 	if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
962 		if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
963 			return strcmp(osenvironment, "diagnostics") == 0;
964 		}
965 	}
966 	return false;
967 }
968 
969 void
970 vm_compressor_init(void)
971 {
972 	thread_t        thread;
973 #if RECORD_THE_COMPRESSED_DATA
974 	vm_size_t       c_compressed_record_sbuf_size = 0;
975 #endif /* RECORD_THE_COMPRESSED_DATA */
976 
977 #if DEVELOPMENT || DEBUG || CONFIG_FREEZE
978 	char bootarg_name[32];
979 #endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
980 	__unused uint64_t early_boot_compressor_size = compressor_size;
981 
982 #if CONFIG_JETSAM
983 	if (memorystatus_swap_all_apps && osenvironment_is_diagnostics()) {
984 		printf("osenvironment == \"diagnostics\". Disabling app swap.\n");
985 		memorystatus_disable_swap();
986 	}
987 
988 	if (memorystatus_swap_all_apps) {
989 		/*
990 		 * App swap is disabled on devices with small NANDs.
991 		 * Now that we're no longer in early boot, we can get
992 		 * the NAND size and re-run vm_compressor_set_size.
993 		 */
994 		int error = vm_swap_vol_get_capacity(SWAP_VOLUME_NAME, &vm_swap_volume_capacity);
995 #if DEVELOPMENT || DEBUG
996 		if (error != 0) {
997 			panic("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
998 		}
999 #else
1000 		if (error != 0) {
1001 			os_log_with_startup_serial(OS_LOG_DEFAULT, "vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
1002 		}
1003 #endif /* DEVELOPMENT || DEBUG */
1004 		if (vm_swap_volume_capacity < swap_vol_min_capacity) {
1005 			memorystatus_disable_swap();
1006 		}
1007 		/*
1008 		 * Resize the compressor and swap now that we know the capacity
1009 		 * of the swap volume.
1010 		 */
1011 		vm_compressor_set_size();
1012 		/*
1013 		 * We reserved a chunk of VA early in boot for the compressor submap.
1014 		 * We can't allocate more than that.
1015 		 */
1016 		assert(compressor_size <= early_boot_compressor_size);
1017 	}
1018 #endif /* CONFIG_JETSAM */
1019 
1020 #if DEVELOPMENT || DEBUG
1021 	if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
1022 		write_protect_c_segs = FALSE;
1023 	}
1024 
1025 	int vmcval = 1;
1026 #if defined(XNU_TARGET_OS_WATCH)
1027 	vmcval = 0;
1028 #endif /* XNU_TARGET_OS_WATCH */
1029 	PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
1030 
1031 	if (kern_feature_override(KF_COMPRSV_OVRD)) {
1032 		vmcval = 0;
1033 	}
1034 
1035 	if (vmcval == 0) {
1036 #if POPCOUNT_THE_COMPRESSED_DATA
1037 		popcount_c_segs = FALSE;
1038 #endif
1039 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
1040 		checksum_c_segs = FALSE;
1041 #endif
1042 #if VALIDATE_C_SEGMENTS
1043 		validate_c_segs = FALSE;
1044 #endif
1045 		write_protect_c_segs = FALSE;
1046 	}
1047 #endif /* DEVELOPMENT || DEBUG */
1048 
1049 #if CONFIG_FREEZE
1050 	if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
1051 		freezer_incore_cseg_acct = FALSE;
1052 	}
1053 #endif /* CONFIG_FREEZE */
1054 
1055 	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
1056 
1057 #if !XNU_TARGET_OS_OSX
1058 	vm_compressor_minorcompact_threshold_divisor = 20;
1059 	vm_compressor_majorcompact_threshold_divisor = 30;
1060 	vm_compressor_unthrottle_threshold_divisor = 40;
1061 	vm_compressor_catchup_threshold_divisor = 60;
1062 #else /* !XNU_TARGET_OS_OSX */
1063 	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
1064 		vm_compressor_minorcompact_threshold_divisor = 11;
1065 		vm_compressor_majorcompact_threshold_divisor = 13;
1066 		vm_compressor_unthrottle_threshold_divisor = 20;
1067 		vm_compressor_catchup_threshold_divisor = 35;
1068 	} else {
1069 		vm_compressor_minorcompact_threshold_divisor = 20;
1070 		vm_compressor_majorcompact_threshold_divisor = 25;
1071 		vm_compressor_unthrottle_threshold_divisor = 35;
1072 		vm_compressor_catchup_threshold_divisor = 50;
1073 	}
1074 #endif /* !XNU_TARGET_OS_OSX */
1075 
1076 	queue_init(&c_bad_list_head);
1077 	queue_init(&c_age_list_head);
1078 	queue_init(&c_minor_list_head);
1079 	queue_init(&c_major_list_head);
1080 	queue_init(&c_filling_list_head);
1081 	queue_init(&c_early_swapout_list_head);
1082 	queue_init(&c_regular_swapout_list_head);
1083 	queue_init(&c_late_swapout_list_head);
1084 	queue_init(&c_swapio_list_head);
1085 	queue_init(&c_early_swappedin_list_head);
1086 	queue_init(&c_regular_swappedin_list_head);
1087 	queue_init(&c_late_swappedin_list_head);
1088 	queue_init(&c_swappedout_list_head);
1089 	queue_init(&c_swappedout_sparse_list_head);
1090 
1091 	c_free_segno_head = -1;
1092 	c_segments_available = 0;
1093 
1094 	compressor_map = kmem_suballoc(kernel_map, &compressor_range.min_address,
1095 	    compressor_size, VM_MAP_CREATE_NEVER_FAULTS,
1096 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_NOFAIL | KMS_PERMANENT,
1097 	    VM_KERN_MEMORY_COMPRESSOR).kmr_submap;
1098 
1099 	kmem_alloc(compressor_map, (vm_offset_t *)(&c_segments),
1100 	    (sizeof(union c_segu) * c_segments_limit),
1101 	    KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT,
1102 	    VM_KERN_MEMORY_COMPRESSOR);
1103 	kmem_alloc(compressor_map, &c_buffers, c_buffers_size,
1104 	    KMA_NOFAIL | KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT,
1105 	    VM_KERN_MEMORY_COMPRESSOR);
1106 
1107 #if DEVELOPMENT || DEBUG
1108 	if (hvg_is_hcall_available(HVG_HCALL_SET_COREDUMP_DATA)) {
1109 		hvg_hcall_set_coredump_data();
1110 	}
1111 #endif
1112 
1113 	/*
1114 	 * Pick a good size that will minimize fragmentation in zalloc
1115 	 * by minimizing the fragmentation in a 16k run.
1116 	 *
1117 	 * c_seg_slot_var_array_min_len is larger on 4k systems than 16k ones,
1118 	 * making the fragmentation in a 4k page terrible. Using 16k for all
1119 	 * systems matches zalloc() and will minimize fragmentation.
1120 	 */
1121 	uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
1122 	uint32_t cnt  = (16 << 10) / c_segment_size;
1123 	uint32_t frag = (16 << 10) % c_segment_size;
1124 
1125 	c_seg_fixed_array_len = c_seg_slot_var_array_min_len;
1126 
1127 	while (cnt * sizeof(struct c_slot) < frag) {
1128 		c_segment_size += sizeof(struct c_slot);
1129 		c_seg_fixed_array_len++;
1130 		frag -= cnt * sizeof(struct c_slot);
1131 	}
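	/*
	 * Illustration with hypothetical numbers (not from the original source):
	 * suppose sizeof(struct c_slot) == 8 and the initial c_segment_size is
	 * 1000 bytes. Then cnt == 16 segments fit in a 16k run with frag == 384
	 * bytes left over. Growing every segment by one fixed slot consumes
	 * 16 * 8 == 128 bytes of that slack, so the loop above runs twice
	 * (384 -> 256 -> 128) and stops when the remaining 128 bytes can no
	 * longer cover another slot per segment, leaving each c_segment with two
	 * extra fixed slots.
	 */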
1132 
1133 	compressor_segment_zone = zone_create("compressor_segment",
1134 	    c_segment_size, ZC_PGZ_USE_GUARDS | ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
1135 
1136 	c_segments_busy = FALSE;
1137 
1138 	c_segments_next_page = (caddr_t)c_segments;
1139 	vm_compressor_algorithm_init();
1140 
1141 	{
1142 		host_basic_info_data_t hinfo;
1143 		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1144 		size_t bufsize;
1145 		char *buf;
1146 
1147 #define BSD_HOST 1
1148 		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
1149 
1150 		compressor_cpus = hinfo.max_cpus;
1151 
1152 		bufsize = PAGE_SIZE;
1153 		bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
1154 		/* For the KDP path */
1155 		bufsize += vm_compressor_get_decode_scratch_size();
1156 #if CONFIG_FREEZE
1157 		bufsize += vm_compressor_get_encode_scratch_size();
1158 #endif
1159 #if RECORD_THE_COMPRESSED_DATA
1160 		bufsize += c_compressed_record_sbuf_size;
1161 #endif
1162 
1163 		kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
1164 		    KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
1165 		    VM_KERN_MEMORY_COMPRESSOR);
1166 
1167 		/*
1168 		 * kdp_compressor_decompressed_page must be page aligned because we access
1169 		 * it through the physical aperture by page number.
1170 		 */
1171 		kdp_compressor_decompressed_page = buf;
1172 		kdp_compressor_decompressed_page_paddr = kvtophys((vm_offset_t)kdp_compressor_decompressed_page);
1173 		kdp_compressor_decompressed_page_ppnum = (ppnum_t) atop(kdp_compressor_decompressed_page_paddr);
1174 		buf += PAGE_SIZE;
1175 		bufsize -= PAGE_SIZE;
1176 
1177 		compressor_scratch_bufs = buf;
1178 		buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
1179 		bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();
1180 
1181 		kdp_compressor_scratch_buf = buf;
1182 		buf += vm_compressor_get_decode_scratch_size();
1183 		bufsize -= vm_compressor_get_decode_scratch_size();
1184 
1185 #if CONFIG_FREEZE
1186 		freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
1187 		buf += vm_compressor_get_encode_scratch_size();
1188 		bufsize -= vm_compressor_get_encode_scratch_size();
1189 #endif
1190 
1191 #if RECORD_THE_COMPRESSED_DATA
1192 		c_compressed_record_sbuf = buf;
1193 		c_compressed_record_cptr = buf;
1194 		c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
1195 		buf += c_compressed_record_sbuf_size;
1196 		bufsize -= c_compressed_record_sbuf_size;
1197 #endif
1198 		assert(bufsize == 0);
1199 	}
1200 
1201 	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
1202 	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
1203 		panic("vm_compressor_swap_trigger_thread: create failed");
1204 	}
1205 	thread_deallocate(thread);
1206 
1207 	if (vm_pageout_internal_start() != KERN_SUCCESS) {
1208 		panic("vm_compressor_init: Failed to start the internal pageout thread.");
1209 	}
1210 	if (VM_CONFIG_SWAP_IS_PRESENT) {
1211 		vm_compressor_swap_init();
1212 	}
1213 
1214 	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1215 		vm_compressor_is_active = 1;
1216 	}
1217 
1218 #if CONFIG_FREEZE
1219 	memorystatus_freeze_enabled = TRUE;
1220 #endif /* CONFIG_FREEZE */
1221 
1222 	vm_compressor_available = 1;
1223 
1224 	vm_page_reactivate_all_throttled();
1225 
1226 	bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
1227 }
1228 
1229 
1230 #if VALIDATE_C_SEGMENTS
1231 
1232 static void
1233 c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
1234 {
1235 	uint16_t        c_indx;
1236 	int32_t         bytes_used;
1237 	uint32_t        c_rounded_size;
1238 	uint32_t        c_size;
1239 	c_slot_t        cs;
1240 
1241 	if (__probable(validate_c_segs == FALSE)) {
1242 		return;
1243 	}
1244 	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
1245 		c_indx = c_seg->c_firstemptyslot;
1246 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1247 
1248 		if (cs == NULL) {
1249 			panic("c_seg_validate:  no slot backing c_firstemptyslot");
1250 		}
1251 
1252 		if (cs->c_size) {
1253 			panic("c_seg_validate:  c_firstemptyslot has non-zero size (%d)", cs->c_size);
1254 		}
1255 	}
1256 	bytes_used = 0;
1257 
1258 	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
1259 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1260 
1261 		c_size = UNPACK_C_SIZE(cs);
1262 
1263 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1264 
1265 		bytes_used += c_rounded_size;
1266 
1267 #if CHECKSUM_THE_COMPRESSED_DATA
1268 		unsigned csvhash;
1269 		if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
1270 			addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
1271 			panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
1272 		}
1273 #endif
1274 #if POPCOUNT_THE_COMPRESSED_DATA
1275 		unsigned csvpop;
1276 		if (c_size) {
1277 			uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
1278 			if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
1279 				panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
1280 			}
1281 		}
1282 #endif
1283 	}
1284 
1285 	if (bytes_used != c_seg->c_bytes_used) {
1286 		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
1287 	}
1288 
1289 	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1290 		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
1291 		    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1292 	}
1293 
1294 	if (must_be_compact) {
1295 		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1296 			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d,  c_bytes_used = %d",
1297 			    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1298 		}
1299 	}
1300 }
1301 
1302 #endif
1303 
1304 
1305 void
1306 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1307 {
1308 	boolean_t       clear_busy = FALSE;
1309 
1310 	if (c_list_lock_held == FALSE) {
1311 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1312 			C_SEG_BUSY(c_seg);
1313 
1314 			lck_mtx_unlock_always(&c_seg->c_lock);
1315 			lck_mtx_lock_spin_always(c_list_lock);
1316 			lck_mtx_lock_spin_always(&c_seg->c_lock);
1317 
1318 			clear_busy = TRUE;
1319 		}
1320 	}
1321 	assert(c_seg->c_state != C_IS_FILLING);
1322 
1323 	if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) && !c_seg->c_has_donated_pages) {
1324 		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1325 		c_seg->c_on_minorcompact_q = 1;
1326 		c_minor_count++;
1327 	}
1328 	if (c_list_lock_held == FALSE) {
1329 		lck_mtx_unlock_always(c_list_lock);
1330 	}
1331 
1332 	if (clear_busy == TRUE) {
1333 		C_SEG_WAKEUP_DONE(c_seg);
1334 	}
1335 }
1336 
1337 
1338 unsigned int c_seg_moved_to_sparse_list = 0;
1339 
1340 void
1341 c_seg_move_to_sparse_list(c_segment_t c_seg)
1342 {
1343 	boolean_t       clear_busy = FALSE;
1344 
1345 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1346 		C_SEG_BUSY(c_seg);
1347 
1348 		lck_mtx_unlock_always(&c_seg->c_lock);
1349 		lck_mtx_lock_spin_always(c_list_lock);
1350 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1351 
1352 		clear_busy = TRUE;
1353 	}
1354 	c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);
1355 
1356 	c_seg_moved_to_sparse_list++;
1357 
1358 	lck_mtx_unlock_always(c_list_lock);
1359 
1360 	if (clear_busy == TRUE) {
1361 		C_SEG_WAKEUP_DONE(c_seg);
1362 	}
1363 }
1364 
1365 
1366 void
1367 c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg)
1368 {
1369 	c_segment_t c_seg_next;
1370 
1371 	if (queue_empty(qhead)) {
1372 		queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1373 	} else {
1374 		c_seg_next = (c_segment_t)queue_first(qhead);
1375 
1376 		while (TRUE) {
1377 			if (c_seg->c_generation_id < c_seg_next->c_generation_id) {
1378 				queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list);
1379 				break;
1380 			}
1381 			c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list);
1382 
1383 			if (queue_end(qhead, (queue_entry_t) c_seg_next)) {
1384 				queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1385 				break;
1386 			}
1387 		}
1388 	}
1389 }
1390 
1391 
1392 int try_minor_compaction_failed = 0;
1393 int try_minor_compaction_succeeded = 0;
1394 
1395 void
1396 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
1397 {
1398 	assert(c_seg->c_on_minorcompact_q);
1399 	/*
1400 	 * c_seg is currently on the delayed minor compaction
1401 	 * queue and we have c_seg locked. If we can get the
1402 	 * c_list_lock without blocking (if we blocked we could deadlock
1403 	 * because the lock order is c_list_lock then c_seg's lock),
1404 	 * we'll pull it from the delayed list and minor-compact it directly.
1405 	 */
1406 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1407 		/*
1408 		 * c_list_lock is held, we need to bail
1409 		 */
1410 		try_minor_compaction_failed++;
1411 
1412 		lck_mtx_unlock_always(&c_seg->c_lock);
1413 	} else {
1414 		try_minor_compaction_succeeded++;
1415 
1416 		C_SEG_BUSY(c_seg);
1417 		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
1418 	}
1419 }
1420 
1421 
1422 int
1423 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
1424 {
1425 	int     c_seg_freed;
1426 
1427 	assert(c_seg->c_busy);
1428 	assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
1429 
1430 	/*
1431 	 * Check for the case that can occur when we are not swapping
1432 	 * and this segment has been major compacted in the past
1433 	 * and moved to the majorcompact q to remove it from further
1434 	 * consideration. If its occupancy falls too low, we need
1435 	 * to put it back on the age_q so that it will be considered
1436 	 * in the next major compaction sweep; if we don't do this,
1437 	 * we will eventually run into the c_segments_limit.
1438 	 */
1439 	if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
1440 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
1441 	}
1442 	if (!c_seg->c_on_minorcompact_q) {
1443 		if (clear_busy == TRUE) {
1444 			C_SEG_WAKEUP_DONE(c_seg);
1445 		}
1446 
1447 		lck_mtx_unlock_always(&c_seg->c_lock);
1448 
1449 		return 0;
1450 	}
1451 	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
1452 	c_seg->c_on_minorcompact_q = 0;
1453 	c_minor_count--;
1454 
1455 	lck_mtx_unlock_always(c_list_lock);
1456 
1457 	if (disallow_page_replacement == TRUE) {
1458 		lck_mtx_unlock_always(&c_seg->c_lock);
1459 
1460 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
1461 
1462 		lck_mtx_lock_spin_always(&c_seg->c_lock);
1463 	}
1464 	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);
1465 
1466 	if (disallow_page_replacement == TRUE) {
1467 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
1468 	}
1469 
1470 	if (need_list_lock == TRUE) {
1471 		lck_mtx_lock_spin_always(c_list_lock);
1472 	}
1473 
1474 	return c_seg_freed;
1475 }
1476 
1477 void
1478 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1479 {
1480 	c_segment_t c_seg = (c_segment_t) wait_event;
1481 
1482 	waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1483 	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1484 }
1485 
1486 #if DEVELOPMENT || DEBUG
1487 int
1488 do_cseg_wedge_thread(void)
1489 {
1490 	struct c_segment c_seg;
1491 	c_seg.c_busy_for_thread = current_thread();
1492 
1493 	debug_cseg_wait_event = (event_t) &c_seg;
1494 
1495 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1496 	assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1497 
1498 	thread_block(THREAD_CONTINUE_NULL);
1499 
1500 	return 0;
1501 }
1502 
1503 int
1504 do_cseg_unwedge_thread(void)
1505 {
1506 	thread_wakeup(debug_cseg_wait_event);
1507 	debug_cseg_wait_event = NULL;
1508 
1509 	return 0;
1510 }
1511 #endif /* DEVELOPMENT || DEBUG */
1512 
1513 void
1514 c_seg_wait_on_busy(c_segment_t c_seg)
1515 {
1516 	c_seg->c_wanted = 1;
1517 
1518 	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1519 	assert_wait((event_t) (c_seg), THREAD_UNINT);
1520 
1521 	lck_mtx_unlock_always(&c_seg->c_lock);
1522 	thread_block(THREAD_CONTINUE_NULL);
1523 }
1524 
1525 #if CONFIG_FREEZE
1526 /*
1527  * We don't have the task lock held while updating the task's
1528  * c_seg queues. We can do that because of the following restrictions:
1529  *
1530  * - SINGLE FREEZER CONTEXT:
1531  *   We 'insert' c_segs into the task list on the task_freeze path.
1532  *   There can only be one such freeze in progress and the task
1533  *   isn't disappearing because we have the VM map lock held throughout
1534  *   and we have a reference on the proc too.
1535  *
1536  * - SINGLE TASK DISOWN CONTEXT:
1537  *   We 'disown' c_segs of a task ONLY from the task_terminate context. So
1538  *   we don't need the task lock but we need the c_list_lock and the
1539  *   compressor master lock (shared). We also hold the individual
1540  *   c_seg locks (exclusive).
1541  *
1542  *   If we either:
1543  *   - can't get the c_seg lock on a try, then we start again because maybe
1544  *   the c_seg is part of a compaction and might get freed. So we can't trust
1545  *   that linkage and need to restart our queue traversal.
1546  *   - OR, if we run into a busy c_seg (say being swapped in or freed), we
1547  *   drop all locks again and wait and restart our queue traversal.
1548  *
1549  * - The new_owner_task below is currently only the kernel or NULL.
1550  *
1551  */
1552 void
1553 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1554 {
1555 	task_t          owner_task = c_seg->c_task_owner;
1556 	uint64_t        uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1557 
1558 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1559 	LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1560 
1561 	if (owner_task) {
1562 		task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1563 		queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1564 		    c_segment_t, c_task_list_next_cseg);
1565 	}
1566 
1567 	if (new_owner_task) {
1568 		queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1569 		    c_segment_t, c_task_list_next_cseg);
1570 		task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1571 	}
1572 
1573 	c_seg->c_task_owner = new_owner_task;
1574 }
1575 
1576 void
1577 task_disown_frozen_csegs(task_t owner_task)
1578 {
1579 	c_segment_t c_seg = NULL, next_cseg = NULL;
1580 
1581 again:
1582 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
1583 	lck_mtx_lock_spin_always(c_list_lock);
1584 
1585 	for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1586 	    !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1587 	    c_seg = next_cseg) {
1588 		next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1589 
1590 		if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1591 			lck_mtx_unlock(c_list_lock);
1592 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1593 			goto again;
1594 		}
1595 
1596 		if (c_seg->c_busy) {
1597 			lck_mtx_unlock(c_list_lock);
1598 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
1599 
1600 			c_seg_wait_on_busy(c_seg);
1601 
1602 			goto again;
1603 		}
1604 		assert(c_seg->c_task_owner == owner_task);
1605 		c_seg_update_task_owner(c_seg, kernel_task);
1606 		lck_mtx_unlock_always(&c_seg->c_lock);
1607 	}
1608 
1609 	lck_mtx_unlock(c_list_lock);
1610 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
1611 }
1612 #endif /* CONFIG_FREEZE */
1613 
1614 void
1615 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1616 {
1617 	int     old_state = c_seg->c_state;
1618 	queue_head_t *donate_swapout_list_head, *donate_swappedin_list_head;
1619 	uint32_t     *donate_swapout_count, *donate_swappedin_count;
1620 
1621 	/*
1622 	 * On macOS, the donate queue is swapped first, i.e. we use the
1623 	 * c_early_swapout queue. On other swap-capable platforms, we want to swap
1624 	 * those segments out last, so we use the c_late_swapout queue.
1625 	 */
1626 #if XNU_TARGET_OS_OSX
1627 #if (DEVELOPMENT || DEBUG)
1628 	if (new_state != C_IS_FILLING) {
1629 		LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1630 	}
1631 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1632 #endif /* DEVELOPMENT || DEBUG */
1633 
1634 	donate_swapout_list_head = &c_early_swapout_list_head;
1635 	donate_swapout_count = &c_early_swapout_count;
1636 	donate_swappedin_list_head = &c_early_swappedin_list_head;
1637 	donate_swappedin_count = &c_early_swappedin_count;
1638 #else /* XNU_TARGET_OS_OSX */
1639 	donate_swapout_list_head = &c_late_swapout_list_head;
1640 	donate_swapout_count = &c_late_swapout_count;
1641 	donate_swappedin_list_head = &c_late_swappedin_list_head;
1642 	donate_swappedin_count = &c_late_swappedin_count;
1643 #endif /* XNU_TARGET_OS_OSX */
1644 
1645 	switch (old_state) {
1646 	case C_IS_EMPTY:
1647 		assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1648 
1649 		c_empty_count--;
1650 		break;
1651 
1652 	case C_IS_FILLING:
1653 		assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1654 
1655 		queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1656 		c_filling_count--;
1657 		break;
1658 
1659 	case C_ON_AGE_Q:
1660 		assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1661 		    new_state == C_IS_FREE);
1662 
1663 		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1664 		c_age_count--;
1665 		break;
1666 
1667 	case C_ON_SWAPPEDIN_Q:
1668 		if (c_seg->c_has_donated_pages) {
1669 			assert(new_state == C_ON_SWAPOUT_Q || new_state == C_IS_FREE);
1670 			queue_remove(donate_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1671 			*donate_swappedin_count -= 1;
1672 		} else {
1673 			assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1674 #if CONFIG_FREEZE
1675 			assert(c_seg->c_has_freezer_pages);
1676 			queue_remove(&c_early_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1677 			c_early_swappedin_count--;
1678 #else /* CONFIG_FREEZE */
1679 			queue_remove(&c_regular_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1680 			c_regular_swappedin_count--;
1681 #endif /* CONFIG_FREEZE */
1682 		}
1683 		break;
1684 
1685 	case C_ON_SWAPOUT_Q:
1686 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1687 
1688 #if CONFIG_FREEZE
1689 		if (c_seg->c_has_freezer_pages) {
1690 			if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1691 				c_seg_update_task_owner(c_seg, NULL);
1692 			}
1693 			queue_remove(&c_early_swapout_list_head, c_seg, c_segment_t, c_age_list);
1694 			c_early_swapout_count--;
1695 		} else
1696 #endif /* CONFIG_FREEZE */
1697 		{
1698 			if (c_seg->c_has_donated_pages) {
1699 				queue_remove(donate_swapout_list_head, c_seg, c_segment_t, c_age_list);
1700 				*donate_swapout_count -= 1;
1701 			} else {
1702 				queue_remove(&c_regular_swapout_list_head, c_seg, c_segment_t, c_age_list);
1703 				c_regular_swapout_count--;
1704 			}
1705 		}
1706 
1707 		if (new_state == C_ON_AGE_Q) {
1708 			c_seg->c_has_donated_pages = 0;
1709 		}
1710 		thread_wakeup((event_t)&compaction_swapper_running);
1711 		break;
1712 
1713 	case C_ON_SWAPIO_Q:
1714 #if CONFIG_FREEZE
1715 		if (c_seg->c_has_freezer_pages) {
1716 			assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1717 		} else
1718 #endif /* CONFIG_FREEZE */
1719 		{
1720 			if (c_seg->c_has_donated_pages) {
1721 				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_SWAPPEDIN_Q);
1722 			} else {
1723 				assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1724 			}
1725 		}
1726 
1727 		queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1728 		c_swapio_count--;
1729 		break;
1730 
1731 	case C_ON_SWAPPEDOUT_Q:
1732 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1733 		    new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1734 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1735 
1736 		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1737 		c_swappedout_count--;
1738 		break;
1739 
1740 	case C_ON_SWAPPEDOUTSPARSE_Q:
1741 		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1742 		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1743 
1744 		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1745 		c_swappedout_sparse_count--;
1746 		break;
1747 
1748 	case C_ON_MAJORCOMPACT_Q:
1749 		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1750 
1751 		queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1752 		c_major_count--;
1753 		break;
1754 
1755 	case C_ON_BAD_Q:
1756 		assert(new_state == C_IS_FREE);
1757 
1758 		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1759 		c_bad_count--;
1760 		break;
1761 
1762 	default:
1763 		panic("c_seg %p has bad c_state = %d", c_seg, old_state);
1764 	}
1765 
1766 	switch (new_state) {
1767 	case C_IS_FREE:
1768 		assert(old_state != C_IS_FILLING);
1769 
1770 		break;
1771 
1772 	case C_IS_EMPTY:
1773 		assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1774 
1775 		c_empty_count++;
1776 		break;
1777 
1778 	case C_IS_FILLING:
1779 		assert(old_state == C_IS_EMPTY);
1780 
1781 		queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1782 		c_filling_count++;
1783 		break;
1784 
1785 	case C_ON_AGE_Q:
1786 		assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1787 		    old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1788 		    old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1789 
1790 		assert(!c_seg->c_has_donated_pages);
1791 		if (old_state == C_IS_FILLING) {
1792 			queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1793 		} else {
1794 			if (!queue_empty(&c_age_list_head)) {
1795 				c_segment_t     c_first;
1796 
1797 				c_first = (c_segment_t)queue_first(&c_age_list_head);
1798 				c_seg->c_creation_ts = c_first->c_creation_ts;
1799 			}
1800 			queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1801 		}
1802 		c_age_count++;
1803 		break;
1804 
1805 	case C_ON_SWAPPEDIN_Q:
1806 	{
1807 		queue_head_t *list_head;
1808 
1809 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q || old_state == C_ON_SWAPIO_Q);
1810 		if (c_seg->c_has_donated_pages) {
1811 			/* A swapout error can occur while the c_seg is still on the swapio queue */
1812 			list_head = donate_swappedin_list_head;
1813 			*donate_swappedin_count += 1;
1814 		} else {
1815 #if CONFIG_FREEZE
1816 			assert(c_seg->c_has_freezer_pages);
1817 			list_head = &c_early_swappedin_list_head;
1818 			c_early_swappedin_count++;
1819 #else /* CONFIG_FREEZE */
1820 			list_head = &c_regular_swappedin_list_head;
1821 			c_regular_swappedin_count++;
1822 #endif /* CONFIG_FREEZE */
1823 		}
1824 
1825 		if (insert_head == TRUE) {
1826 			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1827 		} else {
1828 			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1829 		}
1830 		break;
1831 	}
1832 
1833 	case C_ON_SWAPOUT_Q:
1834 	{
1835 		queue_head_t *list_head;
1836 
1837 #if CONFIG_FREEZE
1838 		/*
1839 		 * A segment with both identities, frozen + donated pages,
1840 		 * will be put on the early swapout Q, i.e. the frozen identity wins.
1841 		 * This is because when both identities are set, the donation bit
1842 		 * is added later, in the c_current_seg_filled path, for accounting
1843 		 * purposes.
1844 		 */
1845 		if (c_seg->c_has_freezer_pages) {
1846 			assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1847 			list_head = &c_early_swapout_list_head;
1848 			c_early_swapout_count++;
1849 		} else
1850 #endif
1851 		{
1852 			if (c_seg->c_has_donated_pages) {
1853 				assert(old_state == C_ON_SWAPPEDIN_Q || old_state == C_IS_FILLING);
1854 				list_head = donate_swapout_list_head;
1855 				*donate_swapout_count += 1;
1856 			} else {
1857 				assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1858 				list_head = &c_regular_swapout_list_head;
1859 				c_regular_swapout_count++;
1860 			}
1861 		}
1862 
1863 		if (insert_head == TRUE) {
1864 			queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1865 		} else {
1866 			queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1867 		}
1868 		break;
1869 	}
1870 
1871 	case C_ON_SWAPIO_Q:
1872 		assert(old_state == C_ON_SWAPOUT_Q);
1873 
1874 		if (insert_head == TRUE) {
1875 			queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1876 		} else {
1877 			queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1878 		}
1879 		c_swapio_count++;
1880 		break;
1881 
1882 	case C_ON_SWAPPEDOUT_Q:
1883 		assert(old_state == C_ON_SWAPIO_Q);
1884 
1885 		if (insert_head == TRUE) {
1886 			queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1887 		} else {
1888 			queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1889 		}
1890 		c_swappedout_count++;
1891 		break;
1892 
1893 	case C_ON_SWAPPEDOUTSPARSE_Q:
1894 		assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
1895 
1896 		if (insert_head == TRUE) {
1897 			queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1898 		} else {
1899 			queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1900 		}
1901 
1902 		c_swappedout_sparse_count++;
1903 		break;
1904 
1905 	case C_ON_MAJORCOMPACT_Q:
1906 		assert(old_state == C_ON_AGE_Q);
1907 		assert(!c_seg->c_has_donated_pages);
1908 
1909 		if (insert_head == TRUE) {
1910 			queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1911 		} else {
1912 			queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1913 		}
1914 		c_major_count++;
1915 		break;
1916 
1917 	case C_ON_BAD_Q:
1918 		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1919 
1920 		if (insert_head == TRUE) {
1921 			queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1922 		} else {
1923 			queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1924 		}
1925 		c_bad_count++;
1926 		break;
1927 
1928 	default:
1929 		panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
1930 	}
1931 	c_seg->c_state = new_state;
1932 }
1933 
1934 
1935 
1936 void
1937 c_seg_free(c_segment_t c_seg)
1938 {
1939 	assert(c_seg->c_busy);
1940 
1941 	lck_mtx_unlock_always(&c_seg->c_lock);
1942 	lck_mtx_lock_spin_always(c_list_lock);
1943 	lck_mtx_lock_spin_always(&c_seg->c_lock);
1944 
1945 	c_seg_free_locked(c_seg);
1946 }
1947 
1948 
1949 void
1950 c_seg_free_locked(c_segment_t c_seg)
1951 {
1952 	int             segno;
1953 	int             pages_populated = 0;
1954 	int32_t         *c_buffer = NULL;
1955 	uint64_t        c_swap_handle = 0;
1956 
1957 	assert(c_seg->c_busy);
1958 	assert(c_seg->c_slots_used == 0);
1959 	assert(!c_seg->c_on_minorcompact_q);
1960 	assert(!c_seg->c_busy_swapping);
1961 
1962 	if (c_seg->c_overage_swap == TRUE) {
1963 		c_overage_swapped_count--;
1964 		c_seg->c_overage_swap = FALSE;
1965 	}
1966 	if (!(C_SEG_IS_ONDISK(c_seg))) {
1967 		c_buffer = c_seg->c_store.c_buffer;
1968 	} else {
1969 		c_swap_handle = c_seg->c_store.c_swap_handle;
1970 	}
1971 
1972 	c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
1973 
1974 	if (c_buffer) {
1975 		pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
1976 		c_seg->c_store.c_buffer = NULL;
1977 	} else {
1978 #if CONFIG_FREEZE
1979 		c_seg_update_task_owner(c_seg, NULL);
1980 #endif /* CONFIG_FREEZE */
1981 
1982 		c_seg->c_store.c_swap_handle = (uint64_t)-1;
1983 	}
1984 
1985 	lck_mtx_unlock_always(&c_seg->c_lock);
1986 
1987 	lck_mtx_unlock_always(c_list_lock);
1988 
1989 	if (c_buffer) {
1990 		if (pages_populated) {
1991 			kernel_memory_depopulate((vm_offset_t)c_buffer,
1992 			    ptoa(pages_populated), KMA_COMPRESSOR,
1993 			    VM_KERN_MEMORY_COMPRESSOR);
1994 		}
1995 	} else if (c_swap_handle) {
1996 		/*
1997 		 * Free swap space on disk.
1998 		 */
1999 		vm_swap_free(c_swap_handle);
2000 	}
2001 	lck_mtx_lock_spin_always(&c_seg->c_lock);
2002 	/*
2003 	 * c_seg must remain busy until
2004 	 * after the call to vm_swap_free
2005 	 */
2006 	C_SEG_WAKEUP_DONE(c_seg);
2007 	lck_mtx_unlock_always(&c_seg->c_lock);
2008 
2009 	segno = c_seg->c_mysegno;
2010 
2011 	lck_mtx_lock_spin_always(c_list_lock);
2012 	/*
2013 	 * because the c_buffer is now associated with the segno,
2014 	 * we can't put the segno back on the free list until
2015 	 * after we have depopulated the c_buffer range, or
2016 	 * we run the risk of depopulating a range that is
2017 	 * now being used in one of the compressor heads
2018 	 */
2019 	c_segments[segno].c_segno = c_free_segno_head;
2020 	c_free_segno_head = segno;
2021 	c_segment_count--;
2022 
2023 	lck_mtx_unlock_always(c_list_lock);
2024 
2025 	lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
2026 
2027 	if (c_seg->c_slot_var_array_len) {
2028 		kfree_type(struct c_slot, c_seg->c_slot_var_array_len,
2029 		    c_seg->c_slot_var_array);
2030 	}
2031 
2032 	zfree(compressor_segment_zone, c_seg);
2033 }
2034 
2035 #if DEVELOPMENT || DEBUG
2036 int c_seg_trim_page_count = 0;
2037 #endif
2038 
2039 void
2040 c_seg_trim_tail(c_segment_t c_seg)
2041 {
2042 	c_slot_t        cs;
2043 	uint32_t        c_size;
2044 	uint32_t        c_offset;
2045 	uint32_t        c_rounded_size;
2046 	uint16_t        current_nextslot;
2047 	uint32_t        current_populated_offset;
2048 
2049 	if (c_seg->c_bytes_used == 0) {
2050 		return;
2051 	}
2052 	current_nextslot = c_seg->c_nextslot;
2053 	current_populated_offset = c_seg->c_populated_offset;
2054 
2055 	while (c_seg->c_nextslot) {
2056 		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
2057 
2058 		c_size = UNPACK_C_SIZE(cs);
2059 
2060 		if (c_size) {
2061 			if (current_nextslot != c_seg->c_nextslot) {
2062 				c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2063 				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2064 
2065 				c_seg->c_nextoffset = c_offset;
2066 				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
2067 				    ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2068 
2069 				if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
2070 					c_seg->c_firstemptyslot = c_seg->c_nextslot;
2071 				}
2072 #if DEVELOPMENT || DEBUG
2073 				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
2074 				    round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
2075 				    PAGE_SIZE);
2076 #endif
2077 			}
2078 			break;
2079 		}
2080 		c_seg->c_nextslot--;
2081 	}
2082 	assert(c_seg->c_nextslot);
2083 }
2084 
2085 
2086 int
2087 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
2088 {
2089 	c_slot_mapping_t slot_ptr;
2090 	uint32_t        c_offset = 0;
2091 	uint32_t        old_populated_offset;
2092 	uint32_t        c_rounded_size;
2093 	uint32_t        c_size;
2094 	uint16_t        c_indx = 0;
2095 	int             i;
2096 	c_slot_t        c_dst;
2097 	c_slot_t        c_src;
2098 
2099 	assert(c_seg->c_busy);
2100 
2101 #if VALIDATE_C_SEGMENTS
2102 	c_seg_validate(c_seg, FALSE);
2103 #endif
2104 	if (c_seg->c_bytes_used == 0) {
2105 		c_seg_free(c_seg);
2106 		return 1;
2107 	}
2108 	lck_mtx_unlock_always(&c_seg->c_lock);
2109 
2110 	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
2111 		goto done;
2112 	}
2113 
2114 /* TODO: assert first emptyslot's c_size is actually 0 */
2115 
2116 #if DEVELOPMENT || DEBUG
2117 	C_SEG_MAKE_WRITEABLE(c_seg);
2118 #endif
2119 
2120 #if VALIDATE_C_SEGMENTS
2121 	c_seg->c_was_minor_compacted++;
2122 #endif
2123 	c_indx = c_seg->c_firstemptyslot;
2124 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2125 
2126 	old_populated_offset = c_seg->c_populated_offset;
2127 	c_offset = c_dst->c_offset;
2128 
2129 	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
2130 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
2131 
2132 		c_size = UNPACK_C_SIZE(c_src);
2133 
2134 		if (c_size == 0) {
2135 			continue;
2136 		}
2137 
2138 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2139 /* N.B.: This memcpy may be an overlapping copy */
2140 		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
2141 
2142 		cslot_copy(c_dst, c_src);
2143 		c_dst->c_offset = c_offset;
2144 
2145 		slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2146 		slot_ptr->s_cindx = c_indx;
2147 
2148 		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2149 		PACK_C_SIZE(c_src, 0);
2150 		c_indx++;
2151 
2152 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2153 	}
2154 	c_seg->c_firstemptyslot = c_indx;
2155 	c_seg->c_nextslot = c_indx;
2156 	c_seg->c_nextoffset = c_offset;
2157 	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2158 	c_seg->c_bytes_unused = 0;
2159 
2160 #if VALIDATE_C_SEGMENTS
2161 	c_seg_validate(c_seg, TRUE);
2162 #endif
2163 	if (old_populated_offset > c_seg->c_populated_offset) {
2164 		uint32_t        gc_size;
2165 		int32_t         *gc_ptr;
2166 
2167 		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
2168 		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
2169 
2170 		kernel_memory_depopulate((vm_offset_t)gc_ptr, gc_size,
2171 		    KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
2172 	}
2173 
2174 #if DEVELOPMENT || DEBUG
2175 	C_SEG_WRITE_PROTECT(c_seg);
2176 #endif
2177 
2178 done:
2179 	if (clear_busy == TRUE) {
2180 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2181 		C_SEG_WAKEUP_DONE(c_seg);
2182 		lck_mtx_unlock_always(&c_seg->c_lock);
2183 	}
2184 	return 0;
2185 }
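/*
 * The rounding used above, (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) &
 * ~C_SEG_OFFSET_ALIGNMENT_MASK, pads each compressed slot up to the
 * segment's offset alignment. As a sketch, assuming for illustration a
 * 4-byte alignment (mask 0x3), a 1037-byte slot would occupy
 * (1037 + 3) & ~3 = 1040 bytes in the buffer. Minor compaction slides the
 * live slots down over the holes left by freed slots, then depopulates any
 * pages past the new (page-rounded) populated offset.
 */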
2186 
2187 
2188 static void
2189 c_seg_alloc_nextslot(c_segment_t c_seg)
2190 {
2191 	struct c_slot   *old_slot_array = NULL;
2192 	struct c_slot   *new_slot_array = NULL;
2193 	int             newlen;
2194 	int             oldlen;
2195 
2196 	if (c_seg->c_nextslot < c_seg_fixed_array_len) {
2197 		return;
2198 	}
2199 
2200 	if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
2201 		oldlen = c_seg->c_slot_var_array_len;
2202 		old_slot_array = c_seg->c_slot_var_array;
2203 
2204 		if (oldlen == 0) {
2205 			newlen = c_seg_slot_var_array_min_len;
2206 		} else {
2207 			newlen = oldlen * 2;
2208 		}
2209 
2210 		new_slot_array = kalloc_type(struct c_slot, newlen, Z_WAITOK);
2211 
2212 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2213 
2214 		if (old_slot_array) {
2215 			memcpy(new_slot_array, old_slot_array,
2216 			    sizeof(struct c_slot) * oldlen);
2217 		}
2218 
2219 		c_seg->c_slot_var_array_len = newlen;
2220 		c_seg->c_slot_var_array = new_slot_array;
2221 
2222 		lck_mtx_unlock_always(&c_seg->c_lock);
2223 
2224 		kfree_type(struct c_slot, oldlen, old_slot_array);
2225 	}
2226 }
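/*
 * Growth sketch for the variable slot array: the first overflow past the
 * fixed array allocates c_seg_slot_var_array_min_len slots, and each later
 * overflow doubles the array (e.g., if the minimum were 32: 32, 64, 128, ...).
 * The new array is allocated and the old one freed outside the segment lock;
 * only the memcpy and the pointer/length switch happen with c_seg->c_lock held.
 */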
2227 
2228 
2229 #define C_SEG_MAJOR_COMPACT_STATS_MAX   (30)
2230 
2231 struct {
2232 	uint64_t asked_permission;
2233 	uint64_t compactions;
2234 	uint64_t moved_slots;
2235 	uint64_t moved_bytes;
2236 	uint64_t wasted_space_in_swapouts;
2237 	uint64_t count_of_swapouts;
2238 	uint64_t count_of_freed_segs;
2239 	uint64_t bailed_compactions;
2240 	uint64_t bytes_freed_rate_us;
2241 } c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
2242 
2243 int c_seg_major_compact_stats_now = 0;
2244 
2245 
2246 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE     ((c_seg_bufsize * 90) / 100)
2247 
2248 
2249 boolean_t
2250 c_seg_major_compact_ok(
2251 	c_segment_t c_seg_dst,
2252 	c_segment_t c_seg_src)
2253 {
2254 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
2255 
2256 	if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
2257 	    c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
2258 		return FALSE;
2259 	}
2260 
2261 	if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2262 		/*
2263 		 * destination segment is full... can't compact
2264 		 */
2265 		return FALSE;
2266 	}
2267 
2268 	return TRUE;
2269 }
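/*
 * In other words, a major compaction is worthwhile only if at least one of
 * the two segments is under C_MAJOR_COMPACTION_SIZE_APPROPRIATE (90% of
 * c_seg_bufsize) and the destination still has offset and slot headroom.
 * As a rough example, if c_seg_bufsize were 65536 bytes, the threshold
 * would be (65536 * 90) / 100 = 58982 bytes.
 */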
2270 
2271 
2272 boolean_t
2273 c_seg_major_compact(
2274 	c_segment_t c_seg_dst,
2275 	c_segment_t c_seg_src)
2276 {
2277 	c_slot_mapping_t slot_ptr;
2278 	uint32_t        c_rounded_size;
2279 	uint32_t        c_size;
2280 	uint16_t        dst_slot;
2281 	int             i;
2282 	c_slot_t        c_dst;
2283 	c_slot_t        c_src;
2284 	boolean_t       keep_compacting = TRUE;
2285 
2286 	/*
2287 	 * segments are not locked but they are both marked c_busy
2288 	 * which keeps c_decompress from working on them...
2289 	 * we can safely allocate new pages, move compressed data
2290 	 * from c_seg_src to c_seg_dst and update both c_segments'
2291 	 * state w/o holding the master lock
2292 	 */
2293 #if DEVELOPMENT || DEBUG
2294 	C_SEG_MAKE_WRITEABLE(c_seg_dst);
2295 #endif
2296 
2297 #if VALIDATE_C_SEGMENTS
2298 	c_seg_dst->c_was_major_compacted++;
2299 	c_seg_src->c_was_major_donor++;
2300 #endif
2301 	assertf(c_seg_dst->c_has_donated_pages == c_seg_src->c_has_donated_pages, "Mismatched donation status Dst: %p, Src: %p\n", c_seg_dst, c_seg_src);
2302 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
2303 
2304 	dst_slot = c_seg_dst->c_nextslot;
2305 
2306 	for (i = 0; i < c_seg_src->c_nextslot; i++) {
2307 		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
2308 
2309 		c_size = UNPACK_C_SIZE(c_src);
2310 
2311 		if (c_size == 0) {
2312 			/* BATCH: move what we have so far; */
2313 			continue;
2314 		}
2315 
2316 		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) {
2317 			int     size_to_populate;
2318 
2319 			/* doesn't fit */
2320 			size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
2321 
2322 			if (size_to_populate == 0) {
2323 				/* can't fit */
2324 				keep_compacting = FALSE;
2325 				break;
2326 			}
2327 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
2328 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
2329 			}
2330 
2331 			kernel_memory_populate(
2332 				(vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
2333 				size_to_populate,
2334 				KMA_NOFAIL | KMA_COMPRESSOR,
2335 				VM_KERN_MEMORY_COMPRESSOR);
2336 
2337 			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
2338 			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
2339 		}
2340 		c_seg_alloc_nextslot(c_seg_dst);
2341 
2342 		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
2343 
2344 		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
2345 
2346 		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2347 
2348 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
2349 		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += c_size;
2350 
2351 		cslot_copy(c_dst, c_src);
2352 		c_dst->c_offset = c_seg_dst->c_nextoffset;
2353 
2354 		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
2355 			c_seg_dst->c_firstemptyslot++;
2356 		}
2357 		c_seg_dst->c_slots_used++;
2358 		c_seg_dst->c_nextslot++;
2359 		c_seg_dst->c_bytes_used += c_rounded_size;
2360 		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2361 
2362 		PACK_C_SIZE(c_src, 0);
2363 
2364 		c_seg_src->c_bytes_used -= c_rounded_size;
2365 		c_seg_src->c_bytes_unused += c_rounded_size;
2366 		c_seg_src->c_firstemptyslot = 0;
2367 
2368 		assert(c_seg_src->c_slots_used);
2369 		c_seg_src->c_slots_used--;
2370 
2371 		if (!c_seg_src->c_swappedin) {
2372 			/* Pessimistically lose swappedin status when non-swappedin pages are added. */
2373 			c_seg_dst->c_swappedin = false;
2374 		}
2375 
2376 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2377 			/* dest segment is now full */
2378 			keep_compacting = FALSE;
2379 			break;
2380 		}
2381 	}
2382 #if DEVELOPMENT || DEBUG
2383 	C_SEG_WRITE_PROTECT(c_seg_dst);
2384 #endif
2385 	if (dst_slot < c_seg_dst->c_nextslot) {
2386 		PAGE_REPLACEMENT_ALLOWED(TRUE);
2387 		/*
2388 		 * we've now locked out c_decompress from
2389 		 * converting the slot passed into it into
2390 		 * a c_segment_t which allows us to use
2391 		 * the backptr to change which c_segment and
2392 		 * index the slot points to
2393 		 */
2394 		while (dst_slot < c_seg_dst->c_nextslot) {
2395 			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
2396 
2397 			slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2398 			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
2399 			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
2400 			slot_ptr->s_cindx = dst_slot++;
2401 		}
2402 		PAGE_REPLACEMENT_ALLOWED(FALSE);
2403 	}
2404 	return keep_compacting;
2405 }
2406 
2407 
2408 uint64_t
2409 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2410 {
2411 	uint64_t end_msecs;
2412 	uint64_t start_msecs;
2413 
2414 	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2415 	start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2416 
2417 	return end_msecs - start_msecs;
2418 }
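/*
 * Worked example: for end = (12s, 500000000ns) and start = (11s, 750000000ns)
 * the routine computes 12*1000 + 500000000/1000000 = 12500 msecs and
 * 11*1000 + 750000000/1000000 = 11750 msecs, returning 750 msecs elapsed.
 * The subtraction is done in milliseconds, so sub-millisecond detail in the
 * nanosecond fields is intentionally dropped.
 */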
2419 
2420 
2421 
2422 uint32_t compressor_eval_period_in_msecs = 250;
2423 uint32_t compressor_sample_min_in_msecs = 500;
2424 uint32_t compressor_sample_max_in_msecs = 10000;
2425 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2426 uint32_t compressor_thrashing_min_per_10msecs = 20;
2427 
2428 /* When true, reset sample data next chance we get. */
2429 static boolean_t        compressor_need_sample_reset = FALSE;
2430 
2431 
2432 void
2433 compute_swapout_target_age(void)
2434 {
2435 	clock_sec_t     cur_ts_sec;
2436 	clock_nsec_t    cur_ts_nsec;
2437 	uint32_t        min_operations_needed_in_this_sample;
2438 	uint64_t        elapsed_msecs_in_eval;
2439 	uint64_t        elapsed_msecs_in_sample;
2440 	boolean_t       need_eval_reset = FALSE;
2441 
2442 	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2443 
2444 	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2445 
2446 	if (compressor_need_sample_reset ||
2447 	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2448 		compressor_need_sample_reset = TRUE;
2449 		need_eval_reset = TRUE;
2450 		goto done;
2451 	}
2452 	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2453 
2454 	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2455 		goto done;
2456 	}
2457 	need_eval_reset = TRUE;
2458 
2459 	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2460 
2461 	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2462 
2463 	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2464 	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2465 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2466 		    sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2467 
2468 		swapout_target_age = 0;
2469 
2470 		compressor_need_sample_reset = TRUE;
2471 		need_eval_reset = TRUE;
2472 		goto done;
2473 	}
2474 	last_eval_compression_count = sample_period_compression_count;
2475 	last_eval_decompression_count = sample_period_decompression_count;
2476 
2477 	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2478 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2479 		goto done;
2480 	}
2481 	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2482 		uint64_t        running_total;
2483 		uint64_t        working_target;
2484 		uint64_t        aging_target;
2485 		uint32_t        oldest_age_of_csegs_sampled = 0;
2486 		uint64_t        working_set_approximation = 0;
2487 
2488 		swapout_target_age = 0;
2489 
2490 		working_target = (sample_period_decompression_count / 100) * 95;                /* 95 percent */
2491 		aging_target = (sample_period_decompression_count / 100) * 1;                   /* 1 percent */
2492 		running_total = 0;
2493 
2494 		for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2495 			running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2496 
2497 			working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2498 
2499 			if (running_total >= working_target) {
2500 				break;
2501 			}
2502 		}
2503 		if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2504 			working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2505 
2506 			if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
2507 				running_total = overage_decompressions_during_sample_period;
2508 
2509 				for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2510 					running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2511 
2512 					if (running_total >= aging_target) {
2513 						break;
2514 					}
2515 				}
2516 				swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2517 
2518 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2519 			} else {
2520 				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2521 			}
2522 		} else {
2523 			KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2524 		}
2525 
2526 		compressor_need_sample_reset = TRUE;
2527 		need_eval_reset = TRUE;
2528 	} else {
2529 		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2530 	}
2531 done:
2532 	if (compressor_need_sample_reset == TRUE) {
2533 		bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2534 		overage_decompressions_during_sample_period = 0;
2535 
2536 		start_of_sample_period_sec = cur_ts_sec;
2537 		start_of_sample_period_nsec = cur_ts_nsec;
2538 		sample_period_decompression_count = 0;
2539 		sample_period_compression_count = 0;
2540 		last_eval_decompression_count = 0;
2541 		last_eval_compression_count = 0;
2542 		compressor_need_sample_reset = FALSE;
2543 	}
2544 	if (need_eval_reset == TRUE) {
2545 		start_of_eval_period_sec = cur_ts_sec;
2546 		start_of_eval_period_nsec = cur_ts_nsec;
2547 	}
2548 }
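/*
 * Summary of the heuristic above: once enough decompressions have occurred
 * in the sample window, we walk the per-age histogram of decompressions from
 * youngest to oldest until roughly 95% of them are covered (working_target).
 * If the implied working set still fits within the compressor pool, we then
 * walk from the oldest bucket downward until roughly 1% of decompressions
 * (aging_target) are accounted for and set swapout_target_age so that
 * segments created before that timestamp become the preferred swapout
 * candidates; otherwise swapout_target_age is left at 0.
 */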
2549 
2550 
2551 int             compaction_swapper_init_now = 0;
2552 int             compaction_swapper_running = 0;
2553 int             compaction_swapper_awakened = 0;
2554 int             compaction_swapper_abort = 0;
2555 
2556 bool
2557 vm_compressor_swapout_is_ripe()
2558 {
2559 	bool is_ripe = false;
2560 	if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2561 		c_segment_t     c_seg;
2562 		clock_sec_t     now;
2563 		clock_sec_t     age;
2564 		clock_nsec_t    nsec;
2565 
2566 		clock_get_system_nanotime(&now, &nsec);
2567 		age = 0;
2568 
2569 		lck_mtx_lock_spin_always(c_list_lock);
2570 
2571 		if (!queue_empty(&c_age_list_head)) {
2572 			c_seg = (c_segment_t) queue_first(&c_age_list_head);
2573 
2574 			age = now - c_seg->c_creation_ts;
2575 		}
2576 		lck_mtx_unlock_always(c_list_lock);
2577 
2578 		if (age >= vm_ripe_target_age) {
2579 			is_ripe = true;
2580 		}
2581 	}
2582 	return is_ripe;
2583 }
2584 
2585 static bool
2586 compressor_swapout_conditions_met(void)
2587 {
2588 	bool should_swap = false;
2589 	if (COMPRESSOR_NEEDS_TO_SWAP()) {
2590 		should_swap = true;
2591 		vmcs_stats.compressor_swap_threshold_exceeded++;
2592 	}
2593 	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2594 		should_swap = true;
2595 		vmcs_stats.external_q_throttled++;
2596 	}
2597 	if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2598 		should_swap = true;
2599 		vmcs_stats.free_count_below_reserve++;
2600 	}
2601 	return should_swap;
2602 }
2603 
2604 static bool
2605 compressor_needs_to_swap()
2606 {
2607 	bool should_swap = false;
2608 	if (vm_compressor_swapout_is_ripe()) {
2609 		should_swap = true;
2610 		goto check_if_low_space;
2611 	}
2612 
2613 	if (VM_CONFIG_SWAP_IS_ACTIVE) {
2614 		should_swap =  compressor_swapout_conditions_met();
2615 		if (should_swap) {
2616 			goto check_if_low_space;
2617 		}
2618 	}
2619 
2620 #if (XNU_TARGET_OS_OSX && __arm64__)
2621 	/*
2622 	 * Thrashing detection disabled.
2623 	 */
2624 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2625 
2626 	if (vm_compressor_is_thrashing()) {
2627 		should_swap = true;
2628 		vmcs_stats.thrashing_detected++;
2629 	}
2630 
2631 #if CONFIG_PHANTOM_CACHE
2632 	if (vm_phantom_cache_check_pressure()) {
2633 		os_atomic_store(&memorystatus_phantom_cache_pressure, true, release);
2634 		should_swap = true;
2635 	}
2636 #endif
2637 	if (swapout_target_age) {
2638 		should_swap = true;
2639 	}
2640 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2641 
2642 check_if_low_space:
2643 
2644 #if CONFIG_JETSAM
2645 	if (should_swap || vm_compressor_low_on_space() == TRUE) {
2646 		if (vm_compressor_thrashing_detected == FALSE) {
2647 			vm_compressor_thrashing_detected = TRUE;
2648 
2649 			if (swapout_target_age) {
2650 				compressor_thrashing_induced_jetsam++;
2651 			} else if (vm_compressor_low_on_space() == TRUE) {
2652 				compressor_thrashing_induced_jetsam++;
2653 			} else {
2654 				filecache_thrashing_induced_jetsam++;
2655 			}
2656 			/*
2657 			 * Wake up the memorystatus thread so that it can return
2658 			 * the system to a healthy state (by killing processes).
2659 			 */
2660 			memorystatus_thread_wake();
2661 		}
2662 		/*
2663 		 * let the jetsam take precedence over
2664 		 * any major compactions we might have
2665 		 * been able to do... otherwise we run
2666 		 * the risk of doing major compactions
2667 		 * on segments we're about to free up
2668 		 * due to the jetsam activity.
2669 		 */
2670 		should_swap = false;
2671 		if (memorystatus_swap_all_apps && vm_swap_low_on_space()) {
2672 			vm_compressor_take_paging_space_action();
2673 		}
2674 	}
2675 
2676 #else /* CONFIG_JETSAM */
2677 	if (should_swap && vm_swap_low_on_space()) {
2678 		vm_compressor_take_paging_space_action();
2679 	}
2680 #endif /* CONFIG_JETSAM */
2681 
2682 	if (should_swap == false) {
2683 		/*
2684 		 * vm_compressor_needs_to_major_compact returns true only if we're
2685 		 * about to run out of available compressor segments... in this
2686 		 * case, we absolutely need to run a major compaction even if
2687 		 * we've just kicked off a jetsam or we don't otherwise need to
2688 		 * swap... terminating objects releases
2689 		 * pages back to the uncompressed cache, but does not guarantee
2690 		 * that we will free up even a single compression segment
2691 		 */
2692 		should_swap = vm_compressor_needs_to_major_compact();
2693 		if (should_swap) {
2694 			vmcs_stats.fragmentation_detected++;
2695 		}
2696 	}
2697 
2698 	/*
2699 	 * returning TRUE when swap_supported == FALSE
2700 	 * will cause the major compaction engine to
2701 	 * run, but will not trigger any swapping...
2702 	 * segments that have been major compacted
2703 	 * will be moved to the majorcompact queue
2704 	 */
2705 	return should_swap;
2706 }
2707 
2708 #if CONFIG_JETSAM
2709 /*
2710  * This function is called from the jetsam thread after killing something to
2711  * mitigate thrashing.
2712  *
2713  * We need to restart our thrashing detection heuristics since memory pressure
2714  * has potentially changed significantly, and we don't want to detect on old
2715  * data from before the jetsam.
2716  */
2717 void
2718 vm_thrashing_jetsam_done(void)
2719 {
2720 	vm_compressor_thrashing_detected = FALSE;
2721 
2722 	/* Were we compressor-thrashing or filecache-thrashing? */
2723 	if (swapout_target_age) {
2724 		swapout_target_age = 0;
2725 		compressor_need_sample_reset = TRUE;
2726 	}
2727 #if CONFIG_PHANTOM_CACHE
2728 	else {
2729 		vm_phantom_cache_restart_sample();
2730 	}
2731 #endif
2732 }
2733 #endif /* CONFIG_JETSAM */
2734 
2735 uint32_t vm_wake_compactor_swapper_calls = 0;
2736 uint32_t vm_run_compactor_already_running = 0;
2737 uint32_t vm_run_compactor_empty_minor_q = 0;
2738 uint32_t vm_run_compactor_did_compact = 0;
2739 uint32_t vm_run_compactor_waited = 0;
2740 
2741 void
2742 vm_run_compactor(void)
2743 {
2744 	if (c_segment_count == 0) {
2745 		return;
2746 	}
2747 
2748 	lck_mtx_lock_spin_always(c_list_lock);
2749 
2750 	if (c_minor_count == 0) {
2751 		vm_run_compactor_empty_minor_q++;
2752 
2753 		lck_mtx_unlock_always(c_list_lock);
2754 		return;
2755 	}
2756 	if (compaction_swapper_running) {
2757 		if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2758 			vm_run_compactor_already_running++;
2759 
2760 			lck_mtx_unlock_always(c_list_lock);
2761 			return;
2762 		}
2763 		vm_run_compactor_waited++;
2764 
2765 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2766 
2767 		lck_mtx_unlock_always(c_list_lock);
2768 
2769 		thread_block(THREAD_CONTINUE_NULL);
2770 
2771 		return;
2772 	}
2773 	vm_run_compactor_did_compact++;
2774 
2775 	fastwake_warmup = FALSE;
2776 	compaction_swapper_running = 1;
2777 
2778 	vm_compressor_do_delayed_compactions(FALSE);
2779 
2780 	compaction_swapper_running = 0;
2781 
2782 	lck_mtx_unlock_always(c_list_lock);
2783 
2784 	thread_wakeup((event_t)&compaction_swapper_running);
2785 }
2786 
2787 
2788 void
2789 vm_wake_compactor_swapper(void)
2790 {
2791 	if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2792 		return;
2793 	}
2794 
2795 	if (c_minor_count || vm_compressor_needs_to_major_compact()) {
2796 		lck_mtx_lock_spin_always(c_list_lock);
2797 
2798 		fastwake_warmup = FALSE;
2799 
2800 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2801 			vm_wake_compactor_swapper_calls++;
2802 
2803 			compaction_swapper_awakened = 1;
2804 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2805 		}
2806 		lck_mtx_unlock_always(c_list_lock);
2807 	}
2808 }
2809 
2810 
2811 void
2812 vm_consider_swapping()
2813 {
2814 	assert(VM_CONFIG_SWAP_IS_PRESENT);
2815 
2816 	lck_mtx_lock_spin_always(c_list_lock);
2817 
2818 	compaction_swapper_abort = 1;
2819 
2820 	while (compaction_swapper_running) {
2821 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2822 
2823 		lck_mtx_unlock_always(c_list_lock);
2824 
2825 		thread_block(THREAD_CONTINUE_NULL);
2826 
2827 		lck_mtx_lock_spin_always(c_list_lock);
2828 	}
2829 	compaction_swapper_abort = 0;
2830 	compaction_swapper_running = 1;
2831 
2832 	vm_swapout_ripe_segments = TRUE;
2833 
2834 	vm_compressor_process_major_segments(vm_swapout_ripe_segments);
2835 
2836 	vm_compressor_compact_and_swap(FALSE);
2837 
2838 	compaction_swapper_running = 0;
2839 
2840 	vm_swapout_ripe_segments = FALSE;
2841 
2842 	lck_mtx_unlock_always(c_list_lock);
2843 
2844 	thread_wakeup((event_t)&compaction_swapper_running);
2845 }
2846 
2847 
2848 void
2849 vm_consider_waking_compactor_swapper(void)
2850 {
2851 	boolean_t       need_wakeup = FALSE;
2852 
2853 	if (c_segment_count == 0) {
2854 		return;
2855 	}
2856 
2857 	if (compaction_swapper_running || compaction_swapper_awakened) {
2858 		return;
2859 	}
2860 
2861 	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
2862 		compaction_swapper_init_now = 1;
2863 		need_wakeup = TRUE;
2864 	}
2865 
2866 	if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {
2867 		need_wakeup = TRUE;
2868 	} else if (compressor_needs_to_swap()) {
2869 		need_wakeup = TRUE;
2870 	} else if (c_minor_count) {
2871 		uint64_t        total_bytes;
2872 
2873 		total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;
2874 
2875 		if ((total_bytes - compressor_bytes_used) > total_bytes / 10) {
2876 			need_wakeup = TRUE;
2877 		}
2878 	}
2879 	if (need_wakeup == TRUE) {
2880 		lck_mtx_lock_spin_always(c_list_lock);
2881 
2882 		fastwake_warmup = FALSE;
2883 
2884 		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2885 			memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
2886 
2887 			compaction_swapper_awakened = 1;
2888 			thread_wakeup((event_t)&c_compressor_swap_trigger);
2889 		}
2890 		lck_mtx_unlock_always(c_list_lock);
2891 	}
2892 }
2893 
2894 
2895 #define C_SWAPOUT_LIMIT                 4
2896 #define DELAYED_COMPACTIONS_PER_PASS    30
2897 
2898 void
2899 vm_compressor_do_delayed_compactions(boolean_t flush_all)
2900 {
2901 	c_segment_t     c_seg;
2902 	int             number_compacted = 0;
2903 	boolean_t       needs_to_swap = FALSE;
2904 	uint32_t        c_swapout_count = 0;
2905 
2906 
2907 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
2908 
2909 #if XNU_TARGET_OS_OSX
2910 	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
2911 #endif /* XNU_TARGET_OS_OSX */
2912 
2913 	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
2914 		c_seg = (c_segment_t)queue_first(&c_minor_list_head);
2915 
2916 		lck_mtx_lock_spin_always(&c_seg->c_lock);
2917 
2918 		if (c_seg->c_busy) {
2919 			lck_mtx_unlock_always(c_list_lock);
2920 			c_seg_wait_on_busy(c_seg);
2921 			lck_mtx_lock_spin_always(c_list_lock);
2922 
2923 			continue;
2924 		}
2925 		C_SEG_BUSY(c_seg);
2926 
2927 		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
2928 
2929 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
2930 		if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
2931 			if ((flush_all == TRUE || compressor_needs_to_swap()) && c_swapout_count < C_SWAPOUT_LIMIT) {
2932 				needs_to_swap = TRUE;
2933 			}
2934 
2935 			number_compacted = 0;
2936 		}
2937 		lck_mtx_lock_spin_always(c_list_lock);
2938 	}
2939 
2940 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
2941 }
2942 
2943 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
2944 
2945 static bool
2946 vm_compressor_major_compact_cseg(c_segment_t c_seg, uint32_t* c_seg_considered, bool* bail_wanted_cseg, uint64_t* total_bytes_freed)
2947 {
2948 	/*
2949 	 * Major compaction
2950 	 */
2951 	bool keep_compacting = true, fully_compacted = true;
2952 	queue_head_t *list_head = NULL;
2953 	c_segment_t c_seg_next;
2954 	uint64_t        bytes_to_free = 0, bytes_freed = 0;
2955 	uint32_t        number_considered = 0;
2956 
2957 	if (c_seg->c_state == C_ON_AGE_Q) {
2958 		assert(!c_seg->c_has_donated_pages);
2959 		list_head = &c_age_list_head;
2960 	} else if (c_seg->c_state == C_ON_SWAPPEDIN_Q) {
2961 		assert(c_seg->c_has_donated_pages);
2962 		list_head = &c_late_swappedin_list_head;
2963 	}
2964 
2965 	while (keep_compacting == TRUE) {
2966 		assert(c_seg->c_busy);
2967 
2968 		/* look for another segment to consolidate */
2969 
2970 		c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
2971 
2972 		if (queue_end(list_head, (queue_entry_t)c_seg_next)) {
2973 			break;
2974 		}
2975 
2976 		assert(c_seg_next->c_state == c_seg->c_state);
2977 
2978 		number_considered++;
2979 
2980 		if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
2981 			break;
2982 		}
2983 
2984 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
2985 
2986 		if (c_seg_next->c_busy) {
2987 			/*
2988 			 * We are going to block for our neighbor.
2989 			 * If our c_seg is wanted, we should unbusy
2990 			 * it because we don't know how long we might
2991 			 * have to block here.
2992 			 */
2993 			if (c_seg->c_wanted) {
2994 				lck_mtx_unlock_always(&c_seg_next->c_lock);
2995 				fully_compacted = false;
2996 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
2997 				*bail_wanted_cseg = true;
2998 				break;
2999 			}
3000 
3001 			lck_mtx_unlock_always(c_list_lock);
3002 
3003 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3004 
3005 			c_seg_wait_on_busy(c_seg_next);
3006 			lck_mtx_lock_spin_always(c_list_lock);
3007 
3008 			continue;
3009 		}
3010 		/* grab that segment */
3011 		C_SEG_BUSY(c_seg_next);
3012 
3013 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3014 		if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3015 			/*
3016 			 * found an empty c_segment and freed it
3017 			 * so we can't continue to use c_seg_next
3018 			 */
3019 			bytes_freed += bytes_to_free;
3020 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3021 			continue;
3022 		}
3023 
3024 		/* unlock the list ... */
3025 		lck_mtx_unlock_always(c_list_lock);
3026 
3027 		/* do the major compaction */
3028 
3029 		keep_compacting = c_seg_major_compact(c_seg, c_seg_next);
3030 
3031 		VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3032 
3033 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3034 
3035 		lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3036 		/*
3037 		 * run a minor compaction on the donor segment
3038 		 * since we pulled at least some of its
3039 		 * data into our target...  if we've emptied
3040 		 * it, now is a good time to free it which
3041 		 * c_seg_minor_compaction_and_unlock also takes care of
3042 		 *
3043 		 * by passing TRUE, we ask for c_busy to be cleared
3044 		 * and c_wanted to be taken care of
3045 		 */
3046 		bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3047 		if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3048 			bytes_freed += bytes_to_free;
3049 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3050 		} else {
3051 			bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3052 			bytes_freed += bytes_to_free;
3053 		}
3054 
3055 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3056 
3057 		/* relock the list */
3058 		lck_mtx_lock_spin_always(c_list_lock);
3059 
3060 		if (c_seg->c_wanted) {
3061 			/*
3062 			 * Our c_seg is in demand. Let's
3063 			 * unbusy it and wakeup the waiters
3064 			 * instead of continuing the compaction
3065 			 * because we could be in this loop
3066 			 * for a while.
3067 			 */
3068 			fully_compacted = false;
3069 			*bail_wanted_cseg = true;
3070 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3071 			break;
3072 		}
3073 	} /* major compaction */
3074 
3075 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, *bail_wanted_cseg, 0);
3076 
3077 	*c_seg_considered += number_considered;
3078 	*total_bytes_freed += bytes_freed;
3079 
3080 	lck_mtx_lock_spin_always(&c_seg->c_lock);
3081 	return fully_compacted;
3082 }
3083 
3084 #define TIME_SUB(rsecs, secs, rfrac, frac, unit)                        \
3085 	MACRO_BEGIN                                                     \
3086 	if ((int)((rfrac) -= (frac)) < 0) {                             \
3087 	        (rfrac) += (unit);                                      \
3088 	        (rsecs) -= 1;                                           \
3089 	}                                                               \
3090 	(rsecs) -= (secs);                                              \
3091 	MACRO_END
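/*
 * TIME_SUB performs a borrow-style subtraction on a (seconds, fraction)
 * pair in place. For example, with rsecs = 5, rfrac = 100000000 and
 * secs = 3, frac = 400000000, unit = NSEC_PER_SEC: the fraction goes
 * negative, so one second is borrowed (rfrac becomes 700000000, rsecs 4),
 * then the whole seconds are subtracted, leaving 1s 700000000ns.
 */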
3092 
3093 clock_nsec_t c_process_major_report_over_ms = 9; /* report if over 9 ms */
3094 int c_process_major_yield_after = 1000; /* yield after moving 1,000 segments */
3095 uint64_t c_process_major_reports = 0;
3096 clock_sec_t c_process_major_max_sec = 0;
3097 clock_nsec_t c_process_major_max_nsec = 0;
3098 uint32_t c_process_major_peak_segcount = 0;
3099 static void
3100 vm_compressor_process_major_segments(bool ripe_age_only)
3101 {
3102 	c_segment_t c_seg = NULL;
3103 	int count = 0, total = 0, breaks = 0;
3104 	clock_sec_t start_sec, end_sec;
3105 	clock_nsec_t start_nsec, end_nsec;
3106 	clock_nsec_t report_over_ns;
3107 
3108 	if (queue_empty(&c_major_list_head)) {
3109 		return;
3110 	}
3111 
3112 	// printf("%s: starting to move segments from MAJORQ to AGEQ\n", __FUNCTION__);
3113 	if (c_process_major_report_over_ms != 0) {
3114 		report_over_ns = c_process_major_report_over_ms * NSEC_PER_MSEC;
3115 	} else {
3116 		report_over_ns = (clock_nsec_t)-1;
3117 	}
3118 
3119 	if (ripe_age_only) {
3120 		if (c_overage_swapped_count >= c_overage_swapped_limit) {
3121 			/*
3122 			 * Return while we wait for the overage segments
3123 			 * in our queue to get pushed out first.
3124 			 */
3125 			return;
3126 		}
3127 	}
3128 
3129 	clock_get_system_nanotime(&start_sec, &start_nsec);
3130 	while (!queue_empty(&c_major_list_head)) {
3131 		if (!ripe_age_only) {
3132 			/*
3133 			 * Start from the end to preserve aging order. The newer
3134 			 * segments are at the tail and so need to be inserted in
3135 			 * the aging queue in this way so we have the older segments
3136 			 * at the end of the AGE_Q.
3137 			 */
3138 			c_seg = (c_segment_t)queue_last(&c_major_list_head);
3139 		} else {
3140 			c_seg = (c_segment_t)queue_first(&c_major_list_head);
3141 			if ((start_sec - c_seg->c_creation_ts) < vm_ripe_target_age) {
3142 				/*
3143 				 * We have found the first segment in our queue that is not ripe. Segments after it
3144 				 * will be the same. So let's bail here. Return with c_list_lock held.
3145 				 */
3146 				break;
3147 			}
3148 		}
3149 
3150 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3151 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3152 		lck_mtx_unlock_always(&c_seg->c_lock);
3153 
3154 		count++;
3155 		if (count == c_process_major_yield_after ||
3156 		    queue_empty(&c_major_list_head)) {
3157 			/* done or time to take a break */
3158 		} else {
3159 			/* keep going */
3160 			continue;
3161 		}
3162 
3163 		total += count;
3164 		clock_get_system_nanotime(&end_sec, &end_nsec);
3165 		TIME_SUB(end_sec, start_sec, end_nsec, start_nsec, NSEC_PER_SEC);
3166 		if (end_sec > c_process_major_max_sec) {
3167 			c_process_major_max_sec = end_sec;
3168 			c_process_major_max_nsec = end_nsec;
3169 		} else if (end_sec == c_process_major_max_sec &&
3170 		    end_nsec > c_process_major_max_nsec) {
3171 			c_process_major_max_nsec = end_nsec;
3172 		}
3173 		if (total > c_process_major_peak_segcount) {
3174 			c_process_major_peak_segcount = total;
3175 		}
3176 		if (end_sec > 0 ||
3177 		    end_nsec >= report_over_ns) {
3178 			/* we used more than expected */
3179 			c_process_major_reports++;
3180 			printf("%s: moved %d/%d segments from MAJORQ to AGEQ in %lu.%09u seconds and %d breaks\n",
3181 			    __FUNCTION__, count, total,
3182 			    end_sec, end_nsec, breaks);
3183 		}
3184 		if (queue_empty(&c_major_list_head)) {
3185 			/* done */
3186 			break;
3187 		}
3188 		/* take a break to allow someone else to grab the lock */
3189 		lck_mtx_unlock_always(c_list_lock);
3190 		mutex_pause(0); /* 10 microseconds */
3191 		lck_mtx_lock_spin_always(c_list_lock);
3192 		/* start again */
3193 		clock_get_system_nanotime(&start_sec, &start_nsec);
3194 		count = 0;
3195 		breaks++;
3196 	}
3197 }
3198 
3199 /*
3200  * macOS special swappable csegs -> early_swapin queue
3201  * non-macOS special swappable+non-freezer csegs -> late_swapin queue
3202  * Processing special csegs means minor compacting each cseg and then
3203  * major compacting it and putting them on the early or late
3204  * (depending on platform) swapout queue.
3205  */
3206 static void
3207 vm_compressor_process_special_swapped_in_segments_locked(void)
3208 {
3209 	c_segment_t c_seg = NULL;
3210 	bool            switch_state = true, bail_wanted_cseg = false;
3211 	unsigned int    number_considered = 0, yield_after_considered_per_pass = 0;
3212 	uint64_t        bytes_freed = 0;
3213 	queue_head_t    *special_swappedin_list_head;
3214 
3215 #if XNU_TARGET_OS_OSX
3216 	special_swappedin_list_head = &c_early_swappedin_list_head;
3217 #else /* XNU_TARGET_OS_OSX */
3218 	if (memorystatus_swap_all_apps) {
3219 		special_swappedin_list_head = &c_late_swappedin_list_head;
3220 	} else {
3221 		/* called on unsupported config*/
3222 		/* called on unsupported config */
3223 	}
3224 #endif /* XNU_TARGET_OS_OSX */
3225 
3226 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3227 	while (!queue_empty(special_swappedin_list_head)) {
3228 		c_seg = (c_segment_t)queue_first(special_swappedin_list_head);
3229 
3230 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3231 
3232 		if (c_seg->c_busy) {
3233 			lck_mtx_unlock_always(c_list_lock);
3234 			c_seg_wait_on_busy(c_seg);
3235 			lck_mtx_lock_spin_always(c_list_lock);
3236 			continue;
3237 		}
3238 
3239 		C_SEG_BUSY(c_seg);
3240 		lck_mtx_unlock_always(&c_seg->c_lock);
3241 		lck_mtx_unlock_always(c_list_lock);
3242 
3243 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
3244 
3245 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3246 
3247 		if (c_seg_minor_compaction_and_unlock(c_seg, FALSE /*clear busy?*/)) {
3248 			/*
3249 			 * found an empty c_segment and freed it
3250 			 * so go grab the next guy in the queue
3251 			 */
3252 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3253 			lck_mtx_lock_spin_always(c_list_lock);
3254 			continue;
3255 		}
3256 
3257 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
3258 		lck_mtx_lock_spin_always(c_list_lock);
3259 
3260 		switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3261 		assert(c_seg->c_busy);
3262 		assert(!c_seg->c_on_minorcompact_q);
3263 
3264 		if (switch_state) {
3265 			if (VM_CONFIG_SWAP_IS_ACTIVE || VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3266 				/*
3267 				 * Ordinarily we let swapped in segments age out + get
3268 				 * major compacted with the rest of the c_segs on the ageQ.
3269 				 * But the early donated c_segs, if well compacted, should be
3270 				 * kept ready to be swapped out if needed. These are typically
3271 				 * describing memory belonging to a leaky app (macOS) or a swap-
3272 				 * capable app (iPadOS) and for the latter we can keep these
3273 				 * around longer because we control the triggers in the memorystatus
3274 				 * subsystem
3275 				 */
3276 				c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3277 			}
3278 		}
3279 
3280 		C_SEG_WAKEUP_DONE(c_seg);
3281 
3282 		lck_mtx_unlock_always(&c_seg->c_lock);
3283 
3284 		if (number_considered >= yield_after_considered_per_pass) {
3285 			if (bail_wanted_cseg) {
3286 				/*
3287 				 * We stopped major compactions on a c_seg
3288 				 * that is wanted. We don't know the priority
3289 				 * of the waiter unfortunately but we are at
3290 				 * a very high priority and so, just in case
3291 				 * the waiter is a critical system daemon or
3292 				 * UI thread, let's give up the CPU in case
3293 				 * the system is running a few CPU intensive
3294 				 * tasks.
3295 				 */
3296 				bail_wanted_cseg = false;
3297 				lck_mtx_unlock_always(c_list_lock);
3298 
3299 				mutex_pause(2); /* 100us yield */
3300 
3301 				lck_mtx_lock_spin_always(c_list_lock);
3302 			}
3303 
3304 			number_considered = 0;
3305 		}
3306 	}
3307 }
3308 
3309 void
3310 vm_compressor_process_special_swapped_in_segments(void)
3311 {
3312 	lck_mtx_lock_spin_always(c_list_lock);
3313 	vm_compressor_process_special_swapped_in_segments_locked();
3314 	lck_mtx_unlock_always(c_list_lock);
3315 }
3316 
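/* in seconds: regular swapped-in segments younger than this are left on the swappedin queue unless we are flushing */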
3317 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT   10
3318 /*
3319  * Processing regular csegs means aging them.
3320  */
3321 static void
3322 vm_compressor_process_regular_swapped_in_segments(boolean_t flush_all)
3323 {
3324 	c_segment_t     c_seg;
3325 	clock_sec_t     now;
3326 	clock_nsec_t    nsec;
3327 
3328 	clock_get_system_nanotime(&now, &nsec);
3329 
3330 	while (!queue_empty(&c_regular_swappedin_list_head)) {
3331 		c_seg = (c_segment_t)queue_first(&c_regular_swappedin_list_head);
3332 
3333 		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
3334 			break;
3335 		}
3336 
3337 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3338 
3339 		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3340 		c_seg->c_agedin_ts = (uint32_t) now;
3341 
3342 		lck_mtx_unlock_always(&c_seg->c_lock);
3343 	}
3344 }
3345 
3346 
3347 extern  int     vm_num_swap_files;
3348 extern  int     vm_num_pinned_swap_files;
3349 extern  int     vm_swappin_enabled;
3350 
3351 extern  unsigned int    vm_swapfile_total_segs_used;
3352 extern  unsigned int    vm_swapfile_total_segs_alloced;
3353 
3354 
3355 void
3356 vm_compressor_flush(void)
3357 {
3358 	uint64_t        vm_swap_put_failures_at_start;
3359 	wait_result_t   wait_result = 0;
3360 	AbsoluteTime    startTime, endTime;
3361 	clock_sec_t     now_sec;
3362 	clock_nsec_t    now_nsec;
3363 	uint64_t        nsec;
3364 	c_segment_t     c_seg, c_seg_next;
3365 
3366 	HIBLOG("vm_compressor_flush - starting\n");
3367 
3368 	clock_get_uptime(&startTime);
3369 
3370 	lck_mtx_lock_spin_always(c_list_lock);
3371 
3372 	fastwake_warmup = FALSE;
3373 	compaction_swapper_abort = 1;
3374 
3375 	while (compaction_swapper_running) {
3376 		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
3377 
3378 		lck_mtx_unlock_always(c_list_lock);
3379 
3380 		thread_block(THREAD_CONTINUE_NULL);
3381 
3382 		lck_mtx_lock_spin_always(c_list_lock);
3383 	}
3384 	compaction_swapper_abort = 0;
3385 	compaction_swapper_running = 1;
3386 
3387 	hibernate_flushing = TRUE;
3388 	hibernate_no_swapspace = FALSE;
3389 	hibernate_flush_timed_out = FALSE;
3390 	c_generation_id_flush_barrier = c_generation_id + 1000;
3391 
3392 	clock_get_system_nanotime(&now_sec, &now_nsec);
3393 	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
3394 
3395 	vm_swap_put_failures_at_start = vm_swap_put_failures;
3396 
3397 	/*
3398 	 * We are about to hibernate and so we want all segments flushed to disk.
3399 	 * Segments that are on the major compaction queue won't be considered in
3400 	 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
3401 	 * the ageQ for consideration.
3402 	 */
3403 	if (!queue_empty(&c_major_list_head)) {
3404 		c_seg = (c_segment_t)queue_first(&c_major_list_head);
3405 
3406 		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3407 			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3408 			lck_mtx_lock_spin_always(&c_seg->c_lock);
3409 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3410 			lck_mtx_unlock_always(&c_seg->c_lock);
3411 			c_seg = c_seg_next;
3412 		}
3413 	}
3414 	vm_compressor_compact_and_swap(TRUE);
3415 
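	/* wait for the swapout threads to drain the early/regular/late swapout queues; give up if a 5 second wait times out */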
3416 	while (!queue_empty(&c_early_swapout_list_head) || !queue_empty(&c_regular_swapout_list_head) || !queue_empty(&c_late_swapout_list_head)) {
3417 		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
3418 
3419 		lck_mtx_unlock_always(c_list_lock);
3420 
3421 		wait_result = thread_block(THREAD_CONTINUE_NULL);
3422 
3423 		lck_mtx_lock_spin_always(c_list_lock);
3424 
3425 		if (wait_result == THREAD_TIMED_OUT) {
3426 			break;
3427 		}
3428 	}
3429 	hibernate_flushing = FALSE;
3430 	compaction_swapper_running = 0;
3431 
3432 	if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
3433 		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
3434 		    vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
3435 	}
3436 
3437 	lck_mtx_unlock_always(c_list_lock);
3438 
3439 	thread_wakeup((event_t)&compaction_swapper_running);
3440 
3441 	clock_get_uptime(&endTime);
3442 	SUB_ABSOLUTETIME(&endTime, &startTime);
3443 	absolutetime_to_nanoseconds(endTime, &nsec);
3444 
3445 	HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
3446 	    nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
3447 }
3448 
3449 
3450 int             compaction_swap_trigger_thread_awakened = 0;
3451 
3452 static void
3453 vm_compressor_swap_trigger_thread(void)
3454 {
3455 	current_thread()->options |= TH_OPT_VMPRIV;
3456 
3457 	/*
3458 	 * compaction_swapper_init_now is set when the first call to
3459 	 * vm_consider_waking_compactor_swapper is made from
3460 	 * vm_pageout_scan... since this function is called upon
3461 	 * thread creation, we want to make sure to delay adjusting
3462 	 * the tuneables until we are awakened via vm_pageout_scan
3463 	 * so that we are at a point where the vm_swapfile_open will
3464 	 * be operating on the correct directory (in case the default
3465 	 * of using the VM volume is overridden by the dynamic_pager)
3466 	 */
3467 	if (compaction_swapper_init_now) {
3468 		vm_compaction_swapper_do_init();
3469 
3470 		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
3471 			thread_vm_bind_group_add();
3472 		}
3473 #if CONFIG_THREAD_GROUPS
3474 		thread_group_vm_add();
3475 #endif
3476 		thread_set_thread_name(current_thread(), "VM_cswap_trigger");
3477 		compaction_swapper_init_now = 0;
3478 	}
3479 	lck_mtx_lock_spin_always(c_list_lock);
3480 
3481 	compaction_swap_trigger_thread_awakened++;
3482 	compaction_swapper_awakened = 0;
3483 
3484 	if (compaction_swapper_running == 0) {
3485 		compaction_swapper_running = 1;
3486 
3487 		vm_compressor_compact_and_swap(FALSE);
3488 
3489 		compaction_swapper_running = 0;
3490 	}
3491 	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
3492 
3493 	if (compaction_swapper_running == 0) {
3494 		thread_wakeup((event_t)&compaction_swapper_running);
3495 	}
3496 
3497 	lck_mtx_unlock_always(c_list_lock);
3498 
3499 	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
3500 
3501 	/* NOTREACHED */
3502 }
3503 
3504 
3505 void
3506 vm_compressor_record_warmup_start(void)
3507 {
3508 	c_segment_t     c_seg;
3509 
3510 	lck_mtx_lock_spin_always(c_list_lock);
3511 
3512 	if (first_c_segment_to_warm_generation_id == 0) {
3513 		if (!queue_empty(&c_age_list_head)) {
3514 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3515 
3516 			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3517 		} else {
3518 			first_c_segment_to_warm_generation_id = 0;
3519 		}
3520 
3521 		fastwake_recording_in_progress = TRUE;
3522 	}
3523 	lck_mtx_unlock_always(c_list_lock);
3524 }
3525 
3526 
3527 void
3528 vm_compressor_record_warmup_end(void)
3529 {
3530 	c_segment_t     c_seg;
3531 
3532 	lck_mtx_lock_spin_always(c_list_lock);
3533 
3534 	if (fastwake_recording_in_progress == TRUE) {
3535 		if (!queue_empty(&c_age_list_head)) {
3536 			c_seg = (c_segment_t)queue_last(&c_age_list_head);
3537 
3538 			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3539 		} else {
3540 			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
3541 		}
3542 
3543 		fastwake_recording_in_progress = FALSE;
3544 
3545 		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3546 	}
3547 	lck_mtx_unlock_always(c_list_lock);
3548 }
3549 
3550 
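/* vm_compressor_delay_trim() defers swapfile trimming until this many seconds from now */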
3551 #define DELAY_TRIM_ON_WAKE_SECS         25
3552 
3553 void
3554 vm_compressor_delay_trim(void)
3555 {
3556 	clock_sec_t     sec;
3557 	clock_nsec_t    nsec;
3558 
3559 	clock_get_system_nanotime(&sec, &nsec);
3560 	dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
3561 }
3562 
3563 
3564 void
3565 vm_compressor_do_warmup(void)
3566 {
3567 	lck_mtx_lock_spin_always(c_list_lock);
3568 
3569 	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
3570 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3571 
3572 		lck_mtx_unlock_always(c_list_lock);
3573 		return;
3574 	}
3575 
3576 	if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
3577 		fastwake_warmup = TRUE;
3578 
3579 		compaction_swapper_awakened = 1;
3580 		thread_wakeup((event_t)&c_compressor_swap_trigger);
3581 	}
3582 	lck_mtx_unlock_always(c_list_lock);
3583 }
3584 
3585 void
3586 do_fastwake_warmup_all(void)
3587 {
3588 	lck_mtx_lock_spin_always(c_list_lock);
3589 
3590 	if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
3591 		lck_mtx_unlock_always(c_list_lock);
3592 		return;
3593 	}
3594 
3595 	fastwake_warmup = TRUE;
3596 
3597 	do_fastwake_warmup(&c_swappedout_list_head, TRUE);
3598 
3599 	do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
3600 
3601 	fastwake_warmup = FALSE;
3602 
3603 	lck_mtx_unlock_always(c_list_lock);
3604 }
3605 
3606 void
3607 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
3608 {
3609 	c_segment_t     c_seg = NULL;
3610 	AbsoluteTime    startTime, endTime;
3611 	uint64_t        nsec;
3612 
3613 
3614 	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3615 
3616 	clock_get_uptime(&startTime);
3617 
3618 	lck_mtx_unlock_always(c_list_lock);
3619 
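	/* issue the warmup reads at a low I/O priority (TIER2); TIER0 is restored below once the warmup completes */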
3620 	proc_set_thread_policy(current_thread(),
3621 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
3622 
3623 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
3624 
3625 	lck_mtx_lock_spin_always(c_list_lock);
3626 
3627 	while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
3628 		c_seg = (c_segment_t) queue_first(c_queue);
3629 
3630 		if (consider_all_cseg == FALSE) {
3631 			if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
3632 			    c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
3633 				break;
3634 			}
3635 
3636 			if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
3637 				break;
3638 			}
3639 		}
3640 
3641 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3642 		lck_mtx_unlock_always(c_list_lock);
3643 
3644 		if (c_seg->c_busy) {
3645 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3646 			c_seg_wait_on_busy(c_seg);
3647 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3648 		} else {
3649 			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
3650 				lck_mtx_unlock_always(&c_seg->c_lock);
3651 			}
3652 			c_segment_warmup_count++;
3653 
3654 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
3655 			vm_pageout_io_throttle();
3656 			PAGE_REPLACEMENT_DISALLOWED(TRUE);
3657 		}
3658 		lck_mtx_lock_spin_always(c_list_lock);
3659 	}
3660 	lck_mtx_unlock_always(c_list_lock);
3661 
3662 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
3663 
3664 	proc_set_thread_policy(current_thread(),
3665 	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3666 
3667 	clock_get_uptime(&endTime);
3668 	SUB_ABSOLUTETIME(&endTime, &startTime);
3669 	absolutetime_to_nanoseconds(endTime, &nsec);
3670 
3671 	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3672 
3673 	lck_mtx_lock_spin_always(c_list_lock);
3674 
3675 	if (consider_all_cseg == FALSE) {
3676 		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3677 	}
3678 }
3679 
3680 extern bool     vm_swapout_thread_running;
3681 extern boolean_t        compressor_store_stop_compaction;
3682 
3683 void
3684 vm_compressor_compact_and_swap(boolean_t flush_all)
3685 {
3686 	c_segment_t     c_seg;
3687 	bool            switch_state, bail_wanted_cseg = false;
3688 	clock_sec_t     now;
3689 	clock_nsec_t    nsec;
3690 	mach_timespec_t start_ts, end_ts;
3691 	unsigned int    number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3692 	uint64_t        bytes_freed, delta_usec;
3693 	uint32_t        c_swapout_count = 0;
3694 
3695 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3696 
3697 	if (fastwake_warmup == TRUE) {
3698 		uint64_t        starting_warmup_count;
3699 
3700 		starting_warmup_count = c_segment_warmup_count;
3701 
3702 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3703 		    first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3704 		do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3705 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3706 
3707 		fastwake_warmup = FALSE;
3708 	}
3709 
3710 #if (XNU_TARGET_OS_OSX && __arm64__)
3711 	/*
3712 	 * Re-considering major csegs showed benefits on all platforms by
3713 	 * significantly reducing fragmentation and getting back memory.
3714 	 * However, on smaller devices, eg watch, there was increased power
3715 	 * use for the additional compactions. And the turnover in csegs on
3716 	 * those smaller platforms is high enough in the decompression/free
3717 	 * path that we can skip reconsidering them here because we already
3718 	 * consider them for major compaction in those paths.
3719 	 */
3720 	vm_compressor_process_major_segments(false /*all segments and not just the ripe-aged ones*/);
3721 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3722 
3723 	/*
3724 	 * it's possible for the c_age_list_head to be empty if we
3725 	 * hit our limits for growing the compressor pool and we subsequently
3726 	 * hibernated... on the next hibernation we could see the queue as
3727 	 * empty and not proceeed even though we have a bunch of segments on
3728 	 * empty and not proceed even though we have a bunch of segments on
3729 	 */
3730 	vm_compressor_do_delayed_compactions(flush_all);
3731 	vm_compressor_process_special_swapped_in_segments_locked();
3732 	vm_compressor_process_regular_swapped_in_segments(flush_all);
3733 
3734 	/*
3735 	 * we only need to grab the timestamp once per
3736 	 * invocation of this function since the
3737 	 * timescale we're interested in is measured
3738 	 * in days
3739 	 */
3740 	clock_get_system_nanotime(&now, &nsec);
3741 
3742 	start_ts.tv_sec = (int) now;
3743 	start_ts.tv_nsec = nsec;
3744 	delta_usec = 0;
3745 	number_considered = 0;
3746 	wanted_cseg_found = 0;
3747 	number_yields = 0;
3748 	bytes_freed = 0;
3749 	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3750 
3751 #if 0
3752 	/**
3753 	 * SW: Need to figure out how to properly rate limit this log because it is currently way too
3754 	 * noisy. rdar://99379414 (Figure out how to rate limit the fragmentation level logging)
3755 	 */
3756 	os_log(OS_LOG_DEFAULT, "memorystatus: before compaction fragmentation level %u\n", vm_compressor_fragmentation_level());
3757 #endif
3758 
3759 	while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
3760 		if (hibernate_flushing == TRUE) {
3761 			clock_sec_t     sec;
3762 
3763 			if (hibernate_should_abort()) {
3764 				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
3765 				break;
3766 			}
3767 			if (hibernate_no_swapspace == TRUE) {
3768 				HIBLOG("vm_compressor_flush - out of swap space\n");
3769 				break;
3770 			}
3771 			if (vm_swap_files_pinned() == FALSE) {
3772 				HIBLOG("vm_compressor_flush - unpinned swap files\n");
3773 				break;
3774 			}
3775 			if (hibernate_in_progress_with_pinned_swap == TRUE &&
3776 			    (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
3777 				HIBLOG("vm_compressor_flush - out of pinned swap space\n");
3778 				break;
3779 			}
3780 			clock_get_system_nanotime(&sec, &nsec);
3781 
3782 			if (sec > hibernate_flushing_deadline) {
3783 				hibernate_flush_timed_out = TRUE;
3784 				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
3785 				break;
3786 			}
3787 		}
3788 
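		/* if the swapout queues are at their limit, wake the swapout thread and wait up to 100ms for it to drain them before generating more work */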
3789 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3790 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3791 			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
3792 
3793 			if (!vm_swapout_thread_running) {
3794 				thread_wakeup((event_t)&vm_swapout_thread);
3795 			}
3796 
3797 			lck_mtx_unlock_always(c_list_lock);
3798 
3799 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
3800 
3801 			thread_block(THREAD_CONTINUE_NULL);
3802 
3803 			lck_mtx_lock_spin_always(c_list_lock);
3804 		}
3805 		/*
3806 		 * Minor compactions
3807 		 */
3808 		vm_compressor_do_delayed_compactions(flush_all);
3809 
3810 		/*
3811 		 * vm_compressor_process_early_swapped_in_segments()
3812 		 * might be too aggressive. So OFF for now.
3813 		 */
3814 		vm_compressor_process_regular_swapped_in_segments(flush_all);
3815 
3816 		/* Recompute because we dropped the c_list_lock above */
3817 		c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3818 		if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3819 			/*
3820 			 * we timed out on the above thread_block;
3821 			 * let's loop around and try again.
3822 			 * the timeout allows us to continue
3823 			 * to do minor compactions to make
3824 			 * more memory available
3825 			 */
3826 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
3827 
3828 			continue;
3829 		}
3830 
3831 		/*
3832 		 * Swap out segments?
3833 		 */
3834 		if (flush_all == FALSE) {
3835 			bool needs_to_swap;
3836 
3837 			lck_mtx_unlock_always(c_list_lock);
3838 
3839 			needs_to_swap = compressor_needs_to_swap();
3840 
3841 			lck_mtx_lock_spin_always(c_list_lock);
3842 
3843 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
3844 
3845 			if (!needs_to_swap) {
3846 				break;
3847 			}
3848 		}
3849 		if (queue_empty(&c_age_list_head)) {
3850 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
3851 			break;
3852 		}
3853 		c_seg = (c_segment_t) queue_first(&c_age_list_head);
3854 
3855 		assert(c_seg->c_state == C_ON_AGE_Q);
3856 
3857 		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
3858 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
3859 			break;
3860 		}
3861 
3862 		lck_mtx_lock_spin_always(&c_seg->c_lock);
3863 
3864 		if (c_seg->c_busy) {
3865 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
3866 
3867 			lck_mtx_unlock_always(c_list_lock);
3868 			c_seg_wait_on_busy(c_seg);
3869 			lck_mtx_lock_spin_always(c_list_lock);
3870 
3871 			continue;
3872 		}
3873 		C_SEG_BUSY(c_seg);
3874 
3875 		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
3876 			/*
3877 			 * found an empty c_segment and freed it
3878 			 * so go grab the next guy in the queue
3879 			 */
3880 			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
3881 			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3882 			continue;
3883 		}
3884 
3885 		switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3886 		if (bail_wanted_cseg) {
3887 			wanted_cseg_found++;
3888 			bail_wanted_cseg = false;
3889 		}
3890 
3891 		assert(c_seg->c_busy);
3892 		assert(!c_seg->c_on_minorcompact_q);
3893 
3894 		if (switch_state) {
3895 			if (VM_CONFIG_SWAP_IS_ACTIVE) {
3896 				int new_state = C_ON_SWAPOUT_Q;
3897 #if (XNU_TARGET_OS_OSX && __arm64__)
3898 				if (flush_all == false && compressor_swapout_conditions_met() == false) {
3899 					new_state = C_ON_MAJORCOMPACT_Q;
3900 				}
3901 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3902 
3903 				if (new_state == C_ON_SWAPOUT_Q) {
3904 					/*
3905 					 * This mode of putting a generic c_seg on the swapout list is
3906 					 * only supported when we have general swapping enabled
3907 					 */
3908 					clock_sec_t lnow;
3909 					clock_nsec_t lnsec;
3910 					clock_get_system_nanotime(&lnow, &lnsec);
3911 					if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
3912 						vmcs_stats.unripe_under_30s++;
3913 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
3914 						vmcs_stats.unripe_under_60s++;
3915 					} else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
3916 						vmcs_stats.unripe_under_300s++;
3917 					}
3918 				}
3919 
3920 				c_seg_switch_state(c_seg, new_state, FALSE);
3921 			} else {
3922 				if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
3923 					assert(VM_CONFIG_SWAP_IS_PRESENT);
3924 					/*
3925 					 * we are running compressor sweeps with swap-behind;
3926 					 * make sure the c_seg has aged enough before swapping it
3927 					 * out...
3928 					 */
3929 					if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
3930 						c_seg->c_overage_swap = TRUE;
3931 						c_overage_swapped_count++;
3932 						c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3933 					}
3934 				}
3935 			}
3936 			if (c_seg->c_state == C_ON_AGE_Q) {
3937 				/*
3938 				 * this c_seg didn't get moved to the swapout queue
3939 				 * so we need to move it out of the way...
3940 				 * we just did a major compaction on it so put it
3941 				 * on that queue
3942 				 */
3943 				c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
3944 			} else {
3945 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
3946 				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
3947 			}
3948 		}
3949 
3950 		C_SEG_WAKEUP_DONE(c_seg);
3951 
3952 		lck_mtx_unlock_always(&c_seg->c_lock);
3953 
3954 		/*
3955 		 * On systems _with_ general swap, regardless of jetsam, we wake up the swapout thread here.
3956 		 * On systems _without_ general swap, it's the responsibility of the memorystatus
3957 		 * subsystem to wake up the swapper.
3958 		 * TODO: When we have full jetsam support on a swap enabled system, we will need to revisit
3959 		 * this policy.
3960 		 */
3961 		if (VM_CONFIG_SWAP_IS_ACTIVE && c_swapout_count) {
3962 			/*
3963 			 * We don't pause/yield here because we will either
3964 			 * yield below or at the top of the loop with the
3965 			 * assert_wait_timeout.
3966 			 */
3967 			if (!vm_swapout_thread_running) {
3968 				thread_wakeup((event_t)&vm_swapout_thread);
3969 			}
3970 		}
3971 
3972 		if (number_considered >= yield_after_considered_per_pass) {
3973 			if (wanted_cseg_found) {
3974 				/*
3975 				 * We stopped major compactions on a c_seg
3976 				 * that is wanted. We don't know the priority
3977 				 * of the waiter unfortunately but we are at
3978 				 * a very high priority and so, just in case
3979 				 * the waiter is a critical system daemon or
3980 				 * UI thread, let's give up the CPU in case
3981 				 * the system is running a few CPU intensive
3982 				 * tasks.
3983 				 */
3984 				lck_mtx_unlock_always(c_list_lock);
3985 
3986 				mutex_pause(2); /* 100us yield */
3987 
3988 				number_yields++;
3989 
3990 				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
3991 
3992 				lck_mtx_lock_spin_always(c_list_lock);
3993 			}
3994 
3995 			number_considered = 0;
3996 			wanted_cseg_found = 0;
3997 		}
3998 	}
3999 	clock_get_system_nanotime(&now, &nsec);
4000 
4001 	end_ts = major_compact_ts = (mach_timespec_t){.tv_sec = (int)now, .tv_nsec = nsec};
4002 
4003 	SUB_MACH_TIMESPEC(&end_ts, &start_ts);
4004 
4005 	delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
4006 
4007 	delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
4008 
4009 	c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);
4010 
4011 	if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
4012 		c_seg_major_compact_stats_now = 0;
4013 	} else {
4014 		c_seg_major_compact_stats_now++;
4015 	}
4016 
4017 	assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);
4018 
4019 	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
4020 }
4021 
4022 
4023 static c_segment_t
4024 c_seg_allocate(c_segment_t *current_chead)
4025 {
4026 	c_segment_t     c_seg;
4027 	int             min_needed;
4028 	int             size_to_populate;
4029 	c_segment_t     *donate_queue_head;
4030 
4031 #if XNU_TARGET_OS_OSX
4032 	if (vm_compressor_low_on_space()) {
4033 		vm_compressor_take_paging_space_action();
4034 	}
4035 #endif /* XNU_TARGET_OS_OSX */
4036 
4037 	if ((c_seg = *current_chead) == NULL) {
4038 		uint32_t        c_segno;
4039 
4040 		lck_mtx_lock_spin_always(c_list_lock);
4041 
4042 		while (c_segments_busy == TRUE) {
4043 			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
4044 
4045 			lck_mtx_unlock_always(c_list_lock);
4046 
4047 			thread_block(THREAD_CONTINUE_NULL);
4048 
4049 			lck_mtx_lock_spin_always(c_list_lock);
4050 		}
4051 		if (c_free_segno_head == (uint32_t)-1) {
4052 			uint32_t        c_segments_available_new;
4053 			uint32_t        compressed_pages;
4054 
4055 #if CONFIG_FREEZE
4056 			if (freezer_incore_cseg_acct) {
4057 				compressed_pages = c_segment_pages_compressed_incore;
4058 			} else {
4059 				compressed_pages = c_segment_pages_compressed;
4060 			}
4061 #else
4062 			compressed_pages = c_segment_pages_compressed;
4063 #endif /* CONFIG_FREEZE */
4064 
4065 			if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) {
4066 				lck_mtx_unlock_always(c_list_lock);
4067 
4068 				return NULL;
4069 			}
4070 			c_segments_busy = TRUE;
4071 			lck_mtx_unlock_always(c_list_lock);
4072 
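			/* grow the c_segments table by one page and thread the newly exposed entries onto the free-segno list */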
4073 			kernel_memory_populate((vm_offset_t)c_segments_next_page,
4074 			    PAGE_SIZE, KMA_NOFAIL | KMA_KOBJECT,
4075 			    VM_KERN_MEMORY_COMPRESSOR);
4076 			c_segments_next_page += PAGE_SIZE;
4077 
4078 			c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
4079 
4080 			if (c_segments_available_new > c_segments_limit) {
4081 				c_segments_available_new = c_segments_limit;
4082 			}
4083 
4084 			for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
4085 				c_segments[c_segno - 1].c_segno = c_segno;
4086 			}
4087 
4088 			lck_mtx_lock_spin_always(c_list_lock);
4089 
4090 			c_segments[c_segno - 1].c_segno = c_free_segno_head;
4091 			c_free_segno_head = c_segments_available;
4092 			c_segments_available = c_segments_available_new;
4093 
4094 			c_segments_busy = FALSE;
4095 			thread_wakeup((event_t) (&c_segments_busy));
4096 		}
4097 		c_segno = c_free_segno_head;
4098 		assert(c_segno >= 0 && c_segno < c_segments_limit);
4099 
4100 		c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
4101 
4102 		/*
4103 		 * do the rest of the bookkeeping now while we're still behind
4104 		 * the list lock and grab our generation id now into a local
4105 		 * so that we can install it once we have the c_seg allocated
4106 		 */
4107 		c_segment_count++;
4108 		if (c_segment_count > c_segment_count_max) {
4109 			c_segment_count_max = c_segment_count;
4110 		}
4111 
4112 		lck_mtx_unlock_always(c_list_lock);
4113 
4114 		c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
4115 
4116 		c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
4117 
4118 		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
4119 
4120 		c_seg->c_state = C_IS_EMPTY;
4121 		c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
4122 		c_seg->c_mysegno = c_segno;
4123 
4124 		lck_mtx_lock_spin_always(c_list_lock);
4125 		c_empty_count++;
4126 		c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
4127 		c_segments[c_segno].c_seg = c_seg;
4128 		assert(c_segments[c_segno].c_segno > c_segments_available);
4129 		lck_mtx_unlock_always(c_list_lock);
4130 
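		/* if this segment is being filled for one of the per-thread swapout donation heads, mark its pages as donated */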
4131 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4132 #if XNU_TARGET_OS_OSX
4133 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4134 #else /* XNU_TARGET_OS_OSX */
4135 			if (memorystatus_swap_all_apps) {
4136 				donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4137 			} else {
4138 				donate_queue_head = NULL;
4139 			}
4140 #endif /* XNU_TARGET_OS_OSX */
4141 
4142 			if (current_chead == donate_queue_head) {
4143 				c_seg->c_has_donated_pages = 1;
4144 				break;
4145 			}
4146 		}
4147 
4148 		*current_chead = c_seg;
4149 
4150 #if DEVELOPMENT || DEBUG
4151 		C_SEG_MAKE_WRITEABLE(c_seg);
4152 #endif
4153 	}
4154 	c_seg_alloc_nextslot(c_seg);
4155 
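	/* lazily back the segment buffer with physical pages: populate more only when the remaining populated space might not hold the next compression, capped at C_SEG_MAX_POPULATE_SIZE */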
4156 	size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
4157 
4158 	if (size_to_populate) {
4159 		min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
4160 
4161 		if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
4162 			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
4163 				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
4164 			}
4165 
4166 			OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed);
4167 
4168 			kernel_memory_populate(
4169 				(vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
4170 				size_to_populate,
4171 				KMA_NOFAIL | KMA_COMPRESSOR,
4172 				VM_KERN_MEMORY_COMPRESSOR);
4173 		} else {
4174 			size_to_populate = 0;
4175 		}
4176 	}
4177 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
4178 
4179 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4180 
4181 	if (size_to_populate) {
4182 		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
4183 	}
4184 
4185 	return c_seg;
4186 }
4187 
4188 #if DEVELOPMENT || DEBUG
4189 #if CONFIG_FREEZE
4190 extern boolean_t memorystatus_freeze_to_memory;
4191 #endif /* CONFIG_FREEZE */
4192 #endif /* DEVELOPMENT || DEBUG */
4193 uint64_t c_seg_total_donated_bytes = 0; /* For testing/debugging only for now. Remove and add new counters for vm_stat.*/
4194 
4195 uint64_t c_seg_filled_no_contention = 0;
4196 uint64_t c_seg_filled_contention = 0;
4197 clock_sec_t c_seg_filled_contention_sec_max = 0;
4198 clock_nsec_t c_seg_filled_contention_nsec_max = 0;
4199 
4200 static void
4201 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
4202 {
4203 	uint32_t        unused_bytes;
4204 	uint32_t        offset_to_depopulate;
4205 	int             new_state = C_ON_AGE_Q;
4206 	clock_sec_t     sec;
4207 	clock_nsec_t    nsec;
4208 	bool            head_insert = false, wakeup_swapout_thread = false;
4209 
4210 	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
4211 
4212 	if (unused_bytes) {
4213 		offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
4214 
4215 		/*
4216 		 *  release the extra physical page(s) at the end of the segment
4217 		 */
4218 		lck_mtx_unlock_always(&c_seg->c_lock);
4219 
4220 		kernel_memory_depopulate(
4221 			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
4222 			unused_bytes,
4223 			KMA_COMPRESSOR,
4224 			VM_KERN_MEMORY_COMPRESSOR);
4225 
4226 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4227 
4228 		c_seg->c_populated_offset = offset_to_depopulate;
4229 	}
4230 	assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
4231 
4232 #if DEVELOPMENT || DEBUG
4233 	{
4234 		boolean_t       c_seg_was_busy = FALSE;
4235 
4236 		if (!c_seg->c_busy) {
4237 			C_SEG_BUSY(c_seg);
4238 		} else {
4239 			c_seg_was_busy = TRUE;
4240 		}
4241 
4242 		lck_mtx_unlock_always(&c_seg->c_lock);
4243 
4244 		C_SEG_WRITE_PROTECT(c_seg);
4245 
4246 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4247 
4248 		if (c_seg_was_busy == FALSE) {
4249 			C_SEG_WAKEUP_DONE(c_seg);
4250 		}
4251 	}
4252 #endif
4253 
4254 #if CONFIG_FREEZE
4255 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
4256 	    VM_CONFIG_SWAP_IS_PRESENT &&
4257 	    VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
4258 #if DEVELOPMENT || DEBUG
4259 	    && !memorystatus_freeze_to_memory
4260 #endif /* DEVELOPMENT || DEBUG */
4261 	    ) {
4262 		new_state = C_ON_SWAPOUT_Q;
4263 		wakeup_swapout_thread = true;
4264 	}
4265 #endif /* CONFIG_FREEZE */
4266 
4267 	if (vm_darkwake_mode == TRUE) {
4268 		new_state = C_ON_SWAPOUT_Q;
4269 		head_insert = true;
4270 		wakeup_swapout_thread = true;
4271 	} else {
4272 		c_segment_t *donate_queue_head;
4273 		for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4274 #if XNU_TARGET_OS_OSX
4275 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4276 #else /* XNU_TARGET_OS_OSX */
4277 			donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4278 #endif /* XNU_TARGET_OS_OSX */
4279 
4280 			if (current_chead == donate_queue_head) {
4281 				assert(c_seg->c_has_donated_pages);
4282 				new_state = C_ON_SWAPOUT_Q;
4283 				c_seg_total_donated_bytes += c_seg->c_bytes_used;
4284 				break;
4285 			}
4286 		}
4287 	}
4288 
4289 	clock_get_system_nanotime(&sec, &nsec);
4290 	c_seg->c_creation_ts = (uint32_t)sec;
4291 
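	/* note whether we had to contend for the list lock and track the longest such wait */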
4292 	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4293 		clock_sec_t     sec2;
4294 		clock_nsec_t    nsec2;
4295 
4296 		lck_mtx_lock_spin_always(c_list_lock);
4297 		clock_get_system_nanotime(&sec2, &nsec2);
4298 		TIME_SUB(sec2, sec, nsec2, nsec, NSEC_PER_SEC);
4299 		// printf("FBDP %s: head %p waited for c_list_lock for %lu.%09u seconds\n", __FUNCTION__, current_chead, sec2, nsec2);
4300 		if (sec2 > c_seg_filled_contention_sec_max) {
4301 			c_seg_filled_contention_sec_max = sec2;
4302 			c_seg_filled_contention_nsec_max = nsec2;
4303 		} else if (sec2 == c_seg_filled_contention_sec_max &&
4304 		    nsec2 > c_seg_filled_contention_nsec_max) {
4305 			c_seg_filled_contention_nsec_max = nsec2;
4306 		}
4307 		c_seg_filled_contention++;
4308 	} else {
4309 		c_seg_filled_no_contention++;
4310 	}
4311 
4312 #if CONFIG_FREEZE
4313 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead)) {
4314 		if (freezer_context_global.freezer_ctx_task->donates_own_pages) {
4315 			assert(!c_seg->c_has_donated_pages);
4316 			c_seg->c_has_donated_pages = 1;
4317 			OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore_late_swapout);
4318 		}
4319 		c_seg->c_has_freezer_pages = 1;
4320 	}
4321 #endif /* CONFIG_FREEZE */
4322 
4323 	c_seg->c_generation_id = c_generation_id++;
4324 	c_seg_switch_state(c_seg, new_state, head_insert);
4325 
4326 #if CONFIG_FREEZE
4327 	/*
4328 	 * Donated segments count as frozen to swap if we go through the freezer.
4329 	 * TODO: What we need is a new ledger and cseg state that can describe
4330 	 * a frozen cseg from a donated task so we can accurately decrement it on
4331 	 * swapins.
4332 	 */
4333 	if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) && (c_seg->c_state == C_ON_SWAPOUT_Q)) {
4334 		/*
4335 		 * darkwake and freezer can't co-exist.
4336 		 * We'll need to fix this accounting as a start.
4337 		 * And early donation c_segs are separate from frozen c_segs.
4338 		 */
4339 		assert(vm_darkwake_mode == FALSE);
4340 		c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
4341 		freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
4342 	}
4343 #endif /* CONFIG_FREEZE */
4344 
4345 	if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4346 #if CONFIG_FREEZE
4347 		assert(c_seg->c_task_owner == NULL);
4348 #endif /* CONFIG_FREEZE */
4349 		c_seg_need_delayed_compaction(c_seg, TRUE);
4350 	}
4351 
4352 	lck_mtx_unlock_always(c_list_lock);
4353 
4354 	if (wakeup_swapout_thread) {
4355 		/*
4356 		 * Darkwake and Freeze configs always
4357 		 * wake up the swapout thread because
4358 		 * the compactor thread that normally handles
4359 		 * it may not be running as much in these
4360 		 * configs.
4361 		 */
4362 		thread_wakeup((event_t)&vm_swapout_thread);
4363 	}
4364 
4365 	*current_chead = NULL;
4366 }
4367 
4368 /*
4369  * returns with c_seg locked
4370  */
4371 void
4372 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
4373 {
4374 	clock_sec_t     sec;
4375 	clock_nsec_t    nsec;
4376 
4377 	clock_get_system_nanotime(&sec, &nsec);
4378 
4379 	lck_mtx_lock_spin_always(c_list_lock);
4380 	lck_mtx_lock_spin_always(&c_seg->c_lock);
4381 
4382 	assert(c_seg->c_busy_swapping);
4383 	assert(c_seg->c_busy);
4384 
4385 	c_seg->c_busy_swapping = 0;
4386 
4387 	if (c_seg->c_overage_swap == TRUE) {
4388 		c_overage_swapped_count--;
4389 		c_seg->c_overage_swap = FALSE;
4390 	}
4391 	if (has_data == TRUE) {
4392 		if (age_on_swapin_q == TRUE || c_seg->c_has_donated_pages) {
4393 #if CONFIG_FREEZE
4394 			/*
4395 			 * If a segment has both identities, frozen and donated bits set, the donated
4396 			 * bit wins on the swapin path. This is because the segment is being swapped back
4397 			 * in and so is in demand and should be given more time to spend in memory before
4398 			 * being swapped back out under pressure.
4399 			 */
4400 			if (c_seg->c_has_donated_pages) {
4401 				c_seg->c_has_freezer_pages = 0;
4402 			}
4403 #endif /* CONFIG_FREEZE */
4404 			c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
4405 		} else {
4406 			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
4407 		}
4408 
4409 		if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4410 			c_seg_need_delayed_compaction(c_seg, TRUE);
4411 		}
4412 	} else {
4413 		c_seg->c_store.c_buffer = (int32_t*) NULL;
4414 		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4415 
4416 		c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
4417 	}
4418 	c_seg->c_swappedin_ts = (uint32_t)sec;
4419 	c_seg->c_swappedin = true;
4420 
4421 	lck_mtx_unlock_always(c_list_lock);
4422 }
4423 
4424 
4425 
4426 /*
4427  * c_seg has to be locked and is returned locked if the c_seg isn't freed
4428  * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
4429  * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
4430  */
4431 
4432 int
4433 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
4434 {
4435 	vm_offset_t     addr = 0;
4436 	uint32_t        io_size = 0;
4437 	uint64_t        f_offset;
4438 	thread_pri_floor_t token;
4439 
4440 	assert(C_SEG_IS_ONDISK(c_seg));
4441 
4442 #if !CHECKSUM_THE_SWAP
4443 	c_seg_trim_tail(c_seg);
4444 #endif
4445 	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
4446 	f_offset = c_seg->c_store.c_swap_handle;
4447 
4448 	C_SEG_BUSY(c_seg);
4449 	c_seg->c_busy_swapping = 1;
4450 
4451 	/*
4452 	 * This thread is likely going to block for I/O.
4453 	 * Make sure it is ready to run when the I/O completes because
4454 	 * it needs to clear the busy bit on the c_seg so that other
4455 	 * waiting threads can make progress too.
4456 	 */
4457 	token = thread_priority_floor_start();
4458 	lck_mtx_unlock_always(&c_seg->c_lock);
4459 
4460 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4461 
4462 	addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
4463 	c_seg->c_store.c_buffer = (int32_t*) addr;
4464 
4465 	kernel_memory_populate(addr, io_size, KMA_NOFAIL | KMA_COMPRESSOR,
4466 	    VM_KERN_MEMORY_COMPRESSOR);
4467 
4468 	if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
4469 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4470 
4471 		kernel_memory_depopulate(addr, io_size, KMA_COMPRESSOR,
4472 		    VM_KERN_MEMORY_COMPRESSOR);
4473 
4474 		c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
4475 	} else {
4476 #if ENCRYPTED_SWAP
4477 		vm_swap_decrypt(c_seg);
4478 #endif /* ENCRYPTED_SWAP */
4479 
4480 #if CHECKSUM_THE_SWAP
4481 		if (c_seg->cseg_swap_size != io_size) {
4482 			panic("swapin size doesn't match swapout size");
4483 		}
4484 
4485 		if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
4486 			panic("c_seg_swapin - Swap hash mismatch");
4487 		}
4488 #endif /* CHECKSUM_THE_SWAP */
4489 
4490 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4491 
4492 		c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
4493 
4494 #if CONFIG_FREEZE
4495 		/*
4496 		 * c_seg_swapin_requeue() returns with the c_seg lock held.
4497 		 */
4498 		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4499 			assert(c_seg->c_busy);
4500 
4501 			lck_mtx_unlock_always(&c_seg->c_lock);
4502 			lck_mtx_lock_spin_always(c_list_lock);
4503 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4504 		}
4505 
4506 		if (c_seg->c_task_owner) {
4507 			c_seg_update_task_owner(c_seg, NULL);
4508 		}
4509 
4510 		lck_mtx_unlock_always(c_list_lock);
4511 
4512 		OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore);
4513 		if (c_seg->c_has_donated_pages) {
4514 			OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore_late_swapout);
4515 		}
4516 #endif /* CONFIG_FREEZE */
4517 
4518 		OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
4519 
4520 		if (force_minor_compaction == TRUE) {
4521 			if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
4522 				/*
4523 				 * c_seg was completely empty so it was freed,
4524 				 * so be careful not to reference it again
4525 				 *
4526 				 * Drop the boost so that the thread priority
4527 				 * is returned back to where it is supposed to be.
4528 				 */
4529 				thread_priority_floor_end(&token);
4530 				return 1;
4531 			}
4532 
4533 			lck_mtx_lock_spin_always(&c_seg->c_lock);
4534 		}
4535 	}
4536 	C_SEG_WAKEUP_DONE(c_seg);
4537 
4538 	/*
4539 	 * Drop the boost so that the thread priority
4540 	 * is returned back to where it is supposed to be.
4541 	 */
4542 	thread_priority_floor_end(&token);
4543 
4544 	return 0;
4545 }
4546 
4547 
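/* lock-free release of a reference taken by c_segment_sv_hash_insert(): retry the 64-bit compare-and-swap until the decremented refcount is published */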
4548 static void
4549 c_segment_sv_hash_drop_ref(int hash_indx)
4550 {
4551 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4552 
4553 	while (1) {
4554 		o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
4555 
4556 		n_sv_he.he_ref = o_sv_he.he_ref - 1;
4557 		n_sv_he.he_data = o_sv_he.he_data;
4558 
4559 		if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
4560 			if (n_sv_he.he_ref == 0) {
4561 				OSAddAtomic(-1, &c_segment_svp_in_hash);
4562 			}
4563 			break;
4564 		}
4565 	}
4566 }
4567 
4568 
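/*
 * lock-free insert for single-value pages: linearly probe up to C_SV_HASH_MAX_MISS
 * entries, taking a reference on a matching or empty entry via 64-bit compare-and-swap.
 * Returns the hash index, or -1 if no slot could be claimed.
 */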
4569 static int
4570 c_segment_sv_hash_insert(uint32_t data)
4571 {
4572 	int             hash_sindx;
4573 	int             misses;
4574 	struct c_sv_hash_entry o_sv_he, n_sv_he;
4575 	boolean_t       got_ref = FALSE;
4576 
4577 	if (data == 0) {
4578 		OSAddAtomic(1, &c_segment_svp_zero_compressions);
4579 	} else {
4580 		OSAddAtomic(1, &c_segment_svp_nonzero_compressions);
4581 	}
4582 
4583 	hash_sindx = data & C_SV_HASH_MASK;
4584 
4585 	for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
4586 		o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4587 
4588 		while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
4589 			n_sv_he.he_ref = o_sv_he.he_ref + 1;
4590 			n_sv_he.he_data = data;
4591 
4592 			if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
4593 				if (n_sv_he.he_ref == 1) {
4594 					OSAddAtomic(1, &c_segment_svp_in_hash);
4595 				}
4596 				got_ref = TRUE;
4597 				break;
4598 			}
4599 			o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4600 		}
4601 		if (got_ref == TRUE) {
4602 			break;
4603 		}
4604 		hash_sindx++;
4605 
4606 		if (hash_sindx == C_SV_HASH_SIZE) {
4607 			hash_sindx = 0;
4608 		}
4609 	}
4610 	if (got_ref == FALSE) {
4611 		return -1;
4612 	}
4613 
4614 	return hash_sindx;
4615 }
4616 
4617 
4618 #if RECORD_THE_COMPRESSED_DATA
4619 
4620 static void
4621 c_compressed_record_data(char *src, int c_size)
4622 {
4623 	if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
4624 		panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
4625 	}
4626 
4627 	*(int *)((void *)c_compressed_record_cptr) = c_size;
4628 
4629 	c_compressed_record_cptr += 4;
4630 
4631 	memcpy(c_compressed_record_cptr, src, c_size);
4632 	c_compressed_record_cptr += c_size;
4633 }
4634 #endif
4635 
4636 
4637 static int
4638 c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
4639 {
4640 	int             c_size = -1;
4641 	int             c_rounded_size = 0;
4642 	int             max_csize;
4643 	c_slot_t        cs;
4644 	c_segment_t     c_seg;
4645 	bool            single_value = false;
4646 
4647 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
4648 retry:
4649 	if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
4650 		return 1;
4651 	}
4652 	/*
4653 	 * returns with c_seg lock held
4654 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
4655 	 * c_nextslot has been allocated and
4656 	 * c_store.c_buffer populated
4657 	 */
4658 	assert(c_seg->c_state == C_IS_FILLING);
4659 
4660 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
4661 
4662 	C_SLOT_ASSERT_PACKABLE(slot_ptr);
4663 	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
4664 
4665 	cs->c_offset = c_seg->c_nextoffset;
4666 
4667 	max_csize = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
4668 
4669 	if (max_csize > PAGE_SIZE) {
4670 		max_csize = PAGE_SIZE;
4671 	}
4672 
4673 #if CHECKSUM_THE_DATA
4674 	cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
4675 #endif
4676 	boolean_t incomp_copy = FALSE;
4677 	int max_csize_adj = (max_csize - 4);
4678 
4679 	if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4680 #if defined(__arm64__)
4681 		uint16_t ccodec = CINVALID;
4682 		uint32_t inline_popcount;
4683 		if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
4684 			c_size = metacompressor((const uint8_t *) src,
4685 			    (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4686 			    max_csize_adj, &ccodec,
4687 			    scratch_buf, &incomp_copy, &inline_popcount);
4688 			assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4689 
4690 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
4691 			if (c_size > max_csize_adj) {
4692 				c_size = -1;
4693 			}
4694 #endif
4695 		} else {
4696 			c_size = -1;
4697 		}
4698 		assert(ccodec == CCWK || ccodec == CCLZ4);
4699 		cs->c_codec = ccodec;
4700 #endif
4701 	} else {
4702 #if defined(__arm64__)
4703 		cs->c_codec = CCWK;
4704 		__unreachable_ok_push
4705 		if (PAGE_SIZE == 4096) {
4706 			c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4707 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4708 		} else {
4709 			c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4710 			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4711 		}
4712 		__unreachable_ok_pop
4713 #else
4714 		c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4715 		    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4716 #endif
4717 	}
4718 	assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
4719 	    "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
4720 
4721 	if (c_size == -1) {
4722 		if (max_csize < PAGE_SIZE) {
4723 			c_current_seg_filled(c_seg, current_chead);
4724 			assert(*current_chead == NULL);
4725 
4726 			lck_mtx_unlock_always(&c_seg->c_lock);
4727 			/* TODO: it may be worth requiring codecs to distinguish
4728 			 * between incompressible inputs and failures due to
4729 			 * budget exhaustion.
4730 			 */
4731 			PAGE_REPLACEMENT_DISALLOWED(FALSE);
4732 			goto retry;
4733 		}
4734 		c_size = PAGE_SIZE;
4735 
4736 		if (incomp_copy == FALSE) {
4737 			memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4738 		}
4739 
4740 		OSAddAtomic(1, &c_segment_noncompressible_pages);
4741 	} else if (c_size == 0) {
4742 		int             hash_index;
4743 
4744 		/*
4745 		 * special case - this is a page completely full of a single 32 bit value
4746 		 */
4747 		single_value = true;
4748 		hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);
4749 
4750 		if (hash_index != -1) {
4751 			slot_ptr->s_cindx = hash_index;
4752 			slot_ptr->s_cseg = C_SV_CSEG_ID;
4753 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4754 			slot_ptr->s_uncompressed = 0;
4755 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4756 
4757 			OSAddAtomic(1, &c_segment_svp_hash_succeeded);
4758 #if RECORD_THE_COMPRESSED_DATA
4759 			c_compressed_record_data(src, 4);
4760 #endif
4761 			goto sv_compression;
4762 		}
4763 		c_size = 4;
4764 
4765 		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4766 
4767 		OSAddAtomic(1, &c_segment_svp_hash_failed);
4768 	}
4769 
4770 #if RECORD_THE_COMPRESSED_DATA
4771 	c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4772 #endif
4773 #if CHECKSUM_THE_COMPRESSED_DATA
4774 	cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4775 #endif
4776 #if POPCOUNT_THE_COMPRESSED_DATA
4777 	cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
4778 #endif
4779 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4780 
4781 	PACK_C_SIZE(cs, c_size);
4782 	c_seg->c_bytes_used += c_rounded_size;
4783 	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
4784 	c_seg->c_slots_used++;
4785 
4786 #if CONFIG_FREEZE
4787 	/* TODO: should c_segment_pages_compressed be up here too? See 88598046 for details */
4788 	OSAddAtomic(1, &c_segment_pages_compressed_incore);
4789 	if (c_seg->c_has_donated_pages) {
4790 		OSAddAtomic(1, &c_segment_pages_compressed_incore_late_swapout);
4791 	}
4792 #endif /* CONFIG_FREEZE */
4793 
4794 	slot_ptr->s_cindx = c_seg->c_nextslot++;
4795 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
4796 	slot_ptr->s_cseg = c_seg->c_mysegno + 1;
4797 
4798 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4799 	slot_ptr->s_uncompressed = 0;
4800 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4801 
4802 sv_compression:
4803 	if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
4804 		c_current_seg_filled(c_seg, current_chead);
4805 		assert(*current_chead == NULL);
4806 	}
4807 
4808 	lck_mtx_unlock_always(&c_seg->c_lock);
4809 
4810 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
4811 
4812 #if RECORD_THE_COMPRESSED_DATA
4813 	if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
4814 		c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
4815 		c_compressed_record_cptr = c_compressed_record_sbuf;
4816 	}
4817 #endif
4818 	if (c_size) {
4819 		OSAddAtomic64(c_size, &c_segment_compressed_bytes);
4820 		OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
4821 	}
4822 	OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
4823 
4824 	OSAddAtomic(1, &c_segment_pages_compressed);
4825 #if DEVELOPMENT || DEBUG
4826 	if (!compressor_running_perf_test) {
4827 		/*
4828 		 * The perf_compressor benchmark should not be able to trigger
4829 		 * compressor thrashing jetsams.
4830 		 */
4831 		OSAddAtomic(1, &sample_period_compression_count);
4832 	}
4833 #else /* DEVELOPMENT || DEBUG */
4834 	OSAddAtomic(1, &sample_period_compression_count);
4835 #endif /* DEVELOPMENT || DEBUG */
4836 
4837 	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
4838 
4839 	return 0;
4840 }
4841 
4842 static inline void
4843 sv_decompress(int32_t *ddst, int32_t pattern)
4844 {
4845 //	assert(__builtin_constant_p(PAGE_SIZE) != 0);
4846 #if defined(__x86_64__)
4847 	memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
4848 #elif defined(__arm64__)
4849 	assert((PAGE_SIZE % 128) == 0);
4850 	if (pattern == 0) {
4851 		fill32_dczva((addr64_t)ddst, PAGE_SIZE);
4852 	} else {
4853 		fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
4854 	}
4855 #else
4856 	size_t          i;
4857 
4858 	/* Unroll the pattern fill loop 4x to encourage the
4859 	 * compiler to emit NEON stores, cf.
4860 	 * <rdar://problem/25839866> Loop autovectorization
4861 	 * anomalies.
4862 	 */
4863 	/* We use separate loops for each PAGE_SIZE
4864 	 * to allow the autovectorizer to engage, as PAGE_SIZE
4865 	 * may not be a constant.
4866 	 */
4867 
4868 	__unreachable_ok_push
4869 	if (PAGE_SIZE == 4096) {
4870 		for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
4871 			*ddst++ = pattern;
4872 			*ddst++ = pattern;
4873 			*ddst++ = pattern;
4874 			*ddst++ = pattern;
4875 		}
4876 	} else {
4877 		assert(PAGE_SIZE == 16384);
4878 		for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
4879 			*ddst++ = pattern;
4880 			*ddst++ = pattern;
4881 			*ddst++ = pattern;
4882 			*ddst++ = pattern;
4883 		}
4884 	}
4885 	__unreachable_ok_pop
4886 #endif
4887 }
4888 
4889 static int
4890 c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, vm_compressor_options_t flags, int *zeroslot)
4891 {
4892 	c_slot_t        cs;
4893 	c_segment_t     c_seg;
4894 	uint32_t        c_segno;
4895 	uint16_t        c_indx;
4896 	int             c_rounded_size;
4897 	uint32_t        c_size;
4898 	int             retval = 0;
4899 	boolean_t       need_unlock = TRUE;
4900 	boolean_t       consider_defragmenting = FALSE;
4901 	boolean_t       kdp_mode = FALSE;
4902 
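	/*
	 * Debugger (C_KDP) callers must pass both C_KEEP and C_DONT_BLOCK:
	 * from the debugger context we can't block or mutate compressor
	 * state, so the paths below stick to try-lock variants and the
	 * dedicated kdp scratch buffer, and leave the slot intact.
	 */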
4903 	if (__improbable(flags & C_KDP)) {
4904 		if (not_in_kdp) {
4905 			panic("C_KDP passed to decompress page from outside of debugger context");
4906 		}
4907 
4908 		assert((flags & C_KEEP) == C_KEEP);
4909 		assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
4910 
4911 		if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
4912 			return -2;
4913 		}
4914 
4915 		kdp_mode = TRUE;
4916 		*zeroslot = 0;
4917 	}
4918 
4919 ReTry:
4920 	if (__probable(!kdp_mode)) {
4921 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
4922 	} else {
4923 		if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
4924 			return -2;
4925 		}
4926 	}
4927 
4928 #if HIBERNATION
4929 	/*
4930 	 * if hibernation is enabled, it indicates (via a call
4931 	 * to 'vm_decompressor_lock') that no further
4932 	 * decompressions are allowed once it reaches
4933 	 * the point of flushing all of the currently dirty
4934 	 * anonymous memory through the compressor and out
4935 	 * to disk... in this state we allow freeing of compressed
4936 	 * pages and must honor the C_DONT_BLOCK case
4937 	 */
4938 	if (__improbable(dst && decompressions_blocked == TRUE)) {
4939 		if (flags & C_DONT_BLOCK) {
4940 			if (__probable(!kdp_mode)) {
4941 				PAGE_REPLACEMENT_DISALLOWED(FALSE);
4942 			}
4943 
4944 			*zeroslot = 0;
4945 			return -2;
4946 		}
4947 		/*
4948 		 * it's safe to atomically assert and block behind the
4949 		 * lock held in shared mode because "decompressions_blocked" is
4950 		 * only set and cleared and the thread_wakeup done when the lock
4951 		 * is held exclusively
4952 		 */
4953 		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
4954 
4955 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
4956 
4957 		thread_block(THREAD_CONTINUE_NULL);
4958 
4959 		goto ReTry;
4960 	}
4961 #endif
4962 	/* s_cseg is actually "segno+1" */
4963 	c_segno = slot_ptr->s_cseg - 1;
4964 
4965 	if (__improbable(c_segno >= c_segments_available)) {
4966 		panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
4967 		    c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
4968 	}
4969 
4970 	if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) {
4971 		panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
4972 		    c_segno, slot_ptr, *(int *)((void *)slot_ptr));
4973 	}
4974 
4975 	c_seg = c_segments[c_segno].c_seg;
4976 
4977 	if (__probable(!kdp_mode)) {
4978 		lck_mtx_lock_spin_always(&c_seg->c_lock);
4979 	} else {
4980 		if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
4981 			return -2;
4982 		}
4983 	}
4984 
4985 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
4986 
4987 	if (dst == NULL && c_seg->c_busy_swapping) {
4988 		assert(c_seg->c_busy);
4989 
4990 		goto bypass_busy_check;
4991 	}
4992 	if (flags & C_DONT_BLOCK) {
4993 		if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
4994 			*zeroslot = 0;
4995 
4996 			retval = -2;
4997 			goto done;
4998 		}
4999 	}
5000 	if (c_seg->c_busy) {
5001 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5002 
5003 		c_seg_wait_on_busy(c_seg);
5004 
5005 		goto ReTry;
5006 	}
5007 bypass_busy_check:
5008 
5009 	c_indx = slot_ptr->s_cindx;
5010 
5011 	if (__improbable(c_indx >= c_seg->c_nextslot)) {
5012 		panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5013 		    c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5014 	}
5015 
5016 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5017 
5018 	c_size = UNPACK_C_SIZE(cs);
5019 
5020 	if (__improbable(c_size == 0)) {
5021 		panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5022 		    c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5023 	}
5024 
5025 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5026 
5027 	if (dst) {
5028 		uint32_t        age_of_cseg;
5029 		clock_sec_t     cur_ts_sec;
5030 		clock_nsec_t    cur_ts_nsec;
5031 
5032 		if (C_SEG_IS_ONDISK(c_seg)) {
5033 #if CONFIG_FREEZE
5034 			if (freezer_incore_cseg_acct) {
5035 				if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
5036 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5037 					lck_mtx_unlock_always(&c_seg->c_lock);
5038 
5039 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5040 
5041 					goto ReTry;
5042 				}
5043 
5044 				uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
5045 				if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
5046 					PAGE_REPLACEMENT_DISALLOWED(FALSE);
5047 					lck_mtx_unlock_always(&c_seg->c_lock);
5048 
5049 					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5050 
5051 					goto ReTry;
5052 				}
5053 			}
5054 #endif /* CONFIG_FREEZE */
5055 			assert(kdp_mode == FALSE);
5056 			retval = c_seg_swapin(c_seg, FALSE, TRUE);
5057 			assert(retval == 0);
5058 
5059 			retval = 1;
5060 		}
5061 		if (c_seg->c_state == C_ON_BAD_Q) {
5062 			assert(c_seg->c_store.c_buffer == NULL);
5063 			*zeroslot = 0;
5064 
5065 			retval = -1;
5066 			goto done;
5067 		}
5068 
5069 #if POPCOUNT_THE_COMPRESSED_DATA
5070 		unsigned csvpop;
5071 		uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
5072 		if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
5073 			panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
5074 		}
5075 #endif
5076 
5077 #if CHECKSUM_THE_COMPRESSED_DATA
5078 		unsigned csvhash;
5079 		if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
5080 			panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
5081 		}
5082 #endif
5083 		if (c_rounded_size == PAGE_SIZE) {
5084 			/*
5085 			 * page wasn't compressible... just copy it out
5086 			 */
5087 			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
5088 		} else if (c_size == 4) {
5089 			int32_t         data;
5090 			int32_t         *dptr;
5091 
5092 			/*
5093 			 * page was populated with a single value
5094 			 * that didn't fit into our fast hash
5095 			 * so we packed it in as a single non-compressed value
5096 			 * that we need to populate the page with
5097 			 */
5098 			dptr = (int32_t *)(uintptr_t)dst;
5099 			data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
5100 			sv_decompress(dptr, data);
5101 		} else {
5102 			uint32_t        my_cpu_no;
5103 			char            *scratch_buf;
5104 
5105 			if (__probable(!kdp_mode)) {
5106 				/*
5107 				 * we're behind the c_seg lock held in spin mode
5108 				 * which means pre-emption is disabled... therefore
5109 				 * the following sequence is atomic and safe
5110 				 */
5111 				my_cpu_no = cpu_number();
5112 
5113 				assert(my_cpu_no < compressor_cpus);
5114 
5115 				scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5116 			} else {
5117 				scratch_buf = kdp_compressor_scratch_buf;
5118 			}
5119 
5120 			if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5121 #if defined(__arm64__)
5122 				uint16_t c_codec = cs->c_codec;
5123 				uint32_t inline_popcount;
5124 				if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5125 				    (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
5126 					retval = -1;
5127 				} else {
5128 					assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5129 				}
5130 #endif
5131 			} else {
5132 #if defined(__arm64__)
5133 				__unreachable_ok_push
5134 				if (PAGE_SIZE == 4096) {
5135 					WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5136 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5137 				} else {
5138 					WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5139 					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5140 				}
5141 				__unreachable_ok_pop
5142 #else
5143 				WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5144 				    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5145 #endif
5146 			}
5147 		}
5148 
5149 #if CHECKSUM_THE_DATA
5150 		if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
5151 #if defined(__arm64__)
5152 			int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
5153 			panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
5154 #else
5155 			panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
5156 #endif
5157 		}
5158 #endif
5159 		if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
5160 			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
5161 
5162 			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
5163 			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
5164 				OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
5165 			} else {
5166 				OSAddAtomic(1, &overage_decompressions_during_sample_period);
5167 			}
5168 
5169 			OSAddAtomic(1, &sample_period_decompression_count);
5170 		}
5171 	}
5172 #if CONFIG_FREEZE
5173 	else {
5174 		/*
5175 		 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
5176 		 */
5177 		if (C_SEG_IS_ONDISK(c_seg)) {
5178 			/*
5179 			 * The compression sweep feature will push out anonymous pages to disk
5180 			 * without going through the freezer path and so those c_segs, while
5181 			 * swapped out, won't have an owner.
5182 			 */
5183 			if (c_seg->c_task_owner) {
5184 				task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
5185 			}
5186 
5187 			/*
5188 			 * We are freeing a page in swap without swapping it in. We bump the in-core
5189 			 * count here to simulate a swapin of a page so that we can accurately
5190 			 * decrement it below.
5191 			 */
5192 			OSAddAtomic(1, &c_segment_pages_compressed_incore);
5193 			if (c_seg->c_has_donated_pages) {
5194 				OSAddAtomic(1, &c_segment_pages_compressed_incore_late_swapout);
5195 			}
5196 		} else if (c_seg->c_state == C_ON_BAD_Q) {
5197 			assert(c_seg->c_store.c_buffer == NULL);
5198 			*zeroslot = 0;
5199 
5200 			retval = -1;
5201 			goto done;
5202 		}
5203 	}
5204 #endif /* CONFIG_FREEZE */
5205 
5206 	if (flags & C_KEEP) {
5207 		*zeroslot = 0;
5208 		goto done;
5209 	}
5210 	assert(kdp_mode == FALSE);
5211 
5212 	c_seg->c_bytes_unused += c_rounded_size;
5213 	c_seg->c_bytes_used -= c_rounded_size;
5214 
5215 	assert(c_seg->c_slots_used);
5216 	c_seg->c_slots_used--;
5217 	if (dst && c_seg->c_swappedin) {
5218 		task_t task = current_task();
5219 		if (task) {
5220 			ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
5221 		}
5222 	}
5223 
5224 	PACK_C_SIZE(cs, 0);
5225 
5226 	if (c_indx < c_seg->c_firstemptyslot) {
5227 		c_seg->c_firstemptyslot = c_indx;
5228 	}
5229 
5230 	OSAddAtomic(-1, &c_segment_pages_compressed);
5231 #if CONFIG_FREEZE
5232 	OSAddAtomic(-1, &c_segment_pages_compressed_incore);
5233 	assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
5234 	if (c_seg->c_has_donated_pages) {
5235 		OSAddAtomic(-1, &c_segment_pages_compressed_incore_late_swapout);
5236 		assertf(c_segment_pages_compressed_incore_late_swapout >= 0, "-ve lateswapout count %p 0x%x", c_seg, c_segment_pages_compressed_incore_late_swapout);
5237 	}
5238 #endif /* CONFIG_FREEZE */
5239 
5240 	if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
5241 		/*
5242 		 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
5243 		 * free of a compressed page (i.e. dst == NULL)
5244 		 */
5245 		OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
5246 	}
5247 	if (c_seg->c_busy_swapping) {
5248 		/*
5249 		 * bypass case for c_busy_swapping...
5250 		 * let the swapin/swapout paths deal with putting
5251 		 * the c_seg on the minor compaction queue if needed
5252 		 */
5253 		assert(c_seg->c_busy);
5254 		goto done;
5255 	}
5256 	assert(!c_seg->c_busy);
5257 
5258 	if (c_seg->c_state != C_IS_FILLING) {
5259 		if (c_seg->c_bytes_used == 0) {
5260 			if (!(C_SEG_IS_ONDISK(c_seg))) {
5261 				int     pages_populated;
5262 
5263 				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
5264 				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
5265 
5266 				if (pages_populated) {
5267 					assert(c_seg->c_state != C_ON_BAD_Q);
5268 					assert(c_seg->c_store.c_buffer != NULL);
5269 
5270 					C_SEG_BUSY(c_seg);
5271 					lck_mtx_unlock_always(&c_seg->c_lock);
5272 
5273 					kernel_memory_depopulate(
5274 						(vm_offset_t) c_seg->c_store.c_buffer,
5275 						ptoa(pages_populated),
5276 						KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
5277 
5278 					lck_mtx_lock_spin_always(&c_seg->c_lock);
5279 					C_SEG_WAKEUP_DONE(c_seg);
5280 				}
5281 				if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPIO_Q) {
5282 					if (c_seg->c_state == C_ON_SWAPOUT_Q) {
5283 						bool clear_busy = false;
5284 						if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
5285 							C_SEG_BUSY(c_seg);
5286 
5287 							lck_mtx_unlock_always(&c_seg->c_lock);
5288 							lck_mtx_lock_spin_always(c_list_lock);
5289 							lck_mtx_lock_spin_always(&c_seg->c_lock);
5290 							clear_busy = true;
5291 						}
5292 						c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
5293 						if (clear_busy) {
5294 							C_SEG_WAKEUP_DONE(c_seg);
5295 							clear_busy = false;
5296 						}
5297 						lck_mtx_unlock_always(c_list_lock);
5298 					}
5299 					c_seg_need_delayed_compaction(c_seg, FALSE);
5300 				}
5301 			} else {
5302 				if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
5303 					c_seg_move_to_sparse_list(c_seg);
5304 					consider_defragmenting = TRUE;
5305 				}
5306 			}
5307 		} else if (c_seg->c_on_minorcompact_q) {
5308 			assert(c_seg->c_state != C_ON_BAD_Q);
5309 			assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
5310 
5311 			if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
5312 				c_seg_try_minor_compaction_and_unlock(c_seg);
5313 				need_unlock = FALSE;
5314 			}
5315 		} else if (!(C_SEG_IS_ONDISK(c_seg))) {
5316 			if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
5317 			    C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
5318 				c_seg_need_delayed_compaction(c_seg, FALSE);
5319 			}
5320 		} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
5321 			c_seg_move_to_sparse_list(c_seg);
5322 			consider_defragmenting = TRUE;
5323 		}
5324 	}
5325 done:
5326 	if (__improbable(kdp_mode)) {
5327 		return retval;
5328 	}
5329 
5330 	if (need_unlock == TRUE) {
5331 		lck_mtx_unlock_always(&c_seg->c_lock);
5332 	}
5333 
5334 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5335 
5336 	if (consider_defragmenting == TRUE) {
5337 		vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
5338 	}
5339 
5340 #if !XNU_TARGET_OS_OSX
5341 	if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact()) {
5342 		vm_wake_compactor_swapper();
5343 	}
5344 #endif /* !XNU_TARGET_OS_OSX */
5345 
5346 	return retval;
5347 }
5348 
5349 
5350 inline bool
5351 vm_compressor_is_slot_compressed(int *slot)
5352 {
5353 #if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5354 #pragma unused(slot)
5355 	return true;
5356 #else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5357 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
5358 	return !slot_ptr->s_uncompressed;
5359 #endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5360 }
5361 
5362 int
5363 vm_compressor_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
5364 {
5365 	c_slot_mapping_t  slot_ptr;
5366 	char    *dst;
5367 	int     zeroslot = 1;
5368 	int     retval;
5369 
5370 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5371 	if (flags & C_PAGE_UNMODIFIED) {
5372 		retval = vm_uncompressed_get(pn, slot, flags | C_KEEP);
5373 		if (retval == 0) {
5374 			os_atomic_inc(&compressor_ro_uncompressed_get, relaxed);
5375 		}
5376 
5377 		return retval;
5378 	}
5379 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5380 
5381 	dst = pmap_map_compressor_page(pn);
5382 	slot_ptr = (c_slot_mapping_t)slot;
5383 
5384 	assert(dst != NULL);
5385 
5386 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5387 		int32_t         data;
5388 		int32_t         *dptr;
5389 
5390 		/*
5391 		 * page was populated with a single value
5392 		 * that found a home in our hash table
5393 		 * grab that value from the hash and use it
5394 		 * to populate the page
5395 		 */
5396 		dptr = (int32_t *)(uintptr_t)dst;
5397 		data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
5398 		sv_decompress(dptr, data);
5399 		if (!(flags & C_KEEP)) {
5400 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5401 
5402 			OSAddAtomic(-1, &c_segment_pages_compressed);
5403 			*slot = 0;
5404 		}
5405 		if (data) {
5406 			OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
5407 		} else {
5408 			OSAddAtomic(1, &c_segment_svp_zero_decompressions);
5409 		}
5410 
5411 		pmap_unmap_compressor_page(pn, dst);
5412 		return 0;
5413 	}
5414 
5415 	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
5416 
5417 	/*
5418 	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
5419 	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
5420 	 */
5421 	if (zeroslot) {
5422 		*slot = 0;
5423 	}
5424 
5425 	pmap_unmap_compressor_page(pn, dst);
5426 
5427 	/*
5428 	 * returns 0 if we successfully decompressed a page from a segment already in memory
5429 	 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
5430 	 * returns -1 if we encountered an error swapping in the segment - decompression failed
5431 	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
5432 	 */
5433 	return retval;
5434 }
5435 
5436 int
5437 vm_compressor_free(int *slot, vm_compressor_options_t flags)
5438 {
5439 	bool slot_is_compressed = vm_compressor_is_slot_compressed(slot);
5440 
5441 	if (slot_is_compressed) {
5442 		c_slot_mapping_t  slot_ptr;
5443 		int     zeroslot = 1;
5444 		int     retval = 0;
5445 
5446 		assert(flags == 0 || flags == C_DONT_BLOCK);
5447 
5448 		slot_ptr = (c_slot_mapping_t)slot;
5449 
5450 		if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5451 			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5452 			OSAddAtomic(-1, &c_segment_pages_compressed);
5453 
5454 			*slot = 0;
5455 			return 0;
5456 		}
5457 		retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
5458 		/*
5459 		 * returns 0 if we successfully freed the specified compressed page
5460 		 * returns -1 if we encountered an error swapping in the segment - decompression failed
5461 		 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
5462 		 */
5463 
5464 		if (retval == 0) {
5465 			*slot = 0;
5466 		}
5467 
5468 		return retval;
5469 	}
5470 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5471 	else {
5472 		if ((flags & C_PAGE_UNMODIFIED) == 0) {
5473 			/* moving from uncompressed state to compressed. Free it.*/
5474 			vm_uncompressed_free(slot, 0);
5475 			assert(*slot == 0);
5476 		}
5477 	}
5478 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5479 	return KERN_SUCCESS;
5480 }
5481 
5482 int
5483 vm_compressor_put(ppnum_t pn, int *slot, void  **current_chead, char *scratch_buf, bool unmodified)
5484 {
5485 	char    *src;
5486 	int     retval = 0;
5487 
5488 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5489 	if (unmodified) {
5490 		if (*slot) {
5491 			os_atomic_inc(&compressor_ro_uncompressed_skip_returned, relaxed);
5492 			return retval;
5493 		} else {
5494 			retval = vm_uncompressed_put(pn, slot);
5495 			if (retval == KERN_SUCCESS) {
5496 				os_atomic_inc(&compressor_ro_uncompressed_put, relaxed);
5497 				return retval;
5498 			}
5499 		}
5500 	}
5501 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5502 #pragma unused(unmodified)
5503 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5504 
5505 	src = pmap_map_compressor_page(pn);
5506 	assert(src != NULL);
5507 
5508 	retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);
5509 	pmap_unmap_compressor_page(pn, src);
5510 
5511 	return retval;
5512 }
5513 
5514 void
5515 vm_compressor_transfer(
5516 	int     *dst_slot_p,
5517 	int     *src_slot_p)
5518 {
5519 	c_slot_mapping_t        dst_slot, src_slot;
5520 	c_segment_t             c_seg;
5521 	uint16_t                c_indx;
5522 	c_slot_t                cs;
5523 
5524 	src_slot = (c_slot_mapping_t) src_slot_p;
5525 
5526 	if (src_slot->s_cseg == C_SV_CSEG_ID || !vm_compressor_is_slot_compressed(src_slot_p)) {
5527 		*dst_slot_p = *src_slot_p;
5528 		*src_slot_p = 0;
5529 		return;
5530 	}
5531 	dst_slot = (c_slot_mapping_t) dst_slot_p;
5532 Retry:
5533 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
5534 	/* get segment for src_slot */
5535 	c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
5536 	/* lock segment */
5537 	lck_mtx_lock_spin_always(&c_seg->c_lock);
5538 	/* wait if it's busy */
5539 	if (c_seg->c_busy && !c_seg->c_busy_swapping) {
5540 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5541 		c_seg_wait_on_busy(c_seg);
5542 		goto Retry;
5543 	}
5544 	/* find the c_slot */
5545 	c_indx = src_slot->s_cindx;
5546 	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5547 	/* point the c_slot back to dst_slot instead of src_slot */
5548 	C_SLOT_ASSERT_PACKABLE(dst_slot);
5549 	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
5550 	/* transfer */
5551 	*dst_slot_p = *src_slot_p;
5552 	*src_slot_p = 0;
5553 	lck_mtx_unlock_always(&c_seg->c_lock);
5554 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5555 }
5556 
5557 #if defined(__arm64__)
5558 extern clock_sec_t             vm_swapfile_last_failed_to_create_ts;
5559 __attribute__((noreturn))
5560 void
5561 vm_panic_hibernate_write_image_failed(int err)
5562 {
5563 	panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d\n",
5564 	    err,
5565 	    VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
5566 	    c_age_count, c_major_count, c_minor_count, (c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count), c_swappedout_sparse_count,
5567 	    vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
5568 	    (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out);
5569 }
5570 #endif /*(__arm64__)*/
5571 
5572 #if CONFIG_FREEZE
5573 
5574 int     freezer_finished_filling = 0;
5575 
5576 void
5577 vm_compressor_finished_filling(
5578 	void    **current_chead)
5579 {
5580 	c_segment_t     c_seg;
5581 
5582 	if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
5583 		return;
5584 	}
5585 
5586 	assert(c_seg->c_state == C_IS_FILLING);
5587 
5588 	lck_mtx_lock_spin_always(&c_seg->c_lock);
5589 
5590 	c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
5591 
5592 	lck_mtx_unlock_always(&c_seg->c_lock);
5593 
5594 	freezer_finished_filling++;
5595 }
5596 
5597 
5598 /*
5599  * This routine is used to transfer the compressed chunks from
5600  * the c_seg/cindx pointed to by slot_p into a new c_seg headed
5601  * by the current_chead and a new cindx within that c_seg.
5602  *
5603  * Currently, this routine is only used by the "freezer backed by
5604  * compressor with swap" mode to create a series of c_segs that
5605  * only contain compressed data belonging to one task. So, we
5606  * move a task's previously compressed data into a set of new
5607  * c_segs which will also hold the task's yet to be compressed data.
5608  */
5609 
5610 kern_return_t
5611 vm_compressor_relocate(
5612 	void            **current_chead,
5613 	int             *slot_p)
5614 {
5615 	c_slot_mapping_t        slot_ptr;
5616 	c_slot_mapping_t        src_slot;
5617 	uint32_t                c_rounded_size;
5618 	uint32_t                c_size;
5619 	uint16_t                dst_slot;
5620 	c_slot_t                c_dst;
5621 	c_slot_t                c_src;
5622 	uint16_t                c_indx;
5623 	c_segment_t             c_seg_dst = NULL;
5624 	c_segment_t             c_seg_src = NULL;
5625 	kern_return_t           kr = KERN_SUCCESS;
5626 
5627 
5628 	src_slot = (c_slot_mapping_t) slot_p;
5629 
5630 	if (src_slot->s_cseg == C_SV_CSEG_ID) {
5631 		/*
5632 		 * no need to relocate... this is a page full of a single
5633 		 * value which is hashed to a single entry not contained
5634 		 * in a c_segment_t
5635 		 */
5636 		return kr;
5637 	}
5638 
5639 	if (vm_compressor_is_slot_compressed((int *)src_slot) == false) {
5640 		/*
5641 		 * Unmodified anonymous pages are sitting uncompressed on disk.
5642 		 * So don't pull them back in again.
5643 		 */
5644 		return kr;
5645 	}
5646 
5647 Relookup_dst:
5648 	c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
5649 	/*
5650 	 * returns with c_seg lock held
5651 	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5652 	 * c_nextslot has been allocated and
5653 	 * c_store.c_buffer populated
5654 	 */
5655 	if (c_seg_dst == NULL) {
5656 		/*
5657 		 * Out of compression segments?
5658 		 */
5659 		kr = KERN_RESOURCE_SHORTAGE;
5660 		goto out;
5661 	}
5662 
5663 	assert(c_seg_dst->c_busy == 0);
5664 
5665 	C_SEG_BUSY(c_seg_dst);
5666 
5667 	dst_slot = c_seg_dst->c_nextslot;
5668 
5669 	lck_mtx_unlock_always(&c_seg_dst->c_lock);
5670 
5671 Relookup_src:
5672 	c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;
5673 
5674 	assert(c_seg_dst != c_seg_src);
5675 
5676 	lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5677 
5678 	if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
5679 	    c_seg_src->c_state == C_IS_FILLING) {
5680 		/*
5681 		 * Skip this page if :-
5682 		 * a) the src c_seg is already on-disk (or on its way there)
5683 		 *    A "thaw" can mark a process as eligible for
5684 		 * another freeze cycle without bringing any of
5685 		 * its swapped out c_segs back from disk (because
5686 		 * that is done on-demand).
5687 		 *    Or, this page may be mapped elsewhere in the task's map,
5688 		 * and we may have marked it for swap already.
5689 		 *
5690 		 * b) Or, the src c_seg is being filled by the compressor
5691 		 * thread. We don't want the added latency of waiting for
5692 		 * this c_seg in the freeze path and so we skip it.
5693 		 */
5694 
5695 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5696 
5697 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5698 
5699 		c_seg_src = NULL;
5700 
5701 		goto out;
5702 	}
5703 
5704 	if (c_seg_src->c_busy) {
5705 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5706 		c_seg_wait_on_busy(c_seg_src);
5707 
5708 		c_seg_src = NULL;
5709 
5710 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5711 
5712 		goto Relookup_src;
5713 	}
5714 
5715 	C_SEG_BUSY(c_seg_src);
5716 
5717 	lck_mtx_unlock_always(&c_seg_src->c_lock);
5718 
5719 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5720 
5721 	/* find the c_slot */
5722 	c_indx = src_slot->s_cindx;
5723 
5724 	c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
5725 
5726 	c_size = UNPACK_C_SIZE(c_src);
5727 
5728 	assert(c_size);
5729 
5730 	if (c_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
5731 		/*
5732 		 * This segment is full. We need a new one.
5733 		 */
5734 
5735 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5736 
5737 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5738 		C_SEG_WAKEUP_DONE(c_seg_src);
5739 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5740 
5741 		c_seg_src = NULL;
5742 
5743 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5744 
5745 		assert(c_seg_dst->c_busy);
5746 		assert(c_seg_dst->c_state == C_IS_FILLING);
5747 		assert(!c_seg_dst->c_on_minorcompact_q);
5748 
5749 		c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5750 		assert(*current_chead == NULL);
5751 
5752 		C_SEG_WAKEUP_DONE(c_seg_dst);
5753 
5754 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
5755 
5756 		c_seg_dst = NULL;
5757 
5758 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5759 
5760 		goto Relookup_dst;
5761 	}
5762 
5763 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
5764 
5765 	memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
5766 	/*
5767 	 * Is platform alignment actually necessary since wkdm aligns its output?
5768 	 */
5769 	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5770 
5771 	cslot_copy(c_dst, c_src);
5772 	c_dst->c_offset = c_seg_dst->c_nextoffset;
5773 
5774 	if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
5775 		c_seg_dst->c_firstemptyslot++;
5776 	}
5777 
5778 	c_seg_dst->c_slots_used++;
5779 	c_seg_dst->c_nextslot++;
5780 	c_seg_dst->c_bytes_used += c_rounded_size;
5781 	c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5782 
5783 
5784 	PACK_C_SIZE(c_src, 0);
5785 
5786 	c_seg_src->c_bytes_used -= c_rounded_size;
5787 	c_seg_src->c_bytes_unused += c_rounded_size;
5788 
5789 	assert(c_seg_src->c_slots_used);
5790 	c_seg_src->c_slots_used--;
5791 
5792 	if (!c_seg_src->c_swappedin) {
5793 		/* Pessimistically lose swappedin status when non-swappedin pages are added. */
5794 		c_seg_dst->c_swappedin = false;
5795 	}
5796 
5797 	if (c_indx < c_seg_src->c_firstemptyslot) {
5798 		c_seg_src->c_firstemptyslot = c_indx;
5799 	}
5800 
5801 	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
5802 
5803 	PAGE_REPLACEMENT_ALLOWED(TRUE);
5804 	slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
5805 	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
5806 	slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
5807 	slot_ptr->s_cindx = dst_slot;
5808 
5809 	PAGE_REPLACEMENT_ALLOWED(FALSE);
5810 
5811 out:
5812 	if (c_seg_src) {
5813 		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5814 
5815 		C_SEG_WAKEUP_DONE(c_seg_src);
5816 
5817 		if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
5818 			if (!c_seg_src->c_on_minorcompact_q) {
5819 				c_seg_need_delayed_compaction(c_seg_src, FALSE);
5820 			}
5821 		}
5822 
5823 		lck_mtx_unlock_always(&c_seg_src->c_lock);
5824 	}
5825 
5826 	if (c_seg_dst) {
5827 		PAGE_REPLACEMENT_DISALLOWED(TRUE);
5828 
5829 		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5830 
5831 		if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
5832 			/*
5833 			 * Nearing or exceeded maximum slot and offset capacity.
5834 			 */
5835 			assert(c_seg_dst->c_busy);
5836 			assert(c_seg_dst->c_state == C_IS_FILLING);
5837 			assert(!c_seg_dst->c_on_minorcompact_q);
5838 
5839 			c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5840 			assert(*current_chead == NULL);
5841 		}
5842 
5843 		C_SEG_WAKEUP_DONE(c_seg_dst);
5844 
5845 		lck_mtx_unlock_always(&c_seg_dst->c_lock);
5846 
5847 		c_seg_dst = NULL;
5848 
5849 		PAGE_REPLACEMENT_DISALLOWED(FALSE);
5850 	}
5851 
5852 	return kr;
5853 }
5854 #endif /* CONFIG_FREEZE */
5855 
5856 #if DEVELOPMENT || DEBUG
5857 
5858 void
5859 vm_compressor_inject_error(int *slot)
5860 {
5861 	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
5862 
5863 	/* No error detection for single-value compression. */
5864 	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5865 		printf("%s(): cannot inject errors in SV-compressed pages\n", __func__ );
5866 		return;
5867 	}
5868 
5869 	/* s_cseg is actually "segno+1" */
5870 	const uint32_t c_segno = slot_ptr->s_cseg - 1;
5871 
5872 	assert(c_segno < c_segments_available);
5873 	assert(c_segments[c_segno].c_segno >= c_segments_available);
5874 
5875 	const c_segment_t c_seg = c_segments[c_segno].c_seg;
5876 
5877 	PAGE_REPLACEMENT_DISALLOWED(TRUE);
5878 
5879 	lck_mtx_lock_spin_always(&c_seg->c_lock);
5880 	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
5881 
5882 	const uint16_t c_indx = slot_ptr->s_cindx;
5883 	assert(c_indx < c_seg->c_nextslot);
5884 
5885 	/*
5886 	 * To safely make this segment temporarily writable, we need to mark
5887 	 * the segment busy, which allows us to release the segment lock.
5888 	 */
5889 	while (c_seg->c_busy) {
5890 		c_seg_wait_on_busy(c_seg);
5891 		lck_mtx_lock_spin_always(&c_seg->c_lock);
5892 	}
5893 	C_SEG_BUSY(c_seg);
5894 
5895 	bool already_writable = (c_seg->c_state == C_IS_FILLING);
5896 	if (!already_writable) {
5897 		/*
5898 		 * Protection update must be performed preemptibly, so temporarily drop
5899 		 * the lock. Having set c_busy will prevent most other concurrent
5900 		 * operations.
5901 		 */
5902 		lck_mtx_unlock_always(&c_seg->c_lock);
5903 		C_SEG_MAKE_WRITEABLE(c_seg);
5904 		lck_mtx_lock_spin_always(&c_seg->c_lock);
5905 	}
5906 
5907 	/*
5908 	 * Once we've released the lock following our c_state == C_IS_FILLING check,
5909 	 * c_current_seg_filled() can (re-)write-protect the segment. However, it
5910 	 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
5911 	 * can detect this by re-checking after we've reobtained the lock.
5912 	 */
5913 	if (already_writable && c_seg->c_state != C_IS_FILLING) {
5914 		lck_mtx_unlock_always(&c_seg->c_lock);
5915 		C_SEG_MAKE_WRITEABLE(c_seg);
5916 		lck_mtx_lock_spin_always(&c_seg->c_lock);
5917 		already_writable = false;
5918 		/* Segment can't be freed while c_busy is set. */
5919 		assert(c_seg->c_state != C_IS_FILLING);
5920 	}
5921 
5922 	/*
5923 	 * Skip if the segment is on disk. This check can only be performed after
5924 	 * the final acquisition of the segment lock before we attempt to write to
5925 	 * the segment.
5926 	 */
5927 	if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
5928 		c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5929 		int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
5930 		/* assume that the compressed data holds at least one int32_t */
5931 		assert(UNPACK_C_SIZE(cs) > sizeof(*data));
5932 		/*
5933 		 * This bit is known to be in the payload of a MISS packet resulting from
5934 		 * the pattern used in the test pattern from decompression_failure.c.
5935 		 * Flipping it should result in many corrupted bits in the test page.
5936 		 */
5937 		data[0] ^= 0x00000100;
5938 	}
5939 
5940 	if (!already_writable) {
5941 		lck_mtx_unlock_always(&c_seg->c_lock);
5942 		C_SEG_WRITE_PROTECT(c_seg);
5943 		lck_mtx_lock_spin_always(&c_seg->c_lock);
5944 	}
5945 
5946 	C_SEG_WAKEUP_DONE(c_seg);
5947 	lck_mtx_unlock_always(&c_seg->c_lock);
5948 
5949 	PAGE_REPLACEMENT_DISALLOWED(FALSE);
5950 }
5951 
5952 #endif /* DEVELOPMENT || DEBUG */
5953 
5954 
5955 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5956 
5957 struct vnode;
5958 extern void vm_swapfile_open(const char *path, struct vnode **vp);
5959 extern int vm_swapfile_preallocate(struct vnode *vp, uint64_t *size, boolean_t *pin);
5960 
5961 struct vnode *uncompressed_vp0 = NULL;
5962 struct vnode *uncompressed_vp1 = NULL;
5963 uint32_t uncompressed_file0_free_pages = 0, uncompressed_file1_free_pages = 0;
5964 uint64_t uncompressed_file0_free_offset = 0, uncompressed_file1_free_offset = 0;
5965 
5966 uint64_t compressor_ro_uncompressed = 0;
5967 uint64_t compressor_ro_uncompressed_total_returned = 0;
5968 uint64_t compressor_ro_uncompressed_skip_returned = 0;
5969 uint64_t compressor_ro_uncompressed_get = 0;
5970 uint64_t compressor_ro_uncompressed_put = 0;
5971 uint64_t compressor_ro_uncompressed_swap_usage = 0;
5972 
5973 extern void vnode_put(struct vnode* vp);
5974 extern int vnode_getwithref(struct vnode* vp);
5975 extern int vm_swapfile_io(struct vnode *vp, uint64_t offset, uint64_t start, int npages, int flags, void *upl_ctx);
5976 
5977 #define MAX_OFFSET_PAGES        (255)
5978 uint64_t uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
5979 uint64_t uncompressed_file1_space_bitmap[MAX_OFFSET_PAGES];
5980 
5981 #define UNCOMPRESSED_FILEIDX_OFFSET_MASK (((uint32_t)1<<31ull) - 1)
5982 #define UNCOMPRESSED_FILEIDX_SHIFT (29)
5983 #define UNCOMPRESSED_FILEIDX_MASK (3)
5984 #define UNCOMPRESSED_OFFSET_SHIFT (29)
5985 #define UNCOMPRESSED_OFFSET_MASK (7)
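/*
 * Note on the uncompressed-swap slot encoding used by the helpers below:
 * bits 0-28 hold the byte offset of the page within its backing file and
 * bits 29-30 hold the file index (1 == uncompressedswap0,
 * 2 == uncompressedswap1); bit 31 is presumably left for the
 * s_uncompressed flag in c_slot_mapping, which vm_uncompressed_put() sets
 * after storing the slot. For example, a page written at byte offset
 * 0x4000 of file 0 encodes as ((1 << UNCOMPRESSED_FILEIDX_SHIFT) + 0x4000).
 */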
5986 
5987 static uint32_t
5988 vm_uncompressed_extract_swap_file(int slot)
5989 {
5990 	uint32_t fileidx = (((uint32_t)slot & UNCOMPRESSED_FILEIDX_OFFSET_MASK) >> UNCOMPRESSED_FILEIDX_SHIFT) & UNCOMPRESSED_FILEIDX_MASK;
5991 	return fileidx;
5992 }
5993 
5994 static uint32_t
5995 vm_uncompressed_extract_swap_offset(int slot)
5996 {
5997 	return slot & (uint32_t)(~(UNCOMPRESSED_OFFSET_MASK << UNCOMPRESSED_OFFSET_SHIFT));
5998 }
5999 
6000 static void
6001 vm_uncompressed_return_space_to_swap(int slot)
6002 {
6003 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6004 	uint32_t fileidx = vm_uncompressed_extract_swap_file(slot);
6005 	if (fileidx == 1) {
6006 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6007 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6008 		uint64_t chunkidx = pgidx / 64;
6009 		uint64_t chunkoffset = pgidx % 64;
6010 #if DEVELOPMENT || DEBUG
6011 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6012 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6013 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6014 #endif /*DEVELOPMENT || DEBUG*/
6015 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6016 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6017 		uncompressed_file0_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6018 		assertf(!(uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6019 		    "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6020 
6021 		uncompressed_file0_free_pages++;
6022 	} else {
6023 		uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6024 		uint64_t pgidx = free_offset / PAGE_SIZE_64;
6025 		uint64_t chunkidx = pgidx / 64;
6026 		uint64_t chunkoffset = pgidx % 64;
6027 		assertf((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6028 		    "%llu %llu", chunkidx, chunkoffset);
6029 		uncompressed_file1_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6030 
6031 		uncompressed_file1_free_pages++;
6032 	}
6033 	compressor_ro_uncompressed_swap_usage--;
6034 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6035 }
6036 
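/*
 * First-fit scan of the per-file free-space bitmaps: find the first
 * 64-bit word with a clear bit, claim that page, and encode the
 * resulting <file index, byte offset> pair as a slot value (see the
 * layout note above). Returns -1 when neither file has a free page.
 */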
6037 static int
6038 vm_uncompressed_reserve_space_in_swap()
6039 {
6040 	int slot = 0;
6041 	if (uncompressed_file0_free_pages == 0 && uncompressed_file1_free_pages == 0) {
6042 		return -1;
6043 	}
6044 
6045 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6046 	if (uncompressed_file0_free_pages) {
6047 		uint64_t chunkidx = 0;
6048 		uint64_t chunkoffset = 0;
6049 		while (uncompressed_file0_space_bitmap[chunkidx] == 0xffffffffffffffff) {
6050 			chunkidx++;
6051 		}
6052 		while (uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6053 			chunkoffset++;
6054 		}
6055 
6056 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0,
6057 		    "%llu %llu", chunkidx, chunkoffset);
6058 #if DEVELOPMENT || DEBUG
6059 		uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6060 		uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6061 		assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6062 #endif /*DEVELOPMENT || DEBUG*/
6063 		uncompressed_file0_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6064 		uncompressed_file0_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6065 		assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6066 		    "%llu %llu", chunkidx, chunkoffset);
6067 
6068 		assert(uncompressed_file0_free_offset <= (1 << UNCOMPRESSED_OFFSET_SHIFT));
6069 		slot = (int)((1 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file0_free_offset);
6070 		uncompressed_file0_free_pages--;
6071 	} else {
6072 		uint64_t chunkidx = 0;
6073 		uint64_t chunkoffset = 0;
6074 		while (uncompressed_file1_space_bitmap[chunkidx] == 0xFFFFFFFFFFFFFFFF) {
6075 			chunkidx++;
6076 		}
6077 		while (uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6078 			chunkoffset++;
6079 		}
6080 		assert((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0);
6081 		uncompressed_file1_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6082 		uncompressed_file1_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6083 		slot = (int)((2 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file1_free_offset);
6084 		uncompressed_file1_free_pages--;
6085 	}
6086 	compressor_ro_uncompressed_swap_usage++;
6087 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6088 	return slot;
6089 }
6090 
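/*
 * Small fixed pool of page-sized bounce buffers used for the swapfile
 * I/O below; a thread that finds every entry in use waits on the pool
 * and is woken when another request completes.
 */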
6091 #define MAX_IO_REQ (16)
6092 struct _uncompressor_io_req {
6093 	uint64_t addr;
6094 	bool inuse;
6095 } uncompressor_io_req[MAX_IO_REQ];
6096 
6097 int
6098 vm_uncompressed_put(ppnum_t pn, int *slot)
6099 {
6100 	int retval = 0;
6101 	struct vnode *uncompressed_vp = NULL;
6102 	uint64_t uncompress_offset = 0;
6103 
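	/*
	 * First use: create and preallocate both backing files and allocate
	 * the bounce-buffer pool. The NULL check is repeated under
	 * PAGE_REPLACEMENT_ALLOWED(); if another thread completed the setup
	 * in the meantime we back off briefly and re-check.
	 */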
6104 again:
6105 	if (uncompressed_vp0 == NULL) {
6106 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6107 		if (uncompressed_vp0 == NULL) {
6108 			uint64_t size = (MAX_OFFSET_PAGES * 1024 * 1024ULL);
6109 			vm_swapfile_open("/private/var/vm/uncompressedswap0", &uncompressed_vp0);
6110 			if (uncompressed_vp0 == NULL) {
6111 				PAGE_REPLACEMENT_ALLOWED(FALSE);
6112 				return KERN_NO_ACCESS;
6113 			}
6114 			vm_swapfile_preallocate(uncompressed_vp0, &size, NULL);
6115 			uncompressed_file0_free_pages = (uint32_t)atop(size);
6116 			bzero(uncompressed_file0_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6117 
6118 			int i = 0;
6119 			for (; i < MAX_IO_REQ; i++) {
6120 				kmem_alloc(kernel_map, (vm_offset_t*)&uncompressor_io_req[i].addr, PAGE_SIZE_64, KMA_NOFAIL | KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
6121 				uncompressor_io_req[i].inuse = false;
6122 			}
6123 
6124 			vm_swapfile_open("/private/var/vm/uncompressedswap1", &uncompressed_vp1);
6125 			assert(uncompressed_vp1);
6126 			vm_swapfile_preallocate(uncompressed_vp1, &size, NULL);
6127 			uncompressed_file1_free_pages = (uint32_t)atop(size);
6128 			bzero(uncompressed_file1_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6129 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6130 		} else {
6131 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6132 			delay(100);
6133 			goto again;
6134 		}
6135 	}
6136 
6137 	int swapinfo = vm_uncompressed_reserve_space_in_swap();
6138 	if (swapinfo == -1) {
6139 		*slot = 0;
6140 		return KERN_RESOURCE_SHORTAGE;
6141 	}
6142 
6143 	if (vm_uncompressed_extract_swap_file(swapinfo) == 1) {
6144 		uncompressed_vp = uncompressed_vp0;
6145 	} else {
6146 		uncompressed_vp = uncompressed_vp1;
6147 	}
6148 	uncompress_offset = vm_uncompressed_extract_swap_offset(swapinfo);
6149 	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6150 		os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_put: vnode_getwithref on swapfile failed with %d\n", retval);
6151 	} else {
6152 		int i = 0;
6153 retry:
6154 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6155 		for (i = 0; i < MAX_IO_REQ; i++) {
6156 			if (uncompressor_io_req[i].inuse == false) {
6157 				uncompressor_io_req[i].inuse = true;
6158 				break;
6159 			}
6160 		}
6161 		if (i == MAX_IO_REQ) {
6162 			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6163 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6164 			thread_block(THREAD_CONTINUE_NULL);
6165 			goto retry;
6166 		}
6167 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6168 		void *addr = pmap_map_compressor_page(pn);
6169 		memcpy((void*)uncompressor_io_req[i].addr, addr, PAGE_SIZE_64);
6170 		pmap_unmap_compressor_page(pn, addr);
6171 
6172 		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_WRITE, NULL);
6173 		if (retval) {
6174 			*slot = 0;
6175 		} else {
6176 			*slot = (int)swapinfo;
6177 			((c_slot_mapping_t)(slot))->s_uncompressed = 1;
6178 		}
6179 		vnode_put(uncompressed_vp);
6180 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6181 		uncompressor_io_req[i].inuse = false;
6182 		thread_wakeup((event_t)&uncompressor_io_req);
6183 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6184 	}
6185 	return retval;
6186 }
6187 
6188 int
6189 vm_uncompressed_get(ppnum_t pn, int *slot, __unused vm_compressor_options_t flags)
6190 {
6191 	int retval = 0;
6192 	struct vnode *uncompressed_vp = NULL;
6193 	uint32_t fileidx = vm_uncompressed_extract_swap_file(*slot);
6194 	uint64_t uncompress_offset = vm_uncompressed_extract_swap_offset(*slot);
6195 
6196 	if (__improbable(flags & C_KDP)) {
6197 		return -2;
6198 	}
6199 
6200 	if (fileidx == 1) {
6201 		uncompressed_vp = uncompressed_vp0;
6202 	} else {
6203 		uncompressed_vp = uncompressed_vp1;
6204 	}
6205 
6206 	if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6207 		os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_get: vnode_getwithref on swapfile failed with %d\n", retval);
6208 	} else {
6209 		int i = 0;
6210 retry:
6211 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6212 		for (i = 0; i < MAX_IO_REQ; i++) {
6213 			if (uncompressor_io_req[i].inuse == false) {
6214 				uncompressor_io_req[i].inuse = true;
6215 				break;
6216 			}
6217 		}
6218 		if (i == MAX_IO_REQ) {
6219 			assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6220 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6221 			thread_block(THREAD_CONTINUE_NULL);
6222 			goto retry;
6223 		}
6224 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6225 		retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_READ, NULL);
6226 		vnode_put(uncompressed_vp);
6227 		void *addr = pmap_map_compressor_page(pn);
6228 		memcpy(addr, (void*)uncompressor_io_req[i].addr, PAGE_SIZE_64);
6229 		pmap_unmap_compressor_page(pn, addr);
6230 		PAGE_REPLACEMENT_ALLOWED(TRUE);
6231 		uncompressor_io_req[i].inuse = false;
6232 		thread_wakeup((event_t)&uncompressor_io_req);
6233 		PAGE_REPLACEMENT_ALLOWED(FALSE);
6234 	}
6235 	return retval;
6236 }
6237 
6238 int
6239 vm_uncompressed_free(int *slot, __unused vm_compressor_options_t flags)
6240 {
6241 	vm_uncompressed_return_space_to_swap(*slot);
6242 	*slot = 0;
6243 	return 0;
6244 }
6245 
6246 #endif /*CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
6247